Commit 273a711459: Merge branch '2.0' into 2.1
CHANGELOG (72 lines changed)
@ -1,5 +1,77 @@
# Change Log

## 2.0.0rc4 20/04/2017

* Fix a race condition when handling an error at project opening
* Fix an issue with editing networks on Windows
* Fix Windows tests
* Catch timeout errors on Docker
* typing is already included in Py >= 3.5 (#979)
* Fix import of some old topologies
* Fix AttributeError: 'NoneType' object has no attribute 'returncode'
* Fix ghost VMware VMs
* Fix required field in schema not being used
* Catch errors and log them when we can't write the config
* Fix "bridge 'bridge0' already exists" when we have trouble with a container
* Catch an error at startup when the remote GNS3 VM is not a real GNS3 VM
* Fix Qemu SATA option. Ref #875.
* Catch GNS3 VM loading errors at startup

## 1.5.4 13/04/2017

* Fix VPCS tests for recent versions
* Freeze server dependencies to the same versions used for 1.5.3
* Fix 1.5: error message when stopping an IOU router (#769)
* Drop color logging for remote install, seems to fail in some conditions
* Clean up the remote install script
* Support for Xenial in remote install

## 2.0.0rc3 31/03/2017

* Support IOU images without .bin at the end
* Allow changing some properties of an already connected Ethernet switch
* Ensure we start only one uBridge
* Catch some broken hostnames for compute nodes
* Fix limit of 20 Docker containers
* Fix race conditions in creation of Frame Relay switches
* Fix conversion of projects from 1.X with a custom symbol for the cloud
* Disallow parallel pull of Docker images
* Add a script for running the current dev version on the GNS3 VM
* Fix a crash with missing size in SVG files
* Fix a UTF-8 error in auth code
* Improve the vmrun timeout message
* Support UTF-8 characters in user and password for auth
* Handle password configuration changes on remote servers
* Fix a bug when deleting a fake-running VMBox
* Fix "Can't connect to compute local" on some computers
* Add a modification uuid to the settings returned by the server
* Check the Python version in setup.py only for install
* Fix "Session is closed" when listing Docker images
* Clean up the Docker source code
* Use an aiohttp session for Docker queries
* Escape special characters from SVG text
* Fix some port short name display issues
* Catch server disconnected errors from computes
* Generate a node uuid if the uuid is missing in the .gns3
* Ensure the project is dumped before exporting it
* Fix return code check for SIGSEGV of IOU images
* Prevent vmname changes for VirtualBox linked clones
* Upgrade to aiohttp 1.3.5 to solve an issue with big files
* Handle some invalid SVGs
* Try to fix some 1.3 topologies with corrupted data
* Fix "ComputeError: Can't connect to Main server"
* Catch errors when the server has trouble accessing itself
* Catch a timeout error in Docker
* Lock the yarl version because 0.10 is not compatible with aiohttp 1.3
* Raise an error if images are not available on the main server during export
* Fix a race condition when killing uBridge
* If your settings from 1.X are broken, skip them at import
* Catch a permission error on symbols
* Catch a Unicode error when you try to duplicate a project with invalid characters
* Catch errors when you try to put an invalid server URL
* Fix an error when handling ubridge errors
* Fix a crash when handling an error in project creation

## 2.0.0rc2 10/03/2017

* Drop color logging for remote install, seems to fail in some conditions
@ -216,3 +216,8 @@ If you want test coverage:

.. code:: bash

   py.test --cov-report term-missing --cov=gns3server

Security issues
---------------

Please contact us using the contact information available here:
http://docs.gns3.com/1ON9JBXSeR7Nt2-Qum2o3ZX0GU86BZwlmNSUgvmqNWGY/index.html
appveyor.yml (new file, 21 lines)
@ -0,0 +1,21 @@
version: '{build}-{branch}'

image: Visual Studio 2015

platform: x64

environment:
  PYTHON: "C:\\Python36-x64"
  DISTUTILS_USE_SDK: "1"
  API_TOKEN:
    secure: VEKn4bYH3QO0ixtQW5ni4Enmn8cS1NlZV246ludBDgQ=

install:
  - cinst nmap
  - "%PYTHON%\\python.exe -m pip install -r dev-requirements.txt"
  - "%PYTHON%\\python.exe -m pip install -r win-requirements.txt"

build: off

test_script:
  - "%PYTHON%\\python.exe -m pytest -v"
@ -1,6 +1,6 @@
-rrequirements.txt

sphinx==1.5.3
sphinx==1.5.5
pytest==3.0.7
pep8==1.7.0
pytest-catchlog==1.2.2
@ -20,10 +20,10 @@ Docker server module.
"""

import sys
import json
import asyncio
import logging
import aiohttp
import json

from gns3server.utils import parse_version
from gns3server.utils.asyncio import locked_coroutine
from gns3server.compute.base_manager import BaseManager
@ -33,7 +33,7 @@ from gns3server.compute.docker.docker_error import DockerError, DockerHttp304Err

log = logging.getLogger(__name__)


DOCKER_MINIMUM_API_VERSION = "1.21"
DOCKER_MINIMUM_API_VERSION = "1.25"


class Docker(BaseManager):
@ -113,7 +113,7 @@ class Docker(BaseManager):
        :returns: HTTP response
        """
        data = json.dumps(data)
        url = "http://docker/" + path
        url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path

        if timeout is None:
            timeout = 60 * 60 * 24 * 31  # One month timeout
@ -134,6 +134,8 @@ class Docker(BaseManager):
            )
        except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
            raise DockerError("Docker has returned an error: {}".format(str(e)))
        except (asyncio.TimeoutError):
            raise DockerError("Docker timeout " + method + " " + path)
        if response.status >= 300:
            body = yield from response.read()
            try:
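
The change above pins every Docker request to the minimum supported API version by prefixing the path with `/v1.25`. A minimal standalone sketch of issuing such a versioned request with aiohttp (the session, function name and error type are illustrative assumptions, not the project's API):

```python
import asyncio
import json

import aiohttp

DOCKER_MINIMUM_API_VERSION = "1.25"  # value taken from the hunk above


async def docker_query(session, method, path, data=None, timeout=60):
    """Issue one request against the versioned Docker API.

    `session` is assumed to be an aiohttp.ClientSession whose connector is
    bound to the local Docker socket, which is why the host part is simply
    "docker".
    """
    url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
    try:
        response = await session.request(
            method,
            url,
            data=json.dumps(data) if data is not None else None,
            headers={"content-type": "application/json"},
            timeout=timeout)
    except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
        raise RuntimeError("Docker has returned an error: {}".format(e))
    except asyncio.TimeoutError:
        raise RuntimeError("Docker timeout {} {}".format(method, path))
    return response
```
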
@ -187,7 +189,10 @@ class Docker(BaseManager):
        # The pull api will stream status via an HTTP JSON stream
        content = ""
        while True:
            chunk = yield from response.content.read(1024)
            try:
                chunk = yield from response.content.read(1024)
            except aiohttp.errors.ServerDisconnectedError:
                break
            if not chunk:
                break
            content += chunk.decode("utf-8")
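
A rough sketch of consuming that pull status stream chunk by chunk, stopping either on end-of-stream or on a dropped connection (function name is illustrative; newer aiohttp exposes the exception as `aiohttp.ServerDisconnectedError`):

```python
import json

import aiohttp


async def read_pull_status(response):
    """Accumulate the docker pull progress stream and return the parsed status lines."""
    content = ""
    while True:
        try:
            chunk = await response.content.read(1024)
        except aiohttp.ServerDisconnectedError:
            # Docker closed the connection before sending an empty chunk
            break
        if not chunk:
            break
        content += chunk.decode("utf-8")

    statuses = []
    for line in content.splitlines():
        line = line.strip()
        if line:
            try:
                statuses.append(json.loads(line))
            except ValueError:
                pass  # partial JSON line; ignored in this sketch
    return statuses
```
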
@ -361,6 +361,7 @@ class DockerVM(BaseNode):
            try:
                yield from self._add_ubridge_connection(nio, adapter_number)
            except UbridgeNamespaceError:
                log.error("Container {} failed to start", self.name)
                yield from self.stop()

                # The container can crash soon after start, which means we cannot move the interface to the container namespace
@ -517,6 +518,8 @@ class DockerVM(BaseNode):
        state = yield from self._get_container_state()
        if state == "running":
            return True
        if self.status == "started":  # The container crashed, we need to clean up
            yield from self.stop()
        return False

    @asyncio.coroutine
@ -1379,7 +1379,7 @@ class QemuVM(BaseNode):
                    # special case, sata controller doesn't exist in Qemu
                    options.extend(["-device", 'ahci,id=ahci{},bus=pci.{}'.format(disk_index, disk_index)])
                    options.extend(["-drive", 'file={},if=none,id=drive-sata-disk{},index={},media=disk'.format(disk, disk_index, disk_index)])
                    options.extend(["-device", 'ide-drive,drive=drive-sata-disk{},bus=ahci{}.0'.format(disk_index, disk_index)])
                    options.extend(["-device", 'ide-drive,drive=drive-sata-disk{},bus=ahci{}.0,id=drive-sata-disk{}'.format(disk_index, disk_index, disk_index)])
                else:
                    options.extend(["-drive", 'file={},if={},index={},media=disk'.format(disk, interface, disk_index)])
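
For illustration, the new branch above generates roughly the following QEMU arguments for the first SATA disk; the image name and index here are made-up values, only the shape of the output matters:

```python
# hypothetical values, just to show the generated argument list
disk, disk_index = "disk0.qcow2", 0

options = []
options.extend(["-device", 'ahci,id=ahci{},bus=pci.{}'.format(disk_index, disk_index)])
options.extend(["-drive", 'file={},if=none,id=drive-sata-disk{},index={},media=disk'.format(disk, disk_index, disk_index)])
options.extend(["-device", 'ide-drive,drive=drive-sata-disk{},bus=ahci{}.0,id=drive-sata-disk{}'.format(disk_index, disk_index, disk_index)])

# options is now:
# ['-device', 'ahci,id=ahci0,bus=pci.0',
#  '-drive', 'file=disk0.qcow2,if=none,id=drive-sata-disk0,index=0,media=disk',
#  '-device', 'ide-drive,drive=drive-sata-disk0,bus=ahci0.0,id=drive-sata-disk0']
```
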
@ -588,8 +588,9 @@ class VMware(BaseManager):

        for vm_settings in vm_entries.values():
            if "displayname" in vm_settings and "config" in vm_settings:
                log.debug('Found VM named "{}" with VMX file "{}"'.format(vm_settings["displayname"], vm_settings["config"]))
                vmware_vms.append({"vmname": vm_settings["displayname"], "vmx_path": vm_settings["config"]})
                if os.path.exists(vm_settings["config"]):
                    log.debug('Found VM named "{}" with VMX file "{}"'.format(vm_settings["displayname"], vm_settings["config"]))
                    vmware_vms.append({"vmname": vm_settings["displayname"], "vmx_path": vm_settings["config"]})
        return vmware_vms

    def _get_vms_from_directory(self, directory):
@ -35,6 +35,7 @@ from ..version import __version__
from .topology import load_topology
from .gns3vm import GNS3VM
from ..utils.get_resource import get_resource
from .gns3vm.gns3_vm_error import GNS3VMError

import logging
log = logging.getLogger(__name__)
@ -159,10 +160,13 @@ class Controller:
        for c in computes:
            try:
                yield from self.add_compute(**c)
            except aiohttp.web_exceptions.HTTPConflict:
            except (aiohttp.web_exceptions.HTTPConflict):
                pass  # Skip unavailable servers at loading
        yield from self.load_projects()
        yield from self.gns3vm.auto_start_vm()
        try:
            yield from self.gns3vm.auto_start_vm()
        except GNS3VMError as e:
            log.warn(str(e))
        yield from self._project_auto_open()

    def _update_config(self):
@ -215,9 +219,12 @@ class Controller:
                "password": c.password,
                "compute_id": c.id
            })
        os.makedirs(os.path.dirname(self._config_file), exist_ok=True)
        with open(self._config_file, 'w+') as f:
            json.dump(data, f, indent=4)
        try:
            os.makedirs(os.path.dirname(self._config_file), exist_ok=True)
            with open(self._config_file, 'w+') as f:
                json.dump(data, f, indent=4)
        except OSError as e:
            log.error("Can't write the configuration {}: {}".format(self._config_file, str(e)))

    @asyncio.coroutine
    def _load_controller_settings(self):
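
The last hunk wraps the settings write so a disk failure is logged instead of crashing the controller. A minimal sketch of the same defensive pattern in isolation (file name and data are placeholders):

```python
import json
import logging
import os

log = logging.getLogger(__name__)


def save_settings(config_file, data):
    """Write settings to disk; any OSError is logged instead of raised."""
    try:
        os.makedirs(os.path.dirname(config_file), exist_ok=True)
        with open(config_file, "w+") as f:
            json.dump(data, f, indent=4)
    except OSError as e:
        log.error("Can't write the configuration %s: %s", config_file, e)
```
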
@ -18,6 +18,7 @@
import sys
import copy
import asyncio
import aiohttp

from ...utils.asyncio import locked_coroutine
from .vmware_gns3_vm import VMwareGNS3VM
@ -242,10 +243,13 @@ class GNS3VM:
            yield from self.start()
        except GNS3VMError as e:
            # The user will receive the error later when they try to use the node
            yield from self._controller.add_compute(compute_id="vm",
                                                    name="GNS3 VM ({})".format(self.current_engine().vmname),
                                                    host=None,
                                                    force=True)
            try:
                yield from self._controller.add_compute(compute_id="vm",
                                                        name="GNS3 VM ({})".format(self.current_engine().vmname),
                                                        host=None,
                                                        force=True)
            except aiohttp.web.HTTPConflict:
                pass
            log.error("Can't start the GNS3 VM: {}", str(e))

    @asyncio.coroutine
@ -674,7 +674,7 @@ class Project:
            self.dump()
        # We catch all errors so we can roll back the .gns3 to its previous state
        except Exception as e:
            for compute in self._project_created_on_compute:
            for compute in list(self._project_created_on_compute):
                try:
                    yield from compute.post("/projects/{}/close".format(self._id))
                # We don't care if a compute is down at this step
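
Wrapping the collection in `list(...)` takes a snapshot, so the rollback loop keeps working even if the underlying set of computes changes while the coroutine yields. A tiny illustration of the hazard being avoided (plain set and made-up names, not the project's objects):

```python
computes = {"local", "vm"}

# Iterating the set directly would raise RuntimeError if it were mutated mid-loop.
for compute in list(computes):      # snapshot: safe against concurrent changes
    if compute == "vm":
        computes.discard("vm")      # simulated change during the rollback loop
print(sorted(computes))             # ['local']
```
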
@ -291,8 +291,11 @@ def _convert_1_3_later(topo, topo_path):
        except KeyError:
            node["compute_id"] = "local"
        node["console_type"] = old_node["properties"].get("console_type", "telnet")
        node["name"] = old_node["label"]["text"]
        node["label"] = _convert_label(old_node["label"])
        if "label" in old_node:
            node["name"] = old_node["label"]["text"]
            node["label"] = _convert_label(old_node["label"])
        else:
            node["name"] = old_node["properties"]["name"]
        node["node_id"] = old_node.get("vm_id", str(uuid.uuid4()))

        node["symbol"] = old_node.get("symbol", None)
@ -54,7 +54,7 @@ class CrashReport:
    Report crash to a third party service
    """

    DSN = "sync+https://7c028290d17b4035916285b304d42311:ddf752e704c7423cacab93f8e34f713c@sentry.io/38482"
    DSN = "sync+https://19cca90b55874be5862caf9b507fbd7b:1c0897efd092467a874e89b2e4803b29@sentry.io/38482"
    if hasattr(sys, "frozen"):
        cacert = get_resource("cacert.pem")
        if cacert is not None and os.path.isfile(cacert):
@ -20,6 +20,7 @@ import aiohttp

from gns3server.web.route import Route
from gns3server.controller import Controller
from gns3server.utils import force_unix_path

from gns3server.schemas.node import (
    NODE_OBJECT_SCHEMA,
@ -337,7 +338,7 @@ class NodeHandler:
        project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"])
        node = project.get_node(request.match_info["node_id"])
        path = request.match_info["path"]
        path = os.path.normpath(path)
        path = force_unix_path(path)

        # Raise an error if the user tries to escape
        if path[0] == ".":
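
The idea behind that guard: after the path is normalized to Unix style, anything still starting with "." (such as "../") would escape the node directory. A standalone sketch of the same check (function and exception names are illustrative, not the handler's code):

```python
import posixpath


def validate_node_relative_path(path):
    """Reject request paths that try to escape the node's directory."""
    path = posixpath.normpath(path.replace("\\", "/"))
    if path.startswith("."):
        raise ValueError("Path traversal attempt: {}".format(path))
    return path


# validate_node_relative_path("configs/startup.cfg")  -> "configs/startup.cfg"
# validate_node_relative_path("../../etc/passwd")     -> raises ValueError
```
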
gns3server/handlers/api/vpcs_handler.py (new file, 230 lines)
@ -0,0 +1,230 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from aiohttp.web import HTTPConflict
from ...web.route import Route
from ...schemas.nio import NIO_SCHEMA
from ...schemas.vpcs import VPCS_CREATE_SCHEMA
from ...schemas.vpcs import VPCS_UPDATE_SCHEMA
from ...schemas.vpcs import VPCS_OBJECT_SCHEMA
from ...modules.vpcs import VPCS


class VPCSHandler:

    """
    API entry points for VPCS.
    """

    @classmethod
    @Route.post(
        r"/projects/{project_id}/vpcs/vms",
        parameters={
            "project_id": "UUID for the project"
        },
        status_codes={
            201: "Instance created",
            400: "Invalid request",
            409: "Conflict"
        },
        description="Create a new VPCS instance",
        input=VPCS_CREATE_SCHEMA,
        output=VPCS_OBJECT_SCHEMA)
    def create(request, response):

        vpcs = VPCS.instance()
        vm = yield from vpcs.create_vm(request.json["name"],
                                       request.match_info["project_id"],
                                       request.json.get("vm_id"),
                                       console=request.json.get("console"),
                                       startup_script=request.json.get("startup_script"))
        response.set_status(201)
        response.json(vm)

    @classmethod
    @Route.get(
        r"/projects/{project_id}/vpcs/vms/{vm_id}",
        parameters={
            "project_id": "UUID for the project",
            "vm_id": "UUID for the instance"
        },
        status_codes={
            200: "Success",
            400: "Invalid request",
            404: "Instance doesn't exist"
        },
        description="Get a VPCS instance",
        output=VPCS_OBJECT_SCHEMA)
    def show(request, response):

        vpcs_manager = VPCS.instance()
        vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
        response.json(vm)

    @classmethod
    @Route.put(
        r"/projects/{project_id}/vpcs/vms/{vm_id}",
        parameters={
            "project_id": "UUID for the project",
            "vm_id": "UUID for the instance"
        },
        status_codes={
            200: "Instance updated",
            400: "Invalid request",
            404: "Instance doesn't exist",
            409: "Conflict"
        },
        description="Update a VPCS instance",
        input=VPCS_UPDATE_SCHEMA,
        output=VPCS_OBJECT_SCHEMA)
    def update(request, response):

        vpcs_manager = VPCS.instance()
        vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
        vm.name = request.json.get("name", vm.name)
        vm.console = request.json.get("console", vm.console)
        vm.startup_script = request.json.get("startup_script", vm.startup_script)
        response.json(vm)

    @classmethod
    @Route.delete(
        r"/projects/{project_id}/vpcs/vms/{vm_id}",
        parameters={
            "project_id": "UUID for the project",
            "vm_id": "UUID for the instance"
        },
        status_codes={
            204: "Instance deleted",
            400: "Invalid request",
            404: "Instance doesn't exist"
        },
        description="Delete a VPCS instance")
    def delete(request, response):

        yield from VPCS.instance().delete_vm(request.match_info["vm_id"])
        response.set_status(204)

    @classmethod
    @Route.post(
        r"/projects/{project_id}/vpcs/vms/{vm_id}/start",
        parameters={
            "project_id": "UUID for the project",
            "vm_id": "UUID for the instance"
        },
        status_codes={
            204: "Instance started",
            400: "Invalid request",
            404: "Instance doesn't exist"
        },
        description="Start a VPCS instance",
        output=VPCS_OBJECT_SCHEMA)
    def start(request, response):

        vpcs_manager = VPCS.instance()
        vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
        yield from vm.start()
        response.json(vm)

    @classmethod
    @Route.post(
        r"/projects/{project_id}/vpcs/vms/{vm_id}/stop",
        parameters={
            "project_id": "UUID for the project",
            "vm_id": "UUID for the instance"
        },
        status_codes={
            204: "Instance stopped",
            400: "Invalid request",
            404: "Instance doesn't exist"
        },
        description="Stop a VPCS instance")
    def stop(request, response):

        vpcs_manager = VPCS.instance()
        vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
        yield from vm.stop()
        response.set_status(204)

    @classmethod
    @Route.post(
        r"/projects/{project_id}/vpcs/vms/{vm_id}/reload",
        parameters={
            "project_id": "UUID for the project",
            "vm_id": "UUID for the instance",
        },
        status_codes={
            204: "Instance reloaded",
            400: "Invalid request",
            404: "Instance doesn't exist"
        },
        description="Reload a VPCS instance")
    def reload(request, response):

        vpcs_manager = VPCS.instance()
        vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
        yield from vm.reload()
        response.set_status(204)

    @Route.post(
        r"/projects/{project_id}/vpcs/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
        parameters={
            "project_id": "UUID for the project",
            "vm_id": "UUID for the instance",
            "adapter_number": "Network adapter where the nio is located",
            "port_number": "Port where the nio should be added"
        },
        status_codes={
            201: "NIO created",
            400: "Invalid request",
            404: "Instance doesn't exist"
        },
        description="Add a NIO to a VPCS instance",
        input=NIO_SCHEMA,
        output=NIO_SCHEMA)
    def create_nio(request, response):

        vpcs_manager = VPCS.instance()
        vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
        nio_type = request.json["type"]
        if nio_type not in ("nio_udp", "nio_tap"):
            raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
        nio = vpcs_manager.create_nio(vm.vpcs_path(), request.json)
        vm.port_add_nio_binding(int(request.match_info["port_number"]), nio)
        response.set_status(201)
        response.json(nio)

    @classmethod
    @Route.delete(
        r"/projects/{project_id}/vpcs/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
        parameters={
            "project_id": "UUID for the project",
            "vm_id": "UUID for the instance",
            "adapter_number": "Network adapter where the nio is located",
            "port_number": "Port from where the nio should be removed"
        },
        status_codes={
            204: "NIO deleted",
            400: "Invalid request",
            404: "Instance doesn't exist"
        },
        description="Remove a NIO from a VPCS instance")
    def delete_nio(request, response):

        vpcs_manager = VPCS.instance()
        vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
        vm.port_remove_nio_binding(int(request.match_info["port_number"]))
        response.set_status(204)
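
As a purely illustrative usage example (the endpoint shape is taken from the routes above, but the base URL, API prefix and UUIDs are made-up assumptions), creating and starting a VPCS instance over this REST API could look like:

```python
import requests  # hypothetical client-side example, not part of the server

BASE = "http://localhost:8000/v1"                    # assumed server URL and API prefix
PROJECT_ID = "42f9feee-3217-4104-981e-85d5f0a806ec"  # made-up project UUID

# Create the instance (expects a 201 response with the VPCS object)
r = requests.post("{}/projects/{}/vpcs/vms".format(BASE, PROJECT_ID),
                  json={"name": "PC1"})
vm = r.json()

# Start it
requests.post("{}/projects/{}/vpcs/vms/{}/start".format(BASE, PROJECT_ID, vm["vm_id"]))
```
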
@ -163,7 +163,7 @@ def pid_lock(path):
                pid = int(f.read())
                try:
                    os.kill(pid, 0)  # If the process is not running, kill raises an error
                except OSError:
                except (OSError, SystemError):
                    pid = None
        except OSError as e:
            log.critical("Can't open pid file %s: %s", pid, str(e))
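
The `os.kill(pid, 0)` trick sends no signal at all; it only checks whether a process with that PID exists, raising an error if not. A small standalone sketch of that liveness check (function name is illustrative):

```python
import os


def pid_is_running(pid):
    """Return True if a process with this PID exists; signal 0 delivers nothing."""
    try:
        os.kill(pid, 0)
    except (OSError, SystemError):  # SystemError is the extra case added in the hunk above
        return False
    return True
```
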
@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import copy

ATM_SWITCH_CREATE_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
@ -81,5 +82,5 @@ ATM_SWITCH_OBJECT_SCHEMA = {
    "required": ["name", "node_id", "project_id"]
}

ATM_SWITCH_UPDATE_SCHEMA = ATM_SWITCH_OBJECT_SCHEMA
ATM_SWITCH_UPDATE_SCHEMA = copy.deepcopy(ATM_SWITCH_OBJECT_SCHEMA)
del ATM_SWITCH_UPDATE_SCHEMA["required"]
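
The reason for `copy.deepcopy` here: plain assignment only creates a second name for the same dict, so the `del ...["required"]` would also strip the required list from the OBJECT schema. A small self-contained demonstration, with the schema simplified to the relevant field:

```python
import copy

OBJECT_SCHEMA = {"type": "object", "required": ["name", "node_id", "project_id"]}

# Buggy: both names point at the same dict
UPDATE_SCHEMA = OBJECT_SCHEMA
del UPDATE_SCHEMA["required"]
print("required" in OBJECT_SCHEMA)   # False -- the object schema lost its constraint

OBJECT_SCHEMA = {"type": "object", "required": ["name", "node_id", "project_id"]}

# Fixed: deep copy first, then relax only the copy
UPDATE_SCHEMA = copy.deepcopy(OBJECT_SCHEMA)
del UPDATE_SCHEMA["required"]
print("required" in OBJECT_SCHEMA)   # True -- original schema untouched
```

The same pattern is applied to the cloud, compute, Ethernet hub, Ethernet switch, Frame Relay switch and node schemas below.
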
@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import copy
from .port import PORT_OBJECT_SCHEMA

HOST_INTERFACE_SCHEMA = {
@ -136,5 +137,5 @@ CLOUD_OBJECT_SCHEMA = {
    "required": ["name", "node_id", "project_id", "ports_mapping"]
}

CLOUD_UPDATE_SCHEMA = CLOUD_OBJECT_SCHEMA
CLOUD_UPDATE_SCHEMA = copy.deepcopy(CLOUD_OBJECT_SCHEMA)
del CLOUD_UPDATE_SCHEMA["required"]
@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import copy
from .capabilities import CAPABILITIES_SCHEMA

COMPUTE_CREATE_SCHEMA = {
@ -52,10 +53,10 @@ COMPUTE_CREATE_SCHEMA = {
        }
    },
    "additionalProperties": False,
    "required": ["compute_id", "protocol", "host", "port"]
    "required": ["protocol", "host", "port"]
}

COMPUTE_UPDATE_SCHEMA = COMPUTE_CREATE_SCHEMA
COMPUTE_UPDATE_SCHEMA = copy.deepcopy(COMPUTE_CREATE_SCHEMA)
del COMPUTE_UPDATE_SCHEMA["required"]

COMPUTE_OBJECT_SCHEMA = {
@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import copy

ETHERNET_HUB_CREATE_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
@ -129,5 +130,5 @@ ETHERNET_HUB_OBJECT_SCHEMA = {
    "required": ["name", "node_id", "project_id", "ports_mapping"]
}

ETHERNET_HUB_UPDATE_SCHEMA = ETHERNET_HUB_OBJECT_SCHEMA
ETHERNET_HUB_UPDATE_SCHEMA = copy.deepcopy(ETHERNET_HUB_OBJECT_SCHEMA)
del ETHERNET_HUB_UPDATE_SCHEMA["required"]
@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import copy

ETHERNET_SWITCH_CREATE_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
@ -153,5 +154,5 @@ ETHERNET_SWITCH_OBJECT_SCHEMA = {
    "required": ["name", "node_id", "project_id"]
}

ETHERNET_SWITCH_UPDATE_SCHEMA = ETHERNET_SWITCH_OBJECT_SCHEMA
ETHERNET_SWITCH_UPDATE_SCHEMA = copy.deepcopy(ETHERNET_SWITCH_OBJECT_SCHEMA)
del ETHERNET_SWITCH_UPDATE_SCHEMA["required"]
@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import copy

FRAME_RELAY_SWITCH_CREATE_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
@ -81,5 +82,5 @@ FRAME_RELAY_SWITCH_OBJECT_SCHEMA = {
    "required": ["name", "node_id", "project_id"]
}

FRAME_RELAY_SWITCH_UPDATE_SCHEMA = FRAME_RELAY_SWITCH_OBJECT_SCHEMA
FRAME_RELAY_SWITCH_UPDATE_SCHEMA = copy.deepcopy(FRAME_RELAY_SWITCH_OBJECT_SCHEMA)
del FRAME_RELAY_SWITCH_UPDATE_SCHEMA["required"]
@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import copy
from .label import LABEL_OBJECT_SCHEMA

NODE_TYPE_SCHEMA = {
@ -234,5 +235,5 @@ NODE_OBJECT_SCHEMA = {
}

NODE_CREATE_SCHEMA = NODE_OBJECT_SCHEMA
NODE_UPDATE_SCHEMA = NODE_OBJECT_SCHEMA
NODE_UPDATE_SCHEMA = copy.deepcopy(NODE_OBJECT_SCHEMA)
del NODE_UPDATE_SCHEMA["required"]
@ -5,7 +5,7 @@
</h3>
<ul>
    <li><a href="https://gns3.com">Website</a></li>
    <li><a href="http://api.gns3.net">API documentation</a></li>
    <li><a href="http://docs.gns3.com">Documentation</a></li>
</ul>
<p>If you are looking for uploading the IOU. You can since 1.4 upload them directly from the client see: <a href="https://gns3.com/support/docs/how-to-configure-non-native-io-3">this documentation</a>.</p>
<p>If you are looking for uploading the IOU. You can since 1.4 upload them directly from the client see: <a href="http://docs.gns3.com/1PKfYwR78QP_Z3jqxBQ1pdy6SsqM27qhvdCvSmIizRh4">this documentation</a>.</p>
{% endblock %}
@ -199,7 +199,7 @@ class Hypervisor(UBridgeHypervisor):
            try:
                yield from wait_for_process_termination(self._process, timeout=3)
            except asyncio.TimeoutError:
                if self._process.returncode is None:
                if self._process and self._process.returncode is None:
                    log.warn("uBridge process {} is still running... killing it".format(self._process.pid))
                    try:
                        self._process.kill()
@ -79,7 +79,7 @@ def list_images(type):

                    images.append({
                        "filename": filename,
                        "path": path,
                        "path": force_unix_path(path),
                        "md5sum": md5sum(os.path.join(root, filename)),
                        "filesize": os.stat(os.path.join(root, filename)).st_size})
                except OSError as e:
@ -17,7 +17,7 @@

import io
import struct
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import ElementTree, ParseError


def get_size(data, default_width=0, default_height=0):
@ -95,7 +95,11 @@ def get_size(data, default_width=0, default_height=0):
        filetype = "svg"
        fhandle = io.BytesIO(data)
        tree = ElementTree()
        tree.parse(fhandle)
        try:
            tree.parse(fhandle)
        except ParseError:
            raise ValueError("Invalid SVG file")

        root = tree.getroot()

        try:
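
A minimal sketch of that guarded SVG parse in isolation, turning the XML ParseError into a ValueError so callers deal with one consistent error type (the function name is illustrative):

```python
import io
from xml.etree.ElementTree import ElementTree, ParseError


def parse_svg_root(data):
    """Parse SVG bytes and return the root element, or raise ValueError if invalid."""
    fhandle = io.BytesIO(data)
    tree = ElementTree()
    try:
        tree.parse(fhandle)
    except ParseError:
        raise ValueError("Invalid SVG file")
    return tree.getroot()


# parse_svg_root(b"<svg width='10' height='20'></svg>").get("width")  -> '10'
# parse_svg_root(b"not xml")                                          -> raises ValueError
```
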
@ -2,7 +2,6 @@ jsonschema>=2.4.0
aiohttp>=1.3.5,<=1.4.0 # pyup: ignore
aiohttp-cors==0.5.1 # pyup: ignore
yarl>=0.9.8,<0.10 # pyup: ignore
typing>=3.5.3.0 # Otherwise yarl fail with python 3.4
Jinja2>=2.7.3
raven>=5.23.0
psutil>=3.0.0
setup.py (3 lines changed)
@ -40,6 +40,9 @@ class PyTest(TestCommand):

dependencies = open("requirements.txt", "r").read().splitlines()

if sys.version_info <= (3, 4):
    dependencies.append('typing>=3.5.3.0 # Otherwise yarl fail with python 3.4')

setup(
    name="gns3-server",
    version=__import__("gns3server").__version__,
@ -47,7 +47,7 @@ def test_query_success(loop, vm):
    vm._session.request = AsyncioMagicMock(return_value=response)
    data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
    vm._session.request.assert_called_with('POST',
                                           'http://docker/test',
                                           'http://docker/v1.25/test',
                                           data='{"a": true}',
                                           headers={'content-type': 'application/json'},
                                           params={'b': 1},
@ -70,7 +70,7 @@ def test_query_error(loop, vm):
    with pytest.raises(DockerError):
        data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
    vm._session.request.assert_called_with('POST',
                                           'http://docker/test',
                                           'http://docker/v1.25/test',
                                           data='{"a": true}',
                                           headers={'content-type': 'application/json'},
                                           params={'b': 1},
@ -91,7 +91,7 @@ def test_query_error_json(loop, vm):
    with pytest.raises(DockerError):
        data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
    vm._session.request.assert_called_with('POST',
                                           'http://docker/test',
                                           'http://docker/v1.25/test',
                                           data='{"a": true}',
                                           headers={'content-type': 'application/json'},
                                           params={'b': 1},
@ -154,6 +154,7 @@ def test_termination_callback(vm, async_run):
    assert event == vm


@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
def test_termination_callback_error(vm, tmpdir, async_run):

    with open(str(tmpdir / "qemu.log"), "w+") as f:
@ -219,18 +219,6 @@ def test_get_relative_image_path(qemu, tmpdir, config):
    assert qemu.get_relative_image_path(path5) == path5


def test_get_relative_image_path_ova(qemu, tmpdir, config):
    os.makedirs(str(tmpdir / "QEMU" / "test.ova"))
    path = str(tmpdir / "QEMU" / "test.ova" / "test.bin")
    open(path, 'w+').close()

    config.set_section_config("Server", {
        "images_path": str(tmpdir)
    })
    assert qemu.get_relative_image_path(path) == os.path.join("test.ova", "test.bin")
    assert qemu.get_relative_image_path(os.path.join("test.ova", "test.bin")) == os.path.join("test.ova", "test.bin")


def test_list_images(loop, qemu, tmpdir):

    fake_images = ["a.qcow2", "b.qcow2", ".blu.qcow2", "a.qcow2.md5sum"]
@ -262,7 +250,7 @@ def test_list_images_recursives(loop, qemu, tmpdir):
    assert loop.run_until_complete(qemu.list_images()) == [
        {"filename": "a.qcow2", "path": "a.qcow2", "md5sum": "c4ca4238a0b923820dcc509a6f75849b", "filesize": 1},
        {"filename": "b.qcow2", "path": "b.qcow2", "md5sum": "c4ca4238a0b923820dcc509a6f75849b", "filesize": 1},
        {"filename": "c.qcow2", "path": os.path.sep.join(["c", "c.qcow2"]), "md5sum": "c4ca4238a0b923820dcc509a6f75849b", "filesize": 1}
        {"filename": "c.qcow2", "path": force_unix_path(os.path.sep.join(["c", "c.qcow2"])), "md5sum": "c4ca4238a0b923820dcc509a6f75849b", "filesize": 1}
    ]
@ -181,6 +181,7 @@ def test_import_iou_linux_no_vm(linux_platform, async_run, tmpdir, controller):
            {
                "compute_id": "local",
                "node_type": "iou",
                "name": "test",
                "properties": {}
            }
        ],
@ -224,6 +225,7 @@ def test_import_iou_linux_with_vm(linux_platform, async_run, tmpdir, controller)
                "compute_id": "local",
                "node_id": "0fd3dd4d-dc93-4a04-a9b9-7396a9e22e8b",
                "node_type": "iou",
                "name": "test",
                "properties": {}
            }
        ],
@ -267,11 +269,13 @@ def test_import_iou_non_linux(windows_platform, async_run, tmpdir, controller):
                "compute_id": "local",
                "node_id": "0fd3dd4d-dc93-4a04-a9b9-7396a9e22e8b",
                "node_type": "iou",
                "name": "test",
                "properties": {}
            },
            {
                "compute_id": "local",
                "node_type": "vpcs",
                "name": "test2",
                "properties": {}
            }
        ],
@ -319,12 +323,14 @@ def test_import_node_id(linux_platform, async_run, tmpdir, controller):
                "compute_id": "local",
                "node_id": "0fd3dd4d-dc93-4a04-a9b9-7396a9e22e8b",
                "node_type": "iou",
                "name": "test",
                "properties": {}
            },
            {
                "compute_id": "local",
                "node_id": "c3ae286c-c81f-40d9-a2d0-5874b2f2478d",
                "node_type": "iou",
                "name": "test2",
                "properties": {}
            }
        ],
@ -409,6 +415,7 @@ def test_import_keep_compute_id(windows_platform, async_run, tmpdir, controller)
                "compute_id": "local",
                "node_id": "0fd3dd4d-dc93-4a04-a9b9-7396a9e22e8b",
                "node_type": "iou",
                "name": "test",
                "properties": {}
            }
        ],
@ -35,11 +35,7 @@ def test_symbols(http_controller):
def test_get(http_controller):
    response = http_controller.get('/symbols/' + urllib.parse.quote(':/symbols/firewall.svg') + '/raw')
    assert response.status == 200
    # Different carriage return
    if sys.platform.startswith("win"):
        assert response.headers['CONTENT-LENGTH'] == '9568'
    else:
        assert response.headers['CONTENT-LENGTH'] == '9381'
    assert response.headers['CONTENT-LENGTH'] == '9381'
    assert response.headers['CONTENT-TYPE'] == 'image/svg+xml'
    assert '</svg>' in response.html
@ -24,6 +24,8 @@ def test_force_unix_path():
    assert force_unix_path("a\\b") == "a/b"
    assert force_unix_path("a\\b\\..\\c") == "a/c"
    assert force_unix_path("C:\Temp") == "C:/Temp"
    assert force_unix_path(force_unix_path("C:\Temp")) == "C:/Temp"
    assert force_unix_path("a//b") == "a/b"


def test_macaddress_to_int():
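
For context, a behaviour-compatible sketch of what a helper like force_unix_path has to do to satisfy these assertions (this is not the project's implementation, only an illustration): replace backslashes with forward slashes, then normalize.

```python
import posixpath


def force_unix_path_sketch(path):
    """Convert a Windows-style path to a normalized Unix-style path."""
    path = path.replace("\\", "/")
    return posixpath.normpath(path)


assert force_unix_path_sketch("a\\b") == "a/b"
assert force_unix_path_sketch("a\\b\\..\\c") == "a/c"
assert force_unix_path_sketch("C:\\Temp") == "C:/Temp"
assert force_unix_path_sketch("a//b") == "a/b"
```
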
@ -16,6 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import sys
from unittest.mock import patch


@ -105,9 +106,10 @@ def test_list_images(tmpdir):
    path = tmpdir / "images2" / "test_invalid.image"
    path.write(b'NOTANELF', ensure=True)

    path3 = tmpdir / "images1" / "IOU" / "test3.bin"
    path3.write(b'\x7fELF\x01\x02\x01', ensure=True)
    path3 = force_unix_path(str(path3))
    if sys.platform.startswith("linux"):
        path3 = tmpdir / "images1" / "IOU" / "test3.bin"
        path3.write(b'\x7fELF\x01\x02\x01', ensure=True)
        path3 = force_unix_path(str(path3))

    path4 = tmpdir / "images1" / "QEMU" / "test4.qcow2"
    path4.write("1", ensure=True)
@ -137,14 +139,15 @@ def test_list_images(tmpdir):
        }
    ]

    assert list_images("iou") == [
        {
            'filename': 'test3.bin',
            'filesize': 7,
            'md5sum': 'b0d5aa897d937aced5a6b1046e8f7e2e',
            'path': 'test3.bin'
        }
    ]
    if sys.platform.startswith("linux"):
        assert list_images("iou") == [
            {
                'filename': 'test3.bin',
                'filesize': 7,
                'md5sum': 'b0d5aa897d937aced5a6b1046e8f7e2e',
                'path': 'test3.bin'
            }
        ]

    assert list_images("qemu") == [
        {
win-requirements.txt (new file, 3 lines)
@ -0,0 +1,3 @@
-rrequirements.txt

pypiwin32 # pyup: ignore