Some spring cleaning.

This commit is contained in:
grossmj 2018-03-15 14:17:39 +07:00
parent 88674455a3
commit 90ce6093d8
33 changed files with 264 additions and 146 deletions

View File

@ -20,7 +20,6 @@ import os
import struct
import stat
import asyncio
from asyncio.futures import CancelledError
import aiohttp
import socket
@ -70,6 +69,7 @@ class BaseManager:
"""
:returns: Array of supported node types on this computer
"""
# By default we transform DockerVM => docker but you can override this (see builtins)
return [cls._NODE_CLASS.__name__.rstrip('VM').lower()]
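For illustration only (not part of this commit): the transform above is a plain class-name manipulation. Note that str.rstrip() removes a trailing set of characters rather than the literal suffix "VM", which happens to work for the class names used here.
class DockerVM:
    pass
DockerVM.__name__.rstrip('VM').lower()   # -> 'docker'
"QemuVM".rstrip('VM').lower()            # -> 'qemu'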
@ -78,6 +78,7 @@ class BaseManager:
"""
List of nodes managed by the module
"""
return self._nodes.values()
@classmethod
@ -109,6 +110,7 @@ class BaseManager:
:returns: Port manager
"""
if self._port_manager is None:
self._port_manager = PortManager.instance()
return self._port_manager
@ -271,6 +273,7 @@ class BaseManager:
:param destination_node_id: Destination node identifier
:returns: New node instance
"""
source_node = self.get_node(source_node_id)
destination_node = self.get_node(destination_node_id)
@ -283,9 +286,9 @@ class BaseManager:
shutil.rmtree(destination_dir)
shutil.copytree(source_node.working_dir, destination_dir)
except OSError as e:
raise aiohttp.web.HTTPConflict(text="Can't duplicate node data: {}".format(e))
raise aiohttp.web.HTTPConflict(text="Cannot duplicate node data: {}".format(e))
# We force a refresh of the name. This force the rewrite
# We force a refresh of the name. This forces the rewrite
# of some configuration files
node_name = destination_node.name
destination_node.name = node_name + str(uuid4())
@ -539,12 +542,14 @@ class BaseManager:
"""
Get the image directory on disk
"""
if hasattr(self, "_NODE_TYPE"):
return default_images_directory(self._NODE_TYPE)
raise NotImplementedError
@asyncio.coroutine
def write_image(self, filename, stream):
directory = self.get_images_directory()
path = os.path.abspath(os.path.join(directory, *os.path.split(filename)))
if os.path.commonprefix([directory, path]) != directory:
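A minimal sketch (not from this diff, hypothetical directory) of what the commonprefix check above rejects:
import os
directory = "/opt/gns3/images"
safe = os.path.abspath(os.path.join(directory, "c7200.image"))        # /opt/gns3/images/c7200.image
evil = os.path.abspath(os.path.join(directory, "../../etc/passwd"))   # /opt/etc/passwd
os.path.commonprefix([directory, safe]) == directory   # True  -> write allowed
os.path.commonprefix([directory, evil]) == directory   # False -> write refused
Since commonprefix compares characters, a sibling directory such as /opt/gns3/images2 would also pass the check; that caveat is independent of this commit.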

View File

@ -265,6 +265,7 @@ class BaseNode:
"""
Delete the node (including all its files).
"""
def set_rw(operation, name, exc):
os.chmod(name, stat.S_IWRITE)
@ -287,6 +288,7 @@ class BaseNode:
"""
Stop the node process.
"""
if self._wrapper_telnet_server:
self._wrapper_telnet_server.close()
yield from self._wrapper_telnet_server.wait_closed()
@ -332,6 +334,7 @@ class BaseNode:
Start a telnet proxy for the console, allowing multiple clients
to be connected at the same time
"""
if not self._wrap_console or self._console_type != "telnet":
return
remaining_trial = 60
@ -353,6 +356,7 @@ class BaseNode:
"""
:returns: Boolean, whether or not to allocate an aux console
"""
return self._allocate_aux
@allocate_aux.setter
@ -360,6 +364,7 @@ class BaseNode:
"""
:returns: Boolean, whether or not to allocate an aux console
"""
self._allocate_aux = allocate_aux
@property
@ -593,6 +598,7 @@ class BaseNode:
"""
:param name: Delete the bridge with this name
"""
if self.ubridge:
yield from self._ubridge_send("bridge delete {name}".format(name=name))
@ -604,6 +610,7 @@ class BaseNode:
:param bridge_name: bridge name in uBridge
:param filters: Array of filter dictionary
"""
yield from self._ubridge_send('bridge reset_packet_filters ' + bridge_name)
for packet_filter in self._build_filter_list(filters):
cmd = 'bridge add_packet_filter {} {}'.format(bridge_name, packet_filter)
@ -622,6 +629,7 @@ class BaseNode:
"""
:returns: Iterator building a list of filters
"""
i = 0
for (filter_type, values) in filters.items():
if isinstance(values[0], str):

View File

@ -42,11 +42,12 @@ class Cloud(BaseNode):
:param manager: Parent VM Manager
"""
def __init__(self, name, node_id, project, manager, ports=[]):
def __init__(self, name, node_id, project, manager, ports=None):
super().__init__(name, node_id, project, manager)
self._nios = {}
# If the cloud is not configured we fill it with host interfaces
# Populate the cloud with host interfaces if it is not configured
if not ports or len(ports) == 0:
self._ports_mapping = []
for interface in self._interfaces():
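The ports=[] to ports=None change above avoids Python's shared mutable default pitfall; a standalone sketch with a hypothetical function, not GNS3 code:
def add_port_buggy(port, ports=[]):      # the default list is created once and shared across calls
    ports.append(port)
    return ports
def add_port_fixed(port, ports=None):    # a fresh list is created on every call
    if ports is None:
        ports = []
    ports.append(port)
    return ports
add_port_buggy(1)   # [1]
add_port_buggy(2)   # [1, 2]  <- state leaks between calls
add_port_fixed(1)   # [1]
add_port_fixed(2)   # [2]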
@ -109,7 +110,7 @@ class Cloud(BaseNode):
if ports != self._ports_mapping:
if len(self._nios) > 0:
raise NodeError("Can't modify a cloud already connected.")
raise NodeError("Can't modify a cloud that is already connected.")
port_number = 0
for port in ports:
@ -129,6 +130,10 @@ class Cloud(BaseNode):
@asyncio.coroutine
def start(self):
"""
Starts this cloud.
"""
if self.status != "started":
if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
yield from self._stop_ubridge()
@ -160,11 +165,14 @@ class Cloud(BaseNode):
@asyncio.coroutine
def _is_wifi_adapter_osx(self, adapter_name):
"""
Detects a Wifi adapter on Mac.
"""
try:
output = yield from gns3server.utils.asyncio.subprocess_check_output("networksetup", "-listallhardwareports")
except (FileNotFoundError, subprocess.SubprocessError) as e:
log.warn("Could not execute networksetup: {}".format(e))
log.warning("Could not execute networksetup: {}".format(e))
return False
is_wifi = False
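Many hunks in this commit replace the deprecated logger.warn alias with logger.warning; a minimal sketch using only the standard logging module:
import logging
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)
log.warn("deprecated alias, may emit a DeprecationWarning")
log.warning("documented spelling")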
@ -244,10 +252,11 @@ class Cloud(BaseNode):
@asyncio.coroutine
def _add_linux_ethernet(self, port_info, bridge_name):
"""
Use raw sockets on Linux.
Connects an Ethernet interface on Linux using raw sockets.
If interface is a bridge we connect a tap to it
A TAP is used if the interface is a bridge
"""
interface = port_info["interface"]
if gns3server.utils.interfaces.is_interface_bridge(interface):
@ -266,6 +275,10 @@ class Cloud(BaseNode):
@asyncio.coroutine
def _add_osx_ethernet(self, port_info, bridge_name):
"""
Connects an Ethernet interface on OSX using libpcap.
"""
# Wireless adapters are not well supported by the libpcap on OSX
if (yield from self._is_wifi_adapter_osx(port_info["interface"])):
raise NodeError("Connecting to a Wireless adapter is not supported on Mac OS")
@ -280,6 +293,10 @@ class Cloud(BaseNode):
@asyncio.coroutine
def _add_windows_ethernet(self, port_info, bridge_name):
"""
Connects an Ethernet interface on Windows.
"""
if not gns3server.utils.interfaces.has_netmask(port_info["interface"]):
raise NodeError("Interface {} has no netmask, interface down?".format(port_info["interface"]))
yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"]))

View File

@ -25,15 +25,15 @@ import gns3server.utils.interfaces
class Nat(Cloud):
"""
A portable and preconfigured node allowing topology to get a
nat access to the outside
A portable and pre-configured node giving topologies
NAT access to the outside.
"""
def __init__(self, *args, **kwargs):
if sys.platform.startswith("linux"):
if "virbr0" not in [interface["name"] for interface in gns3server.utils.interfaces.interfaces()]:
raise NodeError("virbr0 is missing. You need to install libvirt")
raise NodeError("virbr0 is missing, please install libvirt")
interface = "virbr0"
else:
interfaces = list(filter(lambda x: 'vmnet8' in x.lower(),

View File

@ -33,7 +33,7 @@ from gns3server.compute.docker.docker_error import DockerError, DockerHttp304Err
log = logging.getLogger(__name__)
# Be carefull to keep it consistent
# Be careful to keep it consistent
DOCKER_MINIMUM_API_VERSION = "1.25"
DOCKER_MINIMUM_VERSION = "1.13"
DOCKER_PREFERRED_API_VERSION = "1.30"
@ -44,6 +44,7 @@ class Docker(BaseManager):
_NODE_CLASS = DockerVM
def __init__(self):
super().__init__()
self._server_url = '/var/run/docker.sock'
self._connected = False
@ -55,6 +56,7 @@ class Docker(BaseManager):
@asyncio.coroutine
def _check_connection(self):
if not self._connected:
try:
self._connected = True
@ -76,6 +78,7 @@ class Docker(BaseManager):
self._api_version = DOCKER_PREFERRED_API_VERSION
def connector(self):
if self._connector is None or self._connector.closed:
if not sys.platform.startswith("linux"):
raise DockerError("Docker is supported only on Linux")
@ -87,6 +90,7 @@ class Docker(BaseManager):
@asyncio.coroutine
def unload(self):
yield from super().unload()
if self._connected:
if self._connector and not self._connector.closed:
@ -95,7 +99,7 @@ class Docker(BaseManager):
@asyncio.coroutine
def query(self, method, path, data={}, params={}):
"""
Make a query to the docker daemon and decode the request
Makes a query to the Docker daemon and decodes the response
:param method: HTTP method
:param path: Endpoint in API
@ -116,7 +120,7 @@ class Docker(BaseManager):
@asyncio.coroutine
def http_query(self, method, path, data={}, params={}, timeout=300):
"""
Make a query to the docker daemon
Makes a query to the Docker daemon
:param method: HTTP method
:param path: Endpoint in API
@ -125,6 +129,7 @@ class Docker(BaseManager):
:param timeout: Timeout
:returns: HTTP response
"""
data = json.dumps(data)
if timeout is None:
timeout = 60 * 60 * 24 * 31 # One month timeout
@ -169,7 +174,7 @@ class Docker(BaseManager):
@asyncio.coroutine
def websocket_query(self, path, params={}):
"""
Open a websocket connection
Opens a websocket connection
:param path: Endpoint in API
:param params: Parameters added as a query arg
@ -185,7 +190,7 @@ class Docker(BaseManager):
@locked_coroutine
def pull_image(self, image, progress_callback=None):
"""
Pull image from docker repository
Pulls an image from the Docker repository
:param image: Image name
:param progress_callback: A function that receives log messages about the image download progress
@ -226,11 +231,13 @@ class Docker(BaseManager):
@asyncio.coroutine
def list_images(self):
"""Gets Docker image list.
"""
Gets Docker image list.
:returns: list of dicts
:rtype: list
"""
images = []
for image in (yield from self.query("GET", "images/json", params={"all": 0})):
if image['RepoTags']:
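The manager above talks to the Docker daemon through /var/run/docker.sock. A rough sketch of the same kind of query using aiohttp's UnixConnector, written with modern async/await syntax and the minimum API version shown above; an assumption for illustration, not the GNS3 implementation:
import asyncio
import aiohttp
async def docker_version():
    # Connect to the local Docker daemon through its Unix socket.
    connector = aiohttp.UnixConnector(path="/var/run/docker.sock")
    async with aiohttp.ClientSession(connector=connector) as session:
        # The host in the URL is a placeholder; the socket path does the routing.
        async with session.get("http://docker/v1.25/version") as response:
            return await response.json()
print(asyncio.get_event_loop().run_until_complete(docker_version()))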

View File

@ -180,11 +180,13 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _get_container_state(self):
"""Returns the container state (e.g. running, paused etc.)
"""
Returns the container state (e.g. running, paused etc.)
:returns: state
:rtype: str
"""
try:
result = yield from self.manager.query("GET", "containers/{}/json".format(self._cid))
except DockerError:
@ -201,17 +203,19 @@ class DockerVM(BaseNode):
"""
:returns: Dictionary information about the container image
"""
result = yield from self.manager.query("GET", "images/{}/json".format(self._image))
return result
def _mount_binds(self, image_infos):
def _mount_binds(self, image_info):
"""
:returns: Return the path that we need to map to local folders
"""
ressources = get_resource("compute/docker/resources")
if not os.path.exists(ressources):
raise DockerError("{} is missing can't start Docker containers".format(ressources))
binds = ["{}:/gns3:ro".format(ressources)]
resources = get_resource("compute/docker/resources")
if not os.path.exists(resources):
raise DockerError("{} is missing can't start Docker containers".format(resources))
binds = ["{}:/gns3:ro".format(resources)]
# We mount our own etc/network
network_config = self._create_network_config()
@ -219,7 +223,7 @@ class DockerVM(BaseNode):
self._volumes = ["/etc/network"]
volumes = image_infos.get("Config", {}).get("Volumes")
volumes = image_info.get("Config", {}).get("Volumes")
if volumes is None:
return binds
for volume in volumes.keys():
@ -266,7 +270,9 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def create(self):
"""Creates the Docker container."""
"""
Creates the Docker container.
"""
try:
image_infos = yield from self._get_image_information()
@ -336,6 +342,7 @@ class DockerVM(BaseNode):
"""
Destroy and recreate the container with the new settings
"""
# We need to save the console and state and restore it
console = self.console
aux = self.aux
@ -350,7 +357,9 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def start(self):
"""Starts this Docker container."""
"""
Starts this Docker container.
"""
try:
state = yield from self._get_container_state()
@ -401,7 +410,7 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _start_aux(self):
"""
Start an auxilary console
Starts an auxiliary console
"""
# We cannot use the API because Docker doesn't expose a websocket API for exec
@ -450,7 +459,7 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _start_vnc(self):
"""
Start a VNC server for this container
Starts a VNC server for this container
"""
self._display = self._get_free_display_port()
@ -466,9 +475,10 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _start_http(self):
"""
Start an HTTP tunnel to container localhost. It's not perfect
Starts an HTTP tunnel to container localhost. It's not perfect
but the only way we have to inject network packets is to use nc.
"""
log.debug("Forward HTTP for %s to %d", self.name, self._console_http_port)
command = ["docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "nc", "127.0.0.1", str(self._console_http_port)]
# We replace host and port in the server answer, otherwise some links could be broken
@ -487,7 +497,7 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _start_console(self):
"""
Start streaming the console via telnet
Starts streaming the console via telnet
"""
class InputStream:
@ -520,7 +530,7 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _read_console_output(self, ws, out):
"""
Read Websocket and forward it to the telnet
Reads from the Websocket and forwards it to the telnet server
:param ws: Websocket connection
:param out: Output stream
@ -542,11 +552,13 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def is_running(self):
"""Checks if the container is running.
"""
Checks if the container is running.
:returns: True or False
:rtype: bool
"""
state = yield from self._get_container_state()
if state == "running":
return True
@ -556,7 +568,10 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def restart(self):
"""Restart this Docker container."""
"""
Restarts this Docker container.
"""
yield from self.manager.query("POST", "containers/{}/restart".format(self._cid))
log.info("Docker container '{name}' [{image}] restarted".format(
name=self._name, image=self._image))
@ -566,6 +581,7 @@ class DockerVM(BaseNode):
"""
Clean the list of running console servers
"""
if len(self._telnet_servers) > 0:
for telnet_server in self._telnet_servers:
telnet_server.close()
@ -574,7 +590,9 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def stop(self):
"""Stops this Docker container."""
"""
Stops this Docker container.
"""
try:
yield from self._clean_servers()
@ -608,21 +626,29 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def pause(self):
"""Pauses this Docker container."""
"""
Pauses this Docker container.
"""
yield from self.manager.query("POST", "containers/{}/pause".format(self._cid))
self.status = "suspended"
log.info("Docker container '{name}' [{image}] paused".format(name=self._name, image=self._image))
@asyncio.coroutine
def unpause(self):
"""Unpauses this Docker container."""
"""
Unpauses this Docker container.
"""
yield from self.manager.query("POST", "containers/{}/unpause".format(self._cid))
self.status = "started"
log.info("Docker container '{name}' [{image}] unpaused".format(name=self._name, image=self._image))
@asyncio.coroutine
def close(self):
"""Closes this Docker container."""
"""
Closes this Docker container.
"""
if not (yield from super().close()):
return False
@ -630,6 +656,7 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def reset(self):
try:
state = yield from self._get_container_state()
if state == "paused" or state == "running":
@ -705,11 +732,13 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _get_namespace(self):
result = yield from self.manager.query("GET", "containers/{}/json".format(self._cid))
return int(result['State']['Pid'])
@asyncio.coroutine
def _connect_nio(self, adapter_number, nio):
bridge_name = 'bridge{}'.format(adapter_number)
yield from self._ubridge_send('bridge add_nio_udp {bridge_name} {lport} {rhost} {rport}'.format(bridge_name=bridge_name,
lport=nio.lport,
@ -724,12 +753,13 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def adapter_add_nio_binding(self, adapter_number, nio):
"""Adds an adapter NIO binding.
"""
Adds an adapter NIO binding.
:param adapter_number: adapter number
:param nio: NIO instance to add to the slot/port
"""
try:
adapter = self._ethernet_adapters[adapter_number]
except IndexError:
@ -768,6 +798,7 @@ class DockerVM(BaseNode):
:returns: NIO instance
"""
try:
adapter = self._ethernet_adapters[adapter_number]
except IndexError:
@ -792,16 +823,19 @@ class DockerVM(BaseNode):
@property
def adapters(self):
"""Returns the number of Ethernet adapters for this Docker VM.
"""
Returns the number of Ethernet adapters for this Docker VM.
:returns: number of adapters
:rtype: int
"""
return len(self._ethernet_adapters)
@adapters.setter
def adapters(self, adapters):
"""Sets the number of Ethernet adapters for this Docker container.
"""
Sets the number of Ethernet adapters for this Docker container.
:param adapters: number of adapters
"""
@ -820,8 +854,9 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def pull_image(self, image):
"""
Pull image from docker repository
Pulls an image from the Docker repository
"""
def callback(msg):
self.project.emit("log.info", {"message": msg})
yield from self.manager.pull_image(image, progress_callback=callback)
@ -829,7 +864,7 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _start_ubridge_capture(self, adapter_number, output_file):
"""
Start a packet capture in uBridge.
Starts a packet capture in uBridge.
:param adapter_number: adapter number
:param output_file: PCAP destination file for the capture
@ -843,7 +878,7 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _stop_ubridge_capture(self, adapter_number):
"""
Stop a packet capture in uBridge.
Stops a packet capture in uBridge.
:param adapter_number: adapter number
"""
@ -915,7 +950,7 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def _get_log(self):
"""
Return the log from the container
Returns the log from the container
:returns: string
"""
@ -926,7 +961,8 @@ class DockerVM(BaseNode):
@asyncio.coroutine
def delete(self):
"""
Delete the VM (including all its files).
Deletes the VM (including all its files).
"""
yield from self.close()
yield from super().delete()

View File

@ -231,7 +231,7 @@ class Dynamips(BaseManager):
self._ghost_files.remove(file)
yield from wait_run_in_executor(os.remove, file)
except OSError as e:
log.warn("Could not delete file {}: {}".format(file, e))
log.warning("Could not delete file {}: {}".format(file, e))
continue
# Release the dynamips ids if we want to reload the same project
@ -432,7 +432,7 @@ class Dynamips(BaseManager):
finally:
yield from ghost.clean_delete()
except DynamipsError as e:
log.warn("Could not create ghost instance: {}".format(e))
log.warning("Could not create ghost instance: {}".format(e))
if vm.ghost_file != ghost_file and os.path.isfile(ghost_file_path):
# set the ghost file to the router

View File

@ -156,13 +156,13 @@ class Hypervisor(DynamipsHypervisor):
yield from wait_for_process_termination(self._process, timeout=3)
except asyncio.TimeoutError:
if self._process.returncode is None:
log.warn("Dynamips process {} is still running... killing it".format(self._process.pid))
log.warning("Dynamips process {} is still running... killing it".format(self._process.pid))
try:
self._process.kill()
except OSError as e:
log.error("Cannot stop the Dynamips process: {}".format(e))
if self._process.returncode is None:
log.warn('Dynamips hypervisor with PID={} is still running'.format(self._process.pid))
log.warning('Dynamips hypervisor with PID={} is still running'.format(self._process.pid))
if self._stdout_file and os.access(self._stdout_file, os.W_OK):
try:
@ -183,7 +183,7 @@ class Hypervisor(DynamipsHypervisor):
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warn("could not read {}: {}".format(self._stdout_file, e))
log.warning("could not read {}: {}".format(self._stdout_file, e))
return output
def is_running(self):

View File

@ -328,7 +328,7 @@ class Router(BaseNode):
try:
yield from self._hypervisor.send('vm stop "{name}"'.format(name=self._name))
except DynamipsError as e:
log.warn("Could not stop {}: {}".format(self._name, e))
log.warning("Could not stop {}: {}".format(self._name, e))
self.status = "stopped"
log.info('Router "{name}" [{id}] has been stopped'.format(name=self._name, id=self._id))
if self._memory_watcher:
@ -403,7 +403,7 @@ class Router(BaseNode):
yield from self.stop()
yield from self._hypervisor.send('vm delete "{}"'.format(self._name))
except DynamipsError as e:
log.warn("Could not stop and delete {}: {}".format(self._name, e))
log.warning("Could not stop and delete {}: {}".format(self._name, e))
yield from self.hypervisor.stop()
if self._auto_delete_disks:
@ -420,7 +420,7 @@ class Router(BaseNode):
log.debug("Deleting file {}".format(file))
yield from wait_run_in_executor(os.remove, file)
except OSError as e:
log.warn("Could not delete file {}: {}".format(file, e))
log.warning("Could not delete file {}: {}".format(file, e))
continue
self.manager.release_dynamips_id(self.project.id, self.dynamips_id)
@ -1582,12 +1582,13 @@ class Router(BaseNode):
def delete(self):
"""
Delete this VM (including all its files).
Deletes this VM (including all its files).
"""
try:
yield from wait_run_in_executor(shutil.rmtree, self._working_directory)
except OSError as e:
log.warn("Could not delete file {}".format(e))
log.warning("Could not delete file {}".format(e))
self.manager.release_dynamips_id(self._project.id, self._dynamips_id)
@ -1602,10 +1603,11 @@ class Router(BaseNode):
try:
yield from wait_run_in_executor(shutil.rmtree, self._working_directory)
except OSError as e:
log.warn("Could not delete file {}".format(e))
log.warning("Could not delete file {}".format(e))
log.info('Router "{name}" [{id}] has been deleted (including associated files)'.format(name=self._name, id=self._id))
def _memory_files(self):
return [
os.path.join(self._working_directory, "{}_i{}_rom".format(self.platform, self.dynamips_id)),
os.path.join(self._working_directory, "{}_i{}_nvram".format(self.platform, self.dynamips_id))

View File

@ -369,7 +369,7 @@ class IOUVM(BaseNode):
try:
output = yield from gns3server.utils.asyncio.subprocess_check_output("ldd", self._path)
except (FileNotFoundError, subprocess.SubprocessError) as e:
log.warn("Could not determine the shared library dependencies for {}: {}".format(self._path, e))
log.warning("Could not determine the shared library dependencies for {}: {}".format(self._path, e))
return
p = re.compile("([\.\w]+)\s=>\s+not found")
@ -765,7 +765,7 @@ class IOUVM(BaseNode):
with open(self._iou_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warn("could not read {}: {}".format(self._iou_stdout_file, e))
log.warning("could not read {}: {}".format(self._iou_stdout_file, e))
return output
@property

View File

@ -29,6 +29,7 @@ def get_next_application_id(nodes):
:raises IOUError: when no free application ID is available
:return: integer first free id
"""
used = set([n.application_id for n in nodes])
pool = set(range(1, 512))
try:
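The function body is truncated above; the allocation idea is simply to hand out an unused ID from a bounded pool. A self-contained sketch with a hypothetical helper name, not the exact implementation:
def next_free_id(used_ids, limit=512):
    # Lowest unused ID in the range [1, limit).
    free = set(range(1, limit)) - set(used_ids)
    if not free:
        raise ValueError("no free application ID left")
    return min(free)
next_free_id([1, 2, 4])   # -> 3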

View File

@ -60,6 +60,7 @@ class NotificationManager:
def instance():
"""
Singleton to return only one instance of NotificationManager.
:returns: instance of NotificationManager
"""

View File

@ -69,14 +69,16 @@ class PortManager:
@property
def console_host(self):
assert self._console_host is not None
return self._console_host
@console_host.setter
def console_host(self, new_host):
"""
If allow remote connection we need to bind console host to 0.0.0.0
Bind console host to 0.0.0.0 if remote connections are allowed.
"""
server_config = Config.instance().get_section_config("Server")
remote_console_connections = server_config.getboolean("allow_remote_console")
if remote_console_connections:
@ -228,14 +230,12 @@ class PortManager:
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
log.debug(msg)
#project.emit("log.warning", {"message": msg})
return port
if port < port_range_start or port > port_range_end:
old_port = port
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
msg = "TCP port {} is outside the range {}-{} on host {}. Port has been replaced by {}".format(old_port, port_range_start, port_range_end, self._console_host, port)
log.debug(msg)
#project.emit("log.warning", {"message": msg})
return port
try:
PortManager._check_port(self._console_host, port, "TCP")
@ -244,7 +244,6 @@ class PortManager:
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
log.debug(msg)
#project.emit("log.warning", {"message": msg})
return port
self._used_tcp_ports.add(port)
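PortManager._check_port itself is not shown in this diff; a rough, self-contained sketch of how a TCP port can be probed before being handed out (an assumption for illustration, not the GNS3 code):
import socket
def tcp_port_is_free(host, port):
    # If the bind is refused, something is already listening on that port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind((host, port))
            return True
        except OSError:
            return False
tcp_port_is_free("127.0.0.1", 5000)   # True if nothing is bound to port 5000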

View File

@ -20,9 +20,6 @@ import aiohttp
import shutil
import asyncio
import hashlib
import zipstream
import zipfile
import json
from uuid import UUID, uuid4
from .port_manager import PortManager
@ -110,7 +107,7 @@ class Project:
if hasattr(self, "_path"):
if path != self._path and self.is_local() is False:
raise aiohttp.web.HTTPForbidden(text="You are not allowed to modify the project directory path")
raise aiohttp.web.HTTPForbidden(text="Changing the project directory path is not allowed")
self._path = path
@ -123,7 +120,7 @@ class Project:
def name(self, name):
if "/" in name or "\\" in name:
raise aiohttp.web.HTTPForbidden(text="Name can not contain path separator")
raise aiohttp.web.HTTPForbidden(text="Project names cannot contain path separators")
self._name = name
@property
@ -290,7 +287,7 @@ class Project:
@asyncio.coroutine
def close(self):
"""
Closes the project, but keep information on disk
Closes the project, but keeps project data on disk
"""
project_nodes_id = set([n.id for n in self.nodes])

View File

@ -48,6 +48,7 @@ class Qemu(BaseManager):
:returns: List of architectures for which KVM is available on this server.
"""
kvm = []
if not os.path.exists("/dev/kvm"):
@ -182,7 +183,7 @@ class Qemu(BaseManager):
if match:
return version
except (UnicodeDecodeError, OSError) as e:
log.warn("could not read {}: {}".format(version_file, e))
log.warning("could not read {}: {}".format(version_file, e))
return ""
else:
try:

View File

@ -67,7 +67,7 @@ class QemuVM(BaseNode):
def __init__(self, name, node_id, project, manager, linked_clone=True, qemu_path=None, console=None, console_type="telnet", platform=None):
super().__init__(name, node_id, project, manager, console=console, console_type=console_type, wrap_console=True)
super().__init__(name, node_id, project, manager, console=console, console_type=console_type, linked_clone=linked_clone, wrap_console=True)
server_config = manager.config.get_section_config("Server")
self._host = server_config.get("host", "127.0.0.1")
self._monitor_host = server_config.get("monitor_host", "127.0.0.1")
@ -178,6 +178,7 @@ class QemuVM(BaseNode):
qemu_path=qemu_path))
def _check_qemu_path(self, qemu_path):
if qemu_path is None:
raise QemuError("QEMU binary path is not set")
if not os.path.exists(qemu_path):
@ -194,6 +195,7 @@ class QemuVM(BaseNode):
@platform.setter
def platform(self, platform):
self._platform = platform
if sys.platform.startswith("win"):
self.qemu_path = "qemu-system-{}w.exe".format(platform)
@ -207,6 +209,7 @@ class QemuVM(BaseNode):
:param variable: Variable name in the class
:param value: New disk value
"""
value = self.manager.get_abs_image_path(value)
if not self.linked_clone:
for node in self.manager.nodes:
@ -235,6 +238,7 @@ class QemuVM(BaseNode):
:param hda_disk_image: QEMU hda disk image path
"""
self._disk_setter("hda_disk_image", hda_disk_image)
@property
@ -737,7 +741,7 @@ class QemuVM(BaseNode):
id=self._id,
initrd=initrd))
if "asa" in initrd:
self.project.emit("log.warning", {"message": "Warning ASA 8 is not supported by GNS3 and Cisco, you need to use ASAv. Depending of your hardware and OS this could not work or you could be limited to one instance. If ASA 8 is not booting their is no GNS3 solution, you need to upgrade to ASAv."})
self.project.emit("log.warning", {"message": "Warning ASA 8 is not supported by GNS3 and Cisco, please use ASAv instead. Depending of your hardware and OS this could not work or you could be limited to one instance. If ASA 8 is not booting their is no GNS3 solution, you must to upgrade to ASAv."})
self._initrd = initrd
@property
@ -1002,7 +1006,7 @@ class QemuVM(BaseNode):
except ProcessLookupError:
pass
if self._process.returncode is None:
log.warn('QEMU VM "{}" PID={} is still running'.format(self._name, self._process.pid))
log.warning('QEMU VM "{}" PID={} is still running'.format(self._name, self._process.pid))
self._process = None
self._stop_cpulimit()
yield from super().stop()
@ -1025,12 +1029,12 @@ class QemuVM(BaseNode):
log.info("Connecting to Qemu monitor on {}:{}".format(self._monitor_host, self._monitor))
reader, writer = yield from asyncio.open_connection(self._monitor_host, self._monitor)
except OSError as e:
log.warn("Could not connect to QEMU monitor: {}".format(e))
log.warning("Could not connect to QEMU monitor: {}".format(e))
return result
try:
writer.write(command.encode('ascii') + b"\n")
except OSError as e:
log.warn("Could not write to QEMU monitor: {}".format(e))
log.warning("Could not write to QEMU monitor: {}".format(e))
writer.close()
return result
if expected:
@ -1044,7 +1048,7 @@ class QemuVM(BaseNode):
result = line.decode("utf-8").strip()
break
except EOFError as e:
log.warn("Could not read from QEMU monitor: {}".format(e))
log.warning("Could not read from QEMU monitor: {}".format(e))
writer.close()
return result

View File

@ -334,7 +334,7 @@ class VirtualBoxVM(BaseNode):
# deactivate the first serial port
yield from self._modify_vm("--uart1 off")
except VirtualBoxError as e:
log.warn("Could not deactivate the first serial port: {}".format(e))
log.warning("Could not deactivate the first serial port: {}".format(e))
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
@ -356,7 +356,7 @@ class VirtualBoxVM(BaseNode):
self.status = "suspended"
log.info("VirtualBox VM '{name}' [{id}] suspended".format(name=self.name, id=self.id))
else:
log.warn("VirtualBox VM '{name}' [{id}] cannot be suspended, current state: {state}".format(name=self.name,
log.warning("VirtualBox VM '{name}' [{id}] cannot be suspended, current state: {state}".format(name=self.name,
id=self.id,
state=vm_state))
@ -425,7 +425,7 @@ class VirtualBoxVM(BaseNode):
hdd_file))
except VirtualBoxError as e:
log.warn("VirtualBox VM '{name}' [{id}] error reattaching HDD {controller} {port} {device} {medium}: {error}".format(name=self.name,
log.warning("VirtualBox VM '{name}' [{id}] error reattaching HDD {controller} {port} {device} {medium}: {error}".format(name=self.name,
id=self.id,
controller=hdd_info["controller"],
port=hdd_info["port"],
@ -525,7 +525,7 @@ class VirtualBoxVM(BaseNode):
hdd["port"],
hdd["device"]))
except VirtualBoxError as e:
log.warn("VirtualBox VM '{name}' [{id}] error detaching HDD {controller} {port} {device}: {error}".format(name=self.name,
log.warning("VirtualBox VM '{name}' [{id}] error detaching HDD {controller} {port} {device}: {error}".format(name=self.name,
id=self.id,
controller=hdd["controller"],
port=hdd["port"],
@ -928,7 +928,7 @@ class VirtualBoxVM(BaseNode):
# It seems this sometimes fails due to an internal race condition in VirtualBox;
# we have no real explanation for this.
except VirtualBoxError:
log.warn("Snapshot 'reset' not created")
log.warning("Snapshot 'reset' not created")
os.makedirs(os.path.join(self.working_dir, self._vmname), exist_ok=True)

View File

@ -24,7 +24,6 @@ import os
import asyncio
import tempfile
from gns3server.utils.interfaces import interfaces
from gns3server.utils.asyncio.telnet_server import AsyncioTelnetServer
from gns3server.utils.asyncio.serial import asyncio_open_serial
from gns3server.utils.asyncio import locked_coroutine

View File

@ -264,6 +264,7 @@ class VPCSVM(BaseNode):
:param returncode: Process returncode
"""
if self._started:
log.info("VPCS process has stopped, return code: %d", returncode)
self._started = False
@ -291,7 +292,7 @@ class VPCSVM(BaseNode):
except OSError as e:
log.error("Cannot stop the VPCS process: {}".format(e))
if self._process.returncode is None:
log.warn('VPCS VM "{}" with PID={} is still running'.format(self._name, self._process.pid))
log.warning('VPCS VM "{}" with PID={} is still running'.format(self._name, self._process.pid))
self._process = None
self._started = False
@ -333,7 +334,7 @@ class VPCSVM(BaseNode):
with open(self._vpcs_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warn("Could not read {}: {}".format(self._vpcs_stdout_file, e))
log.warning("Could not read {}: {}".format(self._vpcs_stdout_file, e))
return output
def is_running(self):
@ -508,28 +509,20 @@ class VPCSVM(BaseNode):
if self._vpcs_version >= parse_version("0.8b"):
command.extend(["-R"]) # disable the relay feature of VPCS (starting with VPCS 0.8)
else:
log.warn("The VPCS relay feature could not be disabled because the VPCS version is below 0.8b")
log.warning("The VPCS relay feature could not be disabled because the VPCS version is below 0.8b")
# use the local UDP tunnel to uBridge instead
if not self._local_udp_tunnel:
self._local_udp_tunnel = self._create_local_udp_tunnel()
nio = self._local_udp_tunnel[0]
if nio:
if isinstance(nio, NIOUDP):
# UDP tunnel
command.extend(["-s", str(nio.lport)]) # source UDP port
command.extend(["-c", str(nio.rport)]) # destination UDP port
try:
command.extend(["-t", socket.gethostbyname(nio.rhost)]) # destination host, we need to resolve the hostname because VPCS doesn't support it
except socket.gaierror as e:
raise VPCSError("Can't resolve hostname {}".format(nio.rhost))
elif isinstance(nio, NIOTAP):
# FIXME: remove old code
# TAP interface
command.extend(["-e"])
command.extend(["-d", nio.tap_device])
if nio and isinstance(nio, NIOUDP):
# UDP tunnel
command.extend(["-s", str(nio.lport)]) # source UDP port
command.extend(["-c", str(nio.rport)]) # destination UDP port
try:
command.extend(["-t", socket.gethostbyname(nio.rhost)]) # destination host, we need to resolve the hostname because VPCS doesn't support it
except socket.gaierror as e:
raise VPCSError("Can't resolve hostname {}".format(nio.rhost))
if self.script_file:
command.extend([os.path.basename(self.script_file)])

View File

@ -42,21 +42,19 @@ log = logging.getLogger(__name__)
class Controller:
"""The controller is responsible to manage one or more compute servers"""
"""
The controller is responsible for managing one or more compute servers.
"""
def __init__(self):
self._computes = {}
self._projects = {}
# Store settings shared by the different GUI will be replaced
# by dedicated API later
self._settings = {}
self._notification = Notification(self)
self.gns3vm = GNS3VM(self)
self.symbols = Symbols()
# Store settings shared by the different GUI will be replace by dedicated API later
# FIXME: settings shared by the different GUIs will be replaced by a dedicated API later
self._settings = None
self._appliances = {}
self._appliance_templates = {}
@ -65,6 +63,7 @@ class Controller:
log.info("Load controller configuration file {}".format(self._config_file))
def load_appliances(self):
self._appliance_templates = {}
for directory, builtin in (
(get_resource('appliances'), True,), (self.appliances_path(), False,)
@ -74,7 +73,7 @@ class Controller:
if not file.endswith('.gns3a') and not file.endswith('.gns3appliance'):
continue
path = os.path.join(directory, file)
appliance_id = uuid.uuid3(uuid.NAMESPACE_URL, path) # Generate the UUID from path to avoid change between reboots
appliance_id = uuid.uuid3(uuid.NAMESPACE_URL, path) # Generate UUID from path to avoid change between reboots
try:
with open(path, 'r', encoding='utf-8') as f:
appliance = ApplianceTemplate(appliance_id, json.load(f), builtin=builtin)
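The uuid.uuid3 call above is deterministic, which is what keeps appliance IDs stable across restarts; a one-line illustration with a hypothetical path:
import uuid
uuid.uuid3(uuid.NAMESPACE_URL, "/home/user/GNS3/appliances/router.gns3a")   # same path -> same UUID every run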
@ -160,14 +159,16 @@ class Controller:
@asyncio.coroutine
def start(self):
log.info("Start controller")
log.info("Controller is starting")
self.load_base_files()
server_config = Config.instance().get_section_config("Server")
Config.instance().listen_for_config_changes(self._update_config)
host = server_config.get("host", "localhost")
port = server_config.getint("port", 3080)
# If console_host is 0.0.0.0 client will use the ip they use
# to connect to the controller
# clients will use the IP they use to connect to
# the controller if console_host is 0.0.0.0
console_host = host
if host == "0.0.0.0":
host = "127.0.0.1"
@ -183,12 +184,12 @@ class Controller:
protocol=server_config.get("protocol", "http"),
host=host,
console_host=console_host,
port=server_config.getint("port", 3080),
port=port,
user=server_config.get("user", ""),
password=server_config.get("password", ""),
force=True)
except aiohttp.web_exceptions.HTTPConflict as e:
log.fatal("Can't access to the local server, make sure anything else is not running on the same port")
log.fatal("Cannot access to the local server, make sure something else is not running on the TCP port {}".format(port))
sys.exit(1)
for c in computes:
try:
@ -199,14 +200,14 @@ class Controller:
try:
yield from self.gns3vm.auto_start_vm()
except GNS3VMError as e:
log.warn(str(e))
log.warning(str(e))
yield from self._project_auto_open()
def _update_config(self):
"""
Call this when the server configuration file
change
Call this when the server configuration file changes.
"""
if self._local_server:
server_config = Config.instance().get_section_config("Server")
self._local_server.user = server_config.get("user")
@ -214,7 +215,8 @@ class Controller:
@asyncio.coroutine
def stop(self):
log.info("Stop controller")
log.info("Controller is Stopping")
for project in self._projects.values():
yield from project.close()
for compute in self._computes.values():
@ -231,6 +233,7 @@ class Controller:
"""
Save the controller configuration on disk
"""
# We don't save during loading, otherwise we could lose data
if self._settings is None:
return
@ -257,13 +260,14 @@ class Controller:
with open(self._config_file, 'w+') as f:
json.dump(data, f, indent=4)
except OSError as e:
log.error("Can't write the configuration {}: {}".format(self._config_file, str(e)))
log.error("Cannnot write configuration file '{}': {}".format(self._config_file, e))
@asyncio.coroutine
def _load_controller_settings(self):
"""
Reload the controller configuration from disk
"""
try:
if not os.path.exists(self._config_file):
yield from self._import_gns3_gui_conf()
@ -271,7 +275,7 @@ class Controller:
with open(self._config_file) as f:
data = json.load(f)
except (OSError, ValueError) as e:
log.critical("Cannot load %s: %s", self._config_file, str(e))
log.critical("Cannot load configuration file '{}': {}".format(self._config_file, e))
self._settings = {}
return []
@ -290,6 +294,7 @@ class Controller:
"""
Preload the list of projects from disk
"""
server_config = Config.instance().get_section_config("Server")
projects_path = os.path.expanduser(server_config.get("projects_path", "~/GNS3/projects"))
os.makedirs(projects_path, exist_ok=True)
@ -311,6 +316,7 @@ class Controller:
At startup we copy base files to the user location to allow
the user to customize them
"""
dst_path = self.configs_path()
src_path = get_resource('configs')
try:
@ -324,6 +330,7 @@ class Controller:
"""
Get the image storage directory
"""
server_config = Config.instance().get_section_config("Server")
images_path = os.path.expanduser(server_config.get("images_path", "~/GNS3/projects"))
os.makedirs(images_path, exist_ok=True)
@ -333,6 +340,7 @@ class Controller:
"""
Get the configs storage directory
"""
server_config = Config.instance().get_section_config("Server")
images_path = os.path.expanduser(server_config.get("configs_path", "~/GNS3/projects"))
os.makedirs(images_path, exist_ok=True)
@ -342,6 +350,7 @@ class Controller:
"""
Get the appliances storage directory
"""
server_config = Config.instance().get_section_config("Server")
appliances_path = os.path.expanduser(server_config.get("appliances_path", "~/GNS3/projects"))
os.makedirs(appliances_path, exist_ok=True)
@ -352,6 +361,7 @@ class Controller:
"""
Import old config from GNS3 GUI
"""
config_file = os.path.join(os.path.dirname(self._config_file), "gns3_gui.conf")
if os.path.exists(config_file):
with open(config_file) as f:
@ -404,12 +414,14 @@ class Controller:
"""
Settings shared by the different GUIs, to be replaced by a dedicated API later. Dictionary.
"""
return self._settings
@settings.setter
def settings(self, val):
self._settings = val
self._settings["modification_uuid"] = str(uuid.uuid4()) # We add a modification id to the settings it's help the gui to detect changes
self._settings["modification_uuid"] = str(uuid.uuid4()) # We add a modification id to the settings to help the gui to detect changes
self.save()
self.load_appliances()
self.notification.emit("settings.updated", val)
@ -459,6 +471,7 @@ class Controller:
"""
Close projects running on a compute
"""
for project in self._projects.values():
if compute in project.computes:
yield from project.close()
@ -470,6 +483,7 @@ class Controller:
:param compute_id: Compute server identifier
"""
try:
compute = self.get_compute(compute_id)
except aiohttp.web.HTTPNotFound:
@ -485,6 +499,7 @@ class Controller:
"""
The notification system
"""
return self._notification
@property
@ -492,23 +507,26 @@ class Controller:
"""
:returns: The dictionary of compute servers managed by this controller
"""
return self._computes
def get_compute(self, compute_id):
"""
Returns a compute server or raises a 404 error.
"""
try:
return self._computes[compute_id]
except KeyError:
if compute_id == "vm":
raise aiohttp.web.HTTPNotFound(text="You try to use a node on the GNS3 VM server but the GNS3 VM is not configured")
raise aiohttp.web.HTTPNotFound(text="Cannot use a node on the GNS3 VM server with the GNS3 VM not configured")
raise aiohttp.web.HTTPNotFound(text="Compute ID {} doesn't exist".format(compute_id))
def has_compute(self, compute_id):
"""
Return True if the compute exists in the controller
"""
return compute_id in self._computes
@asyncio.coroutine
@ -520,8 +538,8 @@ class Controller:
:param name: Project name
:param kwargs: See the documentation of Project
"""
if project_id not in self._projects:
if project_id not in self._projects:
for project in self._projects.values():
if name and project.name == name:
raise aiohttp.web.HTTPConflict(text='Project name "{}" already exists'.format(name))
@ -534,6 +552,7 @@ class Controller:
"""
Returns a project or raises a 404 error.
"""
try:
return self._projects[project_id]
except KeyError:
@ -546,11 +565,13 @@ class Controller:
If the project has not finished loading, wait for it
"""
project = self.get_project(project_id)
yield from project.wait_loaded()
return project
def remove_project(self, project):
if project.id in self._projects:
del self._projects[project.id]
@ -562,6 +583,7 @@ class Controller:
:param path: Path of the .gns3
:param load: Load the topology
"""
topo_data = load_topology(path)
topo_data.pop("topology")
topo_data.pop("version")
@ -581,6 +603,7 @@ class Controller:
"""
Auto-open any project that has auto open enabled
"""
for project in self._projects.values():
if project.auto_open:
yield from project.open()
@ -589,6 +612,7 @@ class Controller:
"""
Generate a free project name based on the base name
"""
names = [p.name for p in self._projects.values()]
if base_name not in names:
return base_name
@ -610,6 +634,7 @@ class Controller:
"""
:returns: The dictionary of projects managed by GNS3
"""
return self._projects
@property
@ -617,6 +642,7 @@ class Controller:
"""
:returns: The dictionary of appliance templates managed by GNS3
"""
return self._appliance_templates
@property
@ -624,9 +650,11 @@ class Controller:
"""
:returns: The dictionary of appliances managed by GNS3
"""
return self._appliances
def projects_directory(self):
server_config = Config.instance().get_section_config("Server")
return os.path.expanduser(server_config.get("projects_path", "~/GNS3/projects"))
@ -652,6 +680,7 @@ class Controller:
:param image: Image to use
:param ram: amount of RAM to use
"""
compute = self.get_compute(compute_id)
for project in list(self._projects.values()):
if project.name == "AUTOIDLEPC":

View File

@ -31,6 +31,7 @@ ID_TO_CATEGORY = {
class Appliance:
def __init__(self, appliance_id, data, builtin=False):
if appliance_id is None:
self._id = str(uuid.uuid4())
elif isinstance(appliance_id, uuid.UUID):

View File

@ -648,7 +648,7 @@ class Compute:
@asyncio.coroutine
def get_ip_on_same_subnet(self, other_compute):
"""
Try to found the best ip for communication from one compute
Try to find the best IP for communication from one compute
to another
:returns: Tuple (ip_for_this_compute, ip_for_other_compute)

View File

@ -71,7 +71,7 @@ def export_project(project, temporary_dir, include_images=False, keep_compute_id
open(path).close()
except OSError as e:
msg = "Could not export file {}: {}".format(path, e)
log.warn(msg)
log.warning(msg)
project.controller.notification.emit("log.warning", {"message": msg})
continue
if file.endswith(".gns3"):

View File

@ -59,7 +59,7 @@ class GNS3VM:
"""
download_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VMware.Workstation.{version}.zip".format(version=__version__)
vmware_informations = {
vmware_info = {
"engine_id": "vmware",
"description": 'VMware is the recommended choice for best performances.<br>The GNS3 VM can be <a href="{}">downloaded here</a>.'.format(download_url),
"support_when_exit": True,
@ -67,12 +67,12 @@ class GNS3VM:
"support_ram": True
}
if sys.platform.startswith("darwin"):
vmware_informations["name"] = "VMware Fusion"
vmware_info["name"] = "VMware Fusion"
else:
vmware_informations["name"] = "VMware Workstation / Player"
vmware_info["name"] = "VMware Workstation / Player"
download_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VirtualBox.{version}.zip".format(version=__version__)
virtualbox_informations = {
virtualbox_info = {
"engine_id": "virtualbox",
"name": "VirtualBox",
"description": 'VirtualBox doesn\'t support nested virtualization, this means running Qemu based VM could be very slow.<br>The GNS3 VM can be <a href="{}">downloaded here</a>'.format(download_url),
@ -81,7 +81,7 @@ class GNS3VM:
"support_ram": True
}
remote_informations = {
remote_info = {
"engine_id": "remote",
"name": "Remote",
"description": "Use a remote GNS3 server as the GNS3 VM.",
@ -91,16 +91,18 @@ class GNS3VM:
}
return [
vmware_informations,
virtualbox_informations,
remote_informations
vmware_info,
virtualbox_info,
remote_info
]
def current_engine(self):
return self._get_engine(self._settings["engine"])
@property
def engine(self):
return self._settings["engine"]
@property
@ -110,6 +112,7 @@ class GNS3VM:
:returns: VM IP address
"""
return self.current_engine().ip_address
@property
@ -119,6 +122,7 @@ class GNS3VM:
:returns: Boolean
"""
return self.current_engine().running
@property
@ -128,6 +132,7 @@ class GNS3VM:
:returns: VM user
"""
return self.current_engine().user
@property
@ -137,6 +142,7 @@ class GNS3VM:
:returns: VM password
"""
return self.current_engine().password
@property
@ -146,6 +152,7 @@ class GNS3VM:
:returns: VM port
"""
return self.current_engine().port
@property
@ -155,6 +162,7 @@ class GNS3VM:
:returns: VM protocol
"""
return self.current_engine().protocol
@property
@ -162,6 +170,7 @@ class GNS3VM:
"""
The GNS3 VM is activated
"""
return self._settings.get("enable", False)
@property
@ -169,14 +178,17 @@ class GNS3VM:
"""
What should be done on exit
"""
return self._settings["when_exit"]
@property
def settings(self):
return self._settings
@settings.setter
def settings(self, val):
self._settings.update(val)
@asyncio.coroutine
@ -184,6 +196,7 @@ class GNS3VM:
"""
Update settings and restart the VM if required
"""
new_settings = copy.copy(self._settings)
new_settings.update(settings)
if self.settings != new_settings:
@ -201,6 +214,7 @@ class GNS3VM:
"""
Load an engine
"""
if engine in self._engines:
return self._engines[engine]
@ -223,6 +237,7 @@ class GNS3VM:
"""
List VMs for an engine
"""
engine = self._get_engine(engine)
vms = []
try:
@ -240,6 +255,7 @@ class GNS3VM:
"""
Auto start the GNS3 VM if required
"""
if self.enable:
try:
yield from self.start()
@ -256,6 +272,7 @@ class GNS3VM:
@asyncio.coroutine
def exit_vm(self):
if self.enable:
try:
if self._settings["when_exit"] == "stop":
@ -263,13 +280,14 @@ class GNS3VM:
elif self._settings["when_exit"] == "suspend":
yield from self._suspend()
except GNS3VMError as e:
log.warn(str(e))
log.warning(str(e))
@locked_coroutine
def start(self):
"""
Start the GNS3 VM
"""
engine = self.current_engine()
if not engine.running:
if self._settings["vmname"] is None:

View File

@ -38,7 +38,7 @@ def import_project(controller, project_id, stream, location=None, name=None, kee
"""
Import a project contained in a zip file
You need to handle OSerror exceptions
You must handle OSError exceptions
:param controller: GNS3 Controller
:param project_id: ID of the project to import
@ -68,7 +68,7 @@ def import_project(controller, project_id, stream, location=None, name=None, kee
else:
project_name = controller.get_free_project_name(topology["name"])
except KeyError:
raise aiohttp.web.HTTPConflict(text="Can't import topology the .gns3 is corrupted or missing")
raise aiohttp.web.HTTPConflict(text="Cannot import topology the .gns3 is corrupted or missing")
if location:
path = location

View File

@ -22,7 +22,6 @@ import copy
import uuid
import os
from .compute import ComputeConflict, ComputeError
from .ports.port_factory import PortFactory, StandardPortFactory, DynamipsPortFactory
from ..utils.images import images_directories

View File

@ -63,7 +63,7 @@ class Port:
@property
def short_name(self):
# If port name format has change we use the port name as the short name (1.X behavior)
# If the port name format has changed, we use the port name as the short name (1.X behavior)
if self._short_name:
return self._short_name
elif not self._name.startswith("{}{}".format(self.long_name_type(), self._interface_number)):

View File

@ -103,6 +103,7 @@ class StandardPortFactory:
{port0} => {port9}
{segment0} => {segment9}
"""
replacements = {}
for i in range(0, 9):
replacements["port" + str(i)] = interface_number + i
@ -114,6 +115,7 @@ class DynamipsPortFactory:
"""
Create ports for Dynamips devices
"""
ADAPTER_MATRIX = {
"C1700-MB-1FE": {"nb_ports": 1,
"port": FastEthernetPort},
@ -178,6 +180,7 @@ class DynamipsPortFactory:
}
def __new__(cls, properties):
ports = []
adapter_number = 0

View File

@ -18,11 +18,9 @@
import sys
from gns3server.web.route import Route
from gns3server.config import Config
from gns3server.schemas.capabilities import CAPABILITIES_SCHEMA
from gns3server.version import __version__
from gns3server.compute import MODULES
from aiohttp.web import HTTPConflict
class CapabilitiesHandler:

View File

@ -206,7 +206,7 @@ class Hypervisor(UBridgeHypervisor):
yield from wait_for_process_termination(self._process, timeout=3)
except asyncio.TimeoutError:
if self._process and self._process.returncode is None:
log.warn("uBridge process {} is still running... killing it".format(self._process.pid))
log.warning("uBridge process {} is still running... killing it".format(self._process.pid))
try:
self._process.kill()
except ProcessLookupError:
@ -232,7 +232,7 @@ class Hypervisor(UBridgeHypervisor):
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warn("could not read {}: {}".format(self._stdout_file, e))
log.warning("could not read {}: {}".format(self._stdout_file, e))
return output
def is_running(self):

View File

@ -83,7 +83,7 @@ def list_images(type):
"md5sum": md5sum(os.path.join(root, filename)),
"filesize": os.stat(os.path.join(root, filename)).st_size})
except OSError as e:
log.warn("Can't add image {}: {}".format(path, str(e)))
log.warning("Can't add image {}: {}".format(path, str(e)))
return images

View File

@ -113,7 +113,7 @@ def get_windows_interfaces():
"netmask": netmask,
"type": "ethernet"})
except (AttributeError, pywintypes.com_error):
log.warn("Could not use the COM service to retrieve interface info, trying using the registry...")
log.warning("Could not use the COM service to retrieve interface info, trying using the registry...")
return _get_windows_interfaces_from_registry()
return interfaces

View File

@ -190,7 +190,7 @@ class Route(object):
f.write("curl -X {} 'http://{}{}' -d '{}'".format(request.method, request.host, request.path_qs, json.dumps(request.json)))
f.write("\n")
except OSError as e:
log.warn("Could not write to the record file {}: {}".format(record_file, e))
log.warning("Could not write to the record file {}: {}".format(record_file, e))
response = Response(request=request, route=route, output_schema=output_schema)
yield from func(request, response)
except aiohttp.web.HTTPBadRequest as e:
@ -221,7 +221,7 @@ class Route(object):
response.set_status(408)
response.json({"message": "Request canceled", "status": 408})
except aiohttp.ClientError:
log.warn("Client error")
log.warning("Client error")
response = Response(request=request, route=route)
response.set_status(408)
response.json({"message": "Client error", "status": 408})