Mirror of https://github.com/GNS3/gns3-server.git

Refactoring to use a common node class for all VMs and other (future) objects.

parent 9dca7dfe4a
commit 5a76f81271
@@ -9,8 +9,8 @@ Get a Dynamips device instance

Parameters
**********
- **device_id**: UUID for the instance
- **project_id**: UUID for the project
- **device_id**: UUID for the instance

Response status codes
**********************

@@ -38,8 +38,8 @@ Update a Dynamips device instance

Parameters
**********
- **device_id**: UUID for the instance
- **project_id**: UUID for the project
- **device_id**: UUID for the instance

Response status codes
**********************

@@ -96,8 +96,8 @@ Delete a Dynamips device instance

Parameters
**********
- **device_id**: UUID for the instance
- **project_id**: UUID for the project
- **device_id**: UUID for the instance

Response status codes
**********************
@@ -9,9 +9,9 @@ Add a NIO to a Dynamips device instance

Parameters
**********
- **project_id**: UUID for the project
- **port_number**: Port on the device
- **device_id**: UUID for the instance
- **project_id**: UUID for the project

Response status codes
**********************

@@ -139,9 +139,9 @@ Remove a NIO from a Dynamips device instance

Parameters
**********
- **project_id**: UUID for the project
- **port_number**: Port on the device
- **device_id**: UUID for the instance
- **project_id**: UUID for the project

Response status codes
**********************
@@ -9,9 +9,9 @@ Start a packet capture on a Dynamips device instance

Parameters
**********
- **project_id**: UUID for the project
- **port_number**: Port on the device
- **device_id**: UUID for the instance
- **project_id**: UUID for the project

Response status codes
**********************

@@ -26,6 +26,6 @@ Input

<table>
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
<tr><td>capture_file_name</td> <td>✔</td> <td>string</td> <td>Capture file name</td> </tr>
<tr><td>data_link_type</td> <td> </td> <td>string</td> <td>PCAP data link type</td> </tr>
<tr><td>data_link_type</td> <td> </td> <td>enum</td> <td>Possible values: DLT_ATM_RFC1483, DLT_EN10MB, DLT_FRELAY, DLT_C_HDLC</td> </tr>
</table>
@@ -9,9 +9,9 @@ Stop a packet capture on a Dynamips device instance

Parameters
**********
- **project_id**: UUID for the project
- **port_number**: Port on the device
- **device_id**: UUID for the instance
- **project_id**: UUID for the project

Response status codes
**********************
@@ -5,7 +5,7 @@

GET /v2/compute/virtualbox/vms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Get all VirtualBox VMs available
Get all available VirtualBox VMs

Response status codes
**********************
@@ -40,3 +40,12 @@ Output

<tr><td>version</td> <td> </td> <td>['string', 'null']</td> <td>Version of the GNS3 remote compute</td> </tr>
</table>


GET /v2/computes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
List compute nodes

Response status codes
**********************
- **200**: Compute list
@@ -22,6 +22,9 @@ Input

<table>
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
<tr><td>capture_file_name</td> <td> </td> <td>['string', 'null']</td> <td>Read-only property. The name of the capture file if a capture is running</td> </tr>
<tr><td>capture_file_path</td> <td> </td> <td>['string', 'null']</td> <td>Read-only property. The full path of the capture file if a capture is running</td> </tr>
<tr><td>capturing</td> <td> </td> <td>boolean</td> <td>Read-only property. True if a capture is running on the link</td> </tr>
<tr><td>link_id</td> <td> </td> <td>string</td> <td>Link identifier</td> </tr>
<tr><td>vms</td> <td>✔</td> <td>array</td> <td>List of the VMs</td> </tr>
</table>

@@ -32,6 +35,9 @@ Output

<table>
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
<tr><td>capture_file_name</td> <td> </td> <td>['string', 'null']</td> <td>Read-only property. The name of the capture file if a capture is running</td> </tr>
<tr><td>capture_file_path</td> <td> </td> <td>['string', 'null']</td> <td>Read-only property. The full path of the capture file if a capture is running</td> </tr>
<tr><td>capturing</td> <td> </td> <td>boolean</td> <td>Read-only property. True if a capture is running on the link</td> </tr>
<tr><td>link_id</td> <td> </td> <td>string</td> <td>Link identifier</td> </tr>
<tr><td>vms</td> <td>✔</td> <td>array</td> <td>List of the VMs</td> </tr>
</table>
@@ -9,8 +9,8 @@ Delete a link instance

Parameters
**********
- **link_id**: UUID of the link
- **project_id**: UUID for the project
- **link_id**: UUID of the link

Response status codes
**********************
@@ -42,3 +42,12 @@ Sample session

.. literalinclude:: ../../../examples/controller_post_projects.txt


GET /v2/projects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
List projects

Response status codes
**********************
- **200**: List of projects
@@ -40,14 +40,14 @@ from .nios.nio_tap import NIOTAP
from .nios.nio_nat import NIONAT
from .nios.nio_generic_ethernet import NIOGenericEthernet
from ..utils.images import md5sum, remove_checksum
from .vm_error import VMError
from .node_error import NodeError


class BaseManager:

    """
    Base class for all Manager classes.
    Responsible of management of a VM pool of the same type.
    Responsible of management of a node pool of the same type.
    """

    _convert_lock = None

@@ -55,7 +55,7 @@ class BaseManager:
    def __init__(self):

        BaseManager._convert_lock = asyncio.Lock()
        self._vms = {}
        self._nodes = {}
        self._port_manager = None
        self._config = Config.instance()

@@ -110,8 +110,8 @@ class BaseManager:
    def unload(self):

        tasks = []
        for vm_id in self._vms.keys():
            tasks.append(asyncio.async(self.close_vm(vm_id)))
        for node_id in self._nodes.keys():
            tasks.append(asyncio.async(self.close_node(node_id)))

        if tasks:
            done, _ = yield from asyncio.wait(tasks)

@@ -119,21 +119,21 @@ class BaseManager:
                try:
                    future.result()
                except (Exception, GeneratorExit) as e:
                    log.error("Could not close VM {}".format(e), exc_info=1)
                    log.error("Could not close node {}".format(e), exc_info=1)
                    continue

        if hasattr(BaseManager, "_instance"):
            BaseManager._instance = None
        log.debug("Module {} unloaded".format(self.module_name))

    def get_vm(self, vm_id, project_id=None):
    def get_node(self, node_id, project_id=None):
        """
        Returns a VM instance.
        Returns a Node instance.

        :param vm_id: VM identifier
        :param node_id: Node identifier
        :param project_id: Project identifier

        :returns: VM instance
        :returns: Node instance
        """

        if project_id:
@@ -141,19 +141,19 @@ class BaseManager:
            project = ProjectManager.instance().get_project(project_id)

        try:
            UUID(vm_id, version=4)
            UUID(node_id, version=4)
        except ValueError:
            raise aiohttp.web.HTTPBadRequest(text="VM ID {} is not a valid UUID".format(vm_id))
            raise aiohttp.web.HTTPBadRequest(text="Node ID {} is not a valid UUID".format(node_id))

        if vm_id not in self._vms:
            raise aiohttp.web.HTTPNotFound(text="VM ID {} doesn't exist".format(vm_id))
        if node_id not in self._nodes:
            raise aiohttp.web.HTTPNotFound(text="Node ID {} doesn't exist".format(node_id))

        vm = self._vms[vm_id]
        node = self._nodes[node_id]
        if project_id:
            if vm.project.id != project.id:
                raise aiohttp.web.HTTPNotFound(text="Project ID {} doesn't belong to VM {}".format(project_id, vm.name))
            if node.project.id != project.id:
                raise aiohttp.web.HTTPNotFound(text="Project ID {} doesn't belong to node {}".format(project_id, node.name))

        return vm
        return node
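For reference, a minimal standalone sketch of the lookup guards in get_node() above, using only the standard library; the module-level dict and plain exceptions are illustrative stand-ins for self._nodes and the aiohttp errors:

    from uuid import UUID

    nodes = {}  # stand-in for the manager's self._nodes

    def get_node(node_id):
        # Reject identifiers that are not version-4 UUIDs.
        try:
            UUID(node_id, version=4)
        except ValueError:
            raise ValueError("Node ID {} is not a valid UUID".format(node_id))
        # Unknown identifiers are a lookup failure, not a crash.
        if node_id not in nodes:
            raise KeyError("Node ID {} doesn't exist".format(node_id))
        return nodes[node_id]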
    @asyncio.coroutine
    def convert_old_project(self, project, legacy_id, name):

@@ -194,8 +194,8 @@ class BaseManager:
                                                                       new_remote_project_path, e))

        if hasattr(self, "get_legacy_vm_workdir"):
            # rename old project VM working dir
            log.info("Converting old VM working directory...")
            # rename old project node working dir
            log.info("Converting old node working directory...")
            legacy_vm_dir = self.get_legacy_vm_workdir(legacy_id, name)
            legacy_vm_working_path = os.path.join(new_project_files_path, legacy_vm_dir)
            new_vm_working_path = os.path.join(new_project_files_path, self.module_name.lower(), new_id)

@@ -204,57 +204,58 @@ class BaseManager:
                log.info('Moving "{}" to "{}"'.format(legacy_vm_working_path, new_vm_working_path))
                yield from wait_run_in_executor(shutil.move, legacy_vm_working_path, new_vm_working_path)
            except OSError as e:
                raise aiohttp.web.HTTPInternalServerError(text="Could not move VM working directory: {} to {} {}".format(legacy_vm_working_path,
                                                                                                                          new_vm_working_path, e))
                raise aiohttp.web.HTTPInternalServerError(text="Could not move vm working directory: {} to {} {}".format(legacy_vm_working_path,
                                                                                                                         new_vm_working_path, e))

        return new_id

    @asyncio.coroutine
    def create_vm(self, name, project_id, vm_id, *args, **kwargs):
    def create_node(self, name, project_id, node_id, *args, **kwargs):
        """
        Create a new VM
        Create a new node

        :param name: VM name
        :param name: Node name
        :param project_id: Project identifier
        :param vm_id: restore a VM identifier
        :param node_id: restore a node identifier
        """

        if vm_id in self._vms:
            return self._vms[vm_id]
        if node_id in self._nodes:
            return self._nodes[node_id]

        project = ProjectManager.instance().get_project(project_id)
        if vm_id and isinstance(vm_id, int):
        if node_id and isinstance(node_id, int):
            # old project
            with (yield from BaseManager._convert_lock):
                vm_id = yield from self.convert_old_project(project, vm_id, name)
                node_id = yield from self.convert_old_project(project, node_id, name)

        if not vm_id:
            vm_id = str(uuid4())
        if not node_id:
            node_id = str(uuid4())

        vm = self._VM_CLASS(name, vm_id, project, self, *args, **kwargs)
        if asyncio.iscoroutinefunction(vm.create):
            yield from vm.create()
        node = self._NODE_CLASS(name, node_id, project, self, *args, **kwargs)
        if asyncio.iscoroutinefunction(node.create):
            yield from node.create()
        else:
            vm.create()
        self._vms[vm.id] = vm
        project.add_vm(vm)
        return vm
            node.create()
        self._nodes[node.id] = node
        project.add_node(node)
        return node
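A standalone sketch of the node_id handling above: a known identifier returns the already-registered node, and a missing one is replaced by a fresh version-4 UUID. The dict stands in for self._nodes and the dict literal for self._NODE_CLASS:

    from uuid import uuid4

    nodes = {}  # stand-in for the manager's node registry

    def create_node(name, node_id=None):
        if node_id in nodes:
            # create_node() is idempotent for a known identifier.
            return nodes[node_id]
        if not node_id:
            node_id = str(uuid4())
        node = {"name": name, "node_id": node_id}
        nodes[node_id] = node
        return node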
    @asyncio.coroutine
    def close_vm(self, vm_id):
    def close_node(self, node_id):
        """
        Close a VM
        Close a node

        :param vm_id: VM identifier
        :param node_id: Node identifier

        :returns: VM instance
        :returns: Node instance
        """

        vm = self.get_vm(vm_id)
        if asyncio.iscoroutinefunction(vm.close):
            yield from vm.close()
        node = self.get_node(node_id)
        if asyncio.iscoroutinefunction(node.close):
            yield from node.close()
        else:
            vm.close()
        return vm
            node.close()
        return node
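A sketch of the coroutine-or-plain dispatch used by close_node() and create_node(): subclasses may implement close() either synchronously or as a coroutine, and the manager adapts at the call site. Written in the same pre-async/await asyncio style as the diff (it assumes a Python where asyncio.coroutine still exists); StubNode is a hypothetical stand-in:

    import asyncio

    class StubNode:
        def close(self):  # a synchronous close() implementation
            print("closed")

    @asyncio.coroutine
    def close_node(node):
        # Coroutine implementations are awaited, synchronous ones called directly.
        if asyncio.iscoroutinefunction(node.close):
            yield from node.close()
        else:
            node.close()
        return node

    asyncio.get_event_loop().run_until_complete(close_node(StubNode()))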
    @asyncio.coroutine
    def project_closing(self, project):

@@ -274,9 +275,9 @@ class BaseManager:
        :param project: Project instance
        """

        for vm in project.vms:
            if vm.id in self._vms:
                del self._vms[vm.id]
        for node in project.nodes:
            if node.id in self._nodes:
                del self._nodes[node.id]

    @asyncio.coroutine
    def project_moved(self, project):

@@ -299,20 +300,19 @@ class BaseManager:
        pass

    @asyncio.coroutine
    def delete_vm(self, vm_id):
    def delete_node(self, node_id):
        """
        Delete a VM. VM working directory will be destroy when
        we receive a commit.
        Delete a node. The node working directory will be destroyed when a commit is received.

        :param vm_id: VM identifier
        :returns: VM instance
        :param node_id: Node identifier
        :returns: Node instance
        """

        vm = yield from self.close_vm(vm_id)
        vm.project.mark_vm_for_destruction(vm)
        if vm.id in self._vms:
            del self._vms[vm.id]
        return vm
        node = yield from self.close_node(node_id)
        node.project.mark_node_for_destruction(node)
        if node.id in self._nodes:
            del self._nodes[node.id]
        return node
    @staticmethod
    def has_privileged_access(executable):

@@ -408,7 +408,7 @@ class BaseManager:
        # Windows path should not be send to a unix server
        if not sys.platform.startswith("win"):
            if re.match(r"^[A-Z]:", path) is not None:
                raise VMError("{} is not allowed on this remote server. Please use only a filename in {}.".format(path, img_directory))
                raise NodeError("{} is not allowed on this remote server. Please use only a filename in {}.".format(path, img_directory))

        if not os.path.isabs(path):
            s = os.path.split(path)

@@ -429,7 +429,7 @@ class BaseManager:
        img_directory = force_unix_path(img_directory)
        path = force_unix_path(path)
        if len(os.path.commonprefix([img_directory, path])) < len(img_directory):
            raise VMError("{} is not allowed on this remote server. Please use only a filename in {}.".format(path, img_directory))
            raise NodeError("{} is not allowed on this remote server. Please use only a filename in {}.".format(path, img_directory))

        return force_unix_path(path)
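A standalone sketch of the containment check above: an image path is accepted only if the image directory is a string prefix of it, so anything pointing outside the directory raises (a plain ValueError stands in for NodeError):

    import os

    def check_image_path(img_directory, path):
        # Reject any path that escapes the configured image directory.
        if len(os.path.commonprefix([img_directory, path])) < len(img_directory):
            raise ValueError("{} is not allowed on this remote server. "
                             "Please use only a filename in {}.".format(path, img_directory))
        return path

    print(check_image_path("/opt/gns3/images", "/opt/gns3/images/c7200.bin"))  # accepted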
@@ -454,7 +454,7 @@ class BaseManager:
    @asyncio.coroutine
    def list_images(self):
        """
        Return the list of available images for this VM type
        Return the list of available images for this node type

        :returns: Array of hash
        """

@@ -24,34 +24,33 @@ import tempfile
import psutil
import platform

from gns3server.utils import parse_version
from ..utils.asyncio import wait_run_in_executor
from ..ubridge.hypervisor import Hypervisor
from .vm_error import VMError
from .node_error import NodeError


log = logging.getLogger(__name__)


class BaseVM:
class BaseNode:

    """
    Base vm implementation.
    Base node implementation.

    :param name: name of this IOU vm
    :param vm_id: IOU instance identifier
    :param name: name of this node
    :param node_id: Node instance identifier
    :param project: Project instance
    :param manager: parent VM Manager
    :param manager: parent node manager
    :param console: TCP console port
    :param aux: TCP aux console port
    :param allocate_aux: Boolean if true will allocate an aux console port
    """

    def __init__(self, name, vm_id, project, manager, console=None, console_type="telnet", aux=None, allocate_aux=False):
    def __init__(self, name, node_id, project, manager, console=None, console_type="telnet", aux=None, allocate_aux=False):

        self._name = name
        self._usage = ""
        self._id = vm_id
        self._id = node_id
        self._project = project
        self._manager = manager
        self._console = console

@@ -61,7 +60,7 @@ class BaseVM:
        self._hw_virtualization = False
        self._ubridge_hypervisor = None
        self._closed = False
        self._vm_status = "stopped"
        self._node_status = "stopped"
        self._command_line = ""
        self._allocate_aux = allocate_aux

@@ -98,19 +97,19 @@ class BaseVM:
    @property
    def status(self):
        """Return current VM status"""
        """Return current node status"""

        return self._vm_status
        return self._node_status

    @status.setter
    def status(self, status):

        self._vm_status = status
        self._project.emit("vm.{}".format(status), self)
        self._node_status = status
        self._project.emit("node.{}".format(status), self)
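A tiny sketch of the renamed notifications: setting a status now emits a "node.<status>" event where "vm.<status>" was emitted before (emit() here is a hypothetical stand-in for the project's notifier):

    def emit(action, payload):
        print(action, payload)

    def set_status(status, name="R1"):
        # Event names follow the new node.* namespace.
        emit("node.{}".format(status), {"name": name})

    set_status("started")  # prints: node.started {'name': 'R1'}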
    @property
    def command_line(self):
        """Return command used to start the VM"""
        """Return command used to start the node"""

        return self._command_line

@@ -122,7 +121,7 @@ class BaseVM:
    @property
    def project(self):
        """
        Returns the VM current project.
        Returns the node current project.

        :returns: Project instance.
        """

@@ -132,7 +131,7 @@ class BaseVM:
    @property
    def name(self):
        """
        Returns the name for this VM.
        Returns the name for this node.

        :returns: name
        """

@@ -142,7 +141,7 @@ class BaseVM:
    @name.setter
    def name(self, new_name):
        """
        Sets the name of this VM.
        Sets the name of this node.

        :param new_name: name
        """

@@ -156,7 +155,7 @@ class BaseVM:
    @property
    def usage(self):
        """
        Returns the usage for this VM.
        Returns the usage for this node.

        :returns: usage
        """

@@ -166,7 +165,7 @@ class BaseVM:
    @usage.setter
    def usage(self, new_usage):
        """
        Sets the usage of this VM.
        Sets the usage of this node.

        :param new_usage: usage
        """

@@ -176,9 +175,9 @@ class BaseVM:
    @property
    def id(self):
        """
        Returns the ID for this VM.
        Returns the ID for this node.

        :returns: VM identifier (string)
        :returns: Node identifier (string)
        """

        return self._id

@@ -186,7 +185,7 @@ class BaseVM:
    @property
    def manager(self):
        """
        Returns the manager for this VM.
        Returns the manager for this node.

        :returns: instance of manager
        """

@@ -196,10 +195,10 @@ class BaseVM:
    @property
    def working_dir(self):
        """
        Return VM working directory
        Return the node working directory
        """

        return self._project.vm_working_directory(self)
        return self._project.node_working_directory(self)

    @property
    def temporary_directory(self):

@@ -207,43 +206,43 @@ class BaseVM:
            try:
                self._temporary_directory = tempfile.mkdtemp()
            except OSError as e:
                raise VMError("Can't create temporary directory: {}".format(e))
                raise NodeError("Can't create temporary directory: {}".format(e))
        return self._temporary_directory

    def create(self):
        """
        Creates the VM.
        Creates the node.
        """

        log.info("{module}: {name} [{id}] created".format(module=self.manager.module_name,
                                                          name=self.name,
                                                          id=self.id))
        self._project.emit("vm.created", self)
        self._project.emit("node.created", self)

    @asyncio.coroutine
    def delete(self):
        """
        Delete the VM (including all its files).
        Delete the node (including all its files).
        """

        self._project.emit("vm.deleted", self)
        directory = self.project.vm_working_directory(self)
        self._project.emit("node.deleted", self)
        directory = self.project.node_working_directory(self)
        if os.path.exists(directory):
            try:
                yield from wait_run_in_executor(shutil.rmtree, directory)
            except OSError as e:
                raise aiohttp.web.HTTPInternalServerError(text="Could not delete the VM working directory: {}".format(e))
                raise aiohttp.web.HTTPInternalServerError(text="Could not delete the node working directory: {}".format(e))

    def start(self):
        """
        Starts the VM process.
        Starts the node process.
        """

        raise NotImplementedError
    def stop(self):
        """
        Starts the VM process.
        Stops the node process.
        """

        raise NotImplementedError
@@ -251,7 +250,7 @@ class BaseVM:
    @asyncio.coroutine
    def close(self):
        """
        Close the VM process.
        Close the node process.
        """

        if self._closed:

@@ -290,7 +289,7 @@ class BaseVM:
    @property
    def aux(self):
        """
        Returns the aux console port of this VM.
        Returns the aux console port of this node.

        :returns: aux console port
        """

@@ -321,7 +320,7 @@ class BaseVM:
    @property
    def console(self):
        """
        Returns the console port of this VM.
        Returns the console port of this node.

        :returns: console port
        """

@@ -340,7 +339,7 @@ class BaseVM:
            return

        if self._console_type == "vnc" and console is not None and console < 5900:
            raise VMError("VNC console require a port superior or equal to 5900 currently it's {}".format(console))
            raise NodeError("VNC console require a port superior or equal to 5900 currently it's {}".format(console))

        if self._console:
            self._manager.port_manager.release_tcp_port(self._console, self._project)

@@ -359,7 +358,7 @@ class BaseVM:
    @property
    def console_type(self):
        """
        Returns the console type for this VM.
        Returns the console type for this node.

        :returns: console type (string)
        """

@@ -369,15 +368,11 @@ class BaseVM:
    @console_type.setter
    def console_type(self, console_type):
        """
        Sets the console type for this VM.
        Sets the console type for this node.

        :param console_type: console type (string)
        """

        log.info('QEMU VM "{name}" [{id}] has set the console type to {console_type}'.format(name=self._name,
                                                                                             id=self._id,
                                                                                             console_type=console_type))

        if console_type != self._console_type:
            # get a new port if the console type change
            self._manager.port_manager.release_tcp_port(self._console, self._project)

@@ -406,17 +401,17 @@ class BaseVM:
        path = shutil.which("ubridge")

        if path is None or len(path) == 0:
            raise VMError("uBridge is not installed")
            raise NodeError("uBridge is not installed")
        return path

    @asyncio.coroutine
    def _start_ubridge(self):
        """
        Starts uBridge (handles connections to and from this VMware VM).
        Starts uBridge (handles connections to and from this node).
        """

        if not self._manager.has_privileged_access(self.ubridge_path):
            raise VMError("uBridge requires root access or capability to interact with network adapters")
            raise NodeError("uBridge requires root access or capability to interact with network adapters")

        server_config = self._manager.config.get_section_config("Server")
        server_host = server_config.get("host")

@@ -430,7 +425,7 @@ class BaseVM:
    @property
    def hw_virtualization(self):
        """
        Returns either the VM is using hardware virtualization or not.
        Returns either the node is using hardware virtualization or not.

        :return: boolean
        """

@@ -37,7 +37,7 @@ DOCKER_MINIMUM_API_VERSION = "1.21"

class Docker(BaseManager):

    _VM_CLASS = DockerVM
    _NODE_CLASS = DockerVM

    def __init__(self):
        super().__init__()

@@ -19,10 +19,10 @@
Custom exceptions for the Docker module.
"""

from ..vm_error import VMError
from ..node_error import NodeError


class DockerError(VMError):
class DockerError(NodeError):
    pass
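A sketch of the refactored exception hierarchy this hunk (and the matching Dynamips, IOU, Qemu and VirtualBox hunks below) establishes: every module error derives from the common NodeError, so a single except clause can catch them all:

    class NodeError(Exception):
        pass

    class DockerError(NodeError):
        pass

    try:
        raise DockerError("Cannot start the packet capture: uBridge is not running")
    except NodeError as e:  # catches DockerError, DynamipsError, IOUError, ...
        print(e)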
@@ -27,9 +27,8 @@ import aiohttp
import json
import os

from ...ubridge.hypervisor import Hypervisor
from .docker_error import *
from ..base_vm import BaseVM
from ..base_node import BaseNode
from ..adapters.ethernet_adapter import EthernetAdapter
from ..nios.nio_udp import NIOUDP
from ...utils.asyncio.telnet_server import AsyncioTelnetServer

@@ -43,11 +42,11 @@ import logging
log = logging.getLogger(__name__)


class DockerVM(BaseVM):
class DockerVM(BaseNode):
    """Docker container implementation.

    :param name: Docker container name
    :param vm_id: Docker VM identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Manager instance
    :param image: Docker image

@@ -59,11 +58,11 @@ class DockerVM(BaseVM):
    :param console_http_path: Url part with the path of the web interface
    """

    def __init__(self, name, vm_id, project, manager, image,
    def __init__(self, name, node_id, project, manager, image,
                 console=None, aux=None, start_command=None,
                 adapters=None, environment=None, console_type="telnet",
                 console_resolution="1024x768", console_http_port=80, console_http_path="/"):
        super().__init__(name, vm_id, project, manager, console=console, aux=aux, allocate_aux=True, console_type=console_type)
        super().__init__(name, node_id, project, manager, console=console, aux=aux, allocate_aux=True, console_type=console_type)

        self._image = image
        self._start_command = start_command

@@ -93,7 +92,7 @@ class DockerVM(BaseVM):
    def __json__(self):
        return {
            "name": self._name,
            "vm_id": self._id,
            "node_id": self._id,
            "container_id": self._cid,
            "project_id": self._project.id,
            "image": self._image,

@@ -757,7 +756,7 @@ class DockerVM(BaseVM):

        adapter = "bridge{}".format(adapter_number)
        if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running():
            raise VMwareError("Cannot start the packet capture: uBridge is not running")
            raise DockerError("Cannot start the packet capture: uBridge is not running")
        yield from self._ubridge_hypervisor.send('bridge start_capture {name} "{output_file}"'.format(name=adapter, output_file=output_file))

    @asyncio.coroutine

@@ -770,7 +769,7 @@ class DockerVM(BaseVM):

        adapter = "bridge{}".format(adapter_number)
        if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running():
            raise VMwareError("Cannot stop the packet capture: uBridge is not running")
            raise DockerError("Cannot stop the packet capture: uBridge is not running")
        yield from self._ubridge_hypervisor.send("bridge stop_capture {name}".format(name=adapter))

    @asyncio.coroutine

@@ -102,7 +102,7 @@ WIC_MATRIX = {"WIC-1ENET": WIC_1ENET,

class Dynamips(BaseManager):

    _VM_CLASS = DynamipsVM
    _NODE_CLASS = DynamipsVM
    _DEVICE_CLASS = DynamipsDevice
    _ghost_ios_lock = None

@@ -142,7 +142,7 @@ class Dynamips(BaseManager):

    def release_dynamips_id(self, project_id, dynamips_id):
        """
        A dynamips id can be reused by another vm
        A Dynamips id can be reused by another VM

        :param project_id: UUID of the project
        :param dynamips_id: Asked id

@@ -231,9 +231,9 @@ class Dynamips(BaseManager):
        :param project: Project instance
        """

        for vm in self._vms.values():
            if vm.project.id == project.id:
                yield from vm.hypervisor.set_working_dir(project.module_working_directory(self.module_name.lower()))
        for node in self._nodes.values():
            if node.project.id == project.id:
                yield from node.hypervisor.set_working_dir(project.module_working_directory(self.module_name.lower()))

        for device in self._devices.values():
            if device.project.id == project.id:

@@ -248,10 +248,10 @@ class Dynamips(BaseManager):
        """

        # save the configs when the project is committed
        for vm in self._vms.copy().values():
            if vm.project.id == project.id:
        for node in self._nodes.copy().values():
            if node.project.id == project.id:
                try:
                    yield from vm.save_configs()
                    yield from node.save_configs()
                except DynamipsError as e:
                    log.warning(e)
                    continue

@@ -273,7 +273,6 @@ class Dynamips(BaseManager):

        :param name: Device name
        :param project_id: Project identifier
        :param vm_id: restore a VM identifier
        """

        project = ProjectManager.instance().get_project(project_id)

@@ -19,9 +19,9 @@
Custom exceptions for the Dynamips module.
"""

from ..vm_error import VMError
from ..node_error import NodeError


class DynamipsError(VMError):
class DynamipsError(NodeError):

    pass

@@ -43,9 +43,9 @@ class DynamipsVM:
    Factory to create an Router object based on the correct platform.
    """

    def __new__(cls, name, vm_id, project, manager, dynamips_id, platform, **kwargs):
    def __new__(cls, name, node_id, project, manager, dynamips_id, platform, **kwargs):

        if platform not in PLATFORMS:
            raise DynamipsError("Unknown router platform: {}".format(platform))

        return PLATFORMS[platform](name, vm_id, project, manager, dynamips_id, **kwargs)
        return PLATFORMS[platform](name, node_id, project, manager, dynamips_id, **kwargs)
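A standalone sketch of the factory pattern in DynamipsVM.__new__ above: a platform table maps names to router classes, and unknown platforms are rejected before construction (the one-entry table and stub class are illustrative):

    class C7200:
        def __init__(self, name, node_id):
            self.name, self.id = name, node_id

    PLATFORMS = {"c7200": C7200}  # stand-in for the real platform table

    def make_router(platform, name, node_id):
        if platform not in PLATFORMS:
            raise ValueError("Unknown router platform: {}".format(platform))
        return PLATFORMS[platform](name, node_id)

    router = make_router("c7200", "R1", "a1b2c3d4")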
@@ -35,7 +35,7 @@ class C1700(Router):
    Dynamips c1700 router.

    :param name: The name of this router
    :param vm_id: Router instance identifier
    :param node_id: Node instance identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips

@@ -46,9 +46,9 @@ class C1700(Router):
    1710 is not supported.
    """

    def __init__(self, name, vm_id, project, manager, dynamips_id, console=None, aux=None, chassis="1720"):
    def __init__(self, name, node_id, project, manager, dynamips_id, console=None, aux=None, chassis="1720"):

        super().__init__(name, vm_id, project, manager, dynamips_id, console, aux, platform="c1700")
        super().__init__(name, node_id, project, manager, dynamips_id, console, aux, platform="c1700")

        # Set default values for this platform (must be the same as Dynamips)
        self._ram = 64

@@ -37,7 +37,7 @@ class C2600(Router):
    Dynamips c2600 router.

    :param name: The name of this router
    :param vm_id: Router instance identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips

@@ -61,9 +61,9 @@ class C2600(Router):
                "2650XM": C2600_MB_1FE,
                "2651XM": C2600_MB_2FE}

    def __init__(self, name, vm_id, project, manager, dynamips_id, console=None, aux=None, chassis="2610"):
    def __init__(self, name, node_id, project, manager, dynamips_id, console=None, aux=None, chassis="2610"):

        super().__init__(name, vm_id, project, manager, dynamips_id, console, aux, platform="c2600")
        super().__init__(name, node_id, project, manager, dynamips_id, console, aux, platform="c2600")

        # Set default values for this platform (must be the same as Dynamips)
        self._ram = 64

@@ -35,7 +35,7 @@ class C2691(Router):
    Dynamips c2691 router.

    :param name: The name of this router
    :param vm_id: Router instance identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips

@@ -43,9 +43,9 @@ class C2691(Router):
    :param aux: auxiliary console port
    """

    def __init__(self, name, vm_id, project, manager, dynamips_id, console=None, aux=None, chassis=None):
    def __init__(self, name, node_id, project, manager, dynamips_id, console=None, aux=None, chassis=None):

        super().__init__(name, vm_id, project, manager, dynamips_id, console, aux, platform="c2691")
        super().__init__(name, node_id, project, manager, dynamips_id, console, aux, platform="c2691")

        # Set default values for this platform (must be the same as Dynamips)
        self._ram = 128

@@ -34,7 +34,7 @@ class C3600(Router):
    Dynamips c3600 router.

    :param name: The name of this router
    :param vm_id: Router instance identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips

@@ -44,9 +44,9 @@ class C3600(Router):
    3620, 3640 or 3660 (default = 3640).
    """

    def __init__(self, name, vm_id, project, manager, dynamips_id, console=None, aux=None, chassis="3640"):
    def __init__(self, name, node_id, project, manager, dynamips_id, console=None, aux=None, chassis="3640"):

        super().__init__(name, vm_id, project, manager, dynamips_id, console, aux, platform="c3600")
        super().__init__(name, node_id, project, manager, dynamips_id, console, aux, platform="c3600")

        # Set default values for this platform (must be the same as Dynamips)
        self._ram = 128

@@ -35,7 +35,7 @@ class C3725(Router):
    Dynamips c3725 router.

    :param name: The name of this router
    :param vm_id: Router instance identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips

@@ -43,9 +43,9 @@ class C3725(Router):
    :param aux: auxiliary console port
    """

    def __init__(self, name, vm_id, project, manager, dynamips_id, console=None, aux=None, chassis=None):
    def __init__(self, name, node_id, project, manager, dynamips_id, console=None, aux=None, chassis=None):

        super().__init__(name, vm_id, project, manager, dynamips_id, console, aux, platform="c3725")
        super().__init__(name, node_id, project, manager, dynamips_id, console, aux, platform="c3725")

        # Set default values for this platform (must be the same as Dynamips)
        self._ram = 128

@@ -35,7 +35,7 @@ class C3745(Router):
    Dynamips c3745 router.

    :param name: The name of this router
    :param vm_id: Router instance identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips

@@ -43,9 +43,9 @@ class C3745(Router):
    :param aux: auxiliary console port
    """

    def __init__(self, name, vm_id, project, manager, dynamips_id, console=None, aux=None, chassis=None):
    def __init__(self, name, node_id, project, manager, dynamips_id, console=None, aux=None, chassis=None):

        super().__init__(name, vm_id, project, manager, dynamips_id, console, aux, platform="c3745")
        super().__init__(name, node_id, project, manager, dynamips_id, console, aux, platform="c3745")

        # Set default values for this platform (must be the same as Dynamips)
        self._ram = 128

@@ -37,7 +37,7 @@ class C7200(Router):
    Dynamips c7200 router (model is 7206).

    :param name: The name of this router
    :param vm_id: Router instance identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips

@@ -46,9 +46,9 @@ class C7200(Router):
    :param npe: Default NPE
    """

    def __init__(self, name, vm_id, project, manager, dynamips_id, console=None, aux=None, npe="npe-400", chassis=None):
    def __init__(self, name, node_id, project, manager, dynamips_id, console=None, aux=None, npe="npe-400", chassis=None):

        super().__init__(name, vm_id, project, manager, dynamips_id, console, aux, platform="c7200")
        super().__init__(name, node_id, project, manager, dynamips_id, console, aux, platform="c7200")

        # Set default values for this platform (must be the same as Dynamips)
        self._ram = 256

@@ -69,7 +69,7 @@ class Device:
    @name.setter
    def name(self, new_name):
        """
        Sets the name of this VM.
        Sets the name of this device.

        :param new_name: name
        """

@@ -31,7 +31,7 @@ import binascii
import logging
log = logging.getLogger(__name__)

from ...base_vm import BaseVM
from ...base_node import BaseNode
from ..dynamips_error import DynamipsError
from ..nios.nio_udp import NIOUDP

@@ -39,13 +39,13 @@ from gns3server.utils.asyncio import wait_run_in_executor, monitor_process
from gns3server.utils.images import md5sum


class Router(BaseVM):
class Router(BaseNode):

    """
    Dynamips router implementation.

    :param name: The name of this router
    :param vm_id: Router instance identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Parent VM Manager
    :param dynamips_id: ID to use with Dynamips

@@ -59,11 +59,11 @@ class Router(BaseVM):
                2: "running",
                3: "suspended"}

    def __init__(self, name, vm_id, project, manager, dynamips_id=None, console=None, aux=None, platform="c7200", hypervisor=None, ghost_flag=False):
    def __init__(self, name, node_id, project, manager, dynamips_id=None, console=None, aux=None, platform="c7200", hypervisor=None, ghost_flag=False):

        allocate_aux = manager.config.get_section_config("Dynamips").getboolean("allocate_aux_console_ports", False)

        super().__init__(name, vm_id, project, manager, console=console, aux=aux, allocate_aux=aux)
        super().__init__(name, node_id, project, manager, console=console, aux=aux, allocate_aux=aux)

        self._hypervisor = hypervisor
        self._dynamips_id = dynamips_id

@@ -111,7 +111,7 @@ class Router(BaseVM):
    def __json__(self):

        router_info = {"name": self.name,
                       "vm_id": self.id,
                       "node_id": self.id,
                       "vm_directory": os.path.join(self.project.module_working_directory(self.manager.module_name.lower())),
                       "project_id": self.project.id,
                       "dynamips_id": self._dynamips_id,

@@ -1549,7 +1549,7 @@ class Router(BaseVM):

    def delete(self):
        """
        Delete the VM (including all its files).
        Delete this VM (including all its files).
        """

        # delete the VM files

@@ -32,7 +32,7 @@ log = logging.getLogger(__name__)

class IOU(BaseManager):

    _VM_CLASS = IOUVM
    _NODE_CLASS = IOUVM

    def __init__(self):

@@ -41,35 +41,35 @@ class IOU(BaseManager):
        self._used_application_ids = {}

    @asyncio.coroutine
    def create_vm(self, *args, **kwargs):
    def create_node(self, *args, **kwargs):
        """
        Creates a new IOU VM.

        :returns: IOUVM instance
        """

        vm = yield from super().create_vm(*args, **kwargs)
        node = yield from super().create_node(*args, **kwargs)
        try:
            self._used_application_ids[vm.id] = self._free_application_ids.pop(0)
            self._used_application_ids[node.id] = self._free_application_ids.pop(0)
        except IndexError:
            raise IOUError("Cannot create a new IOU VM (limit of 512 VMs reached on this host)")
        return vm
        return node

    @asyncio.coroutine
    def close_vm(self, vm_id, *args, **kwargs):
    def close_node(self, node_id, *args, **kwargs):
        """
        Closes an IOU VM.

        :returns: IOUVM instance
        """

        vm = self.get_vm(vm_id)
        if vm_id in self._used_application_ids:
            i = self._used_application_ids[vm_id]
        node = self.get_node(node_id)
        if node_id in self._used_application_ids:
            i = self._used_application_ids[node_id]
            self._free_application_ids.insert(0, i)
            del self._used_application_ids[vm_id]
        yield from super().close_vm(vm_id, *args, **kwargs)
        return vm
            del self._used_application_ids[node_id]
        yield from super().close_node(node_id, *args, **kwargs)
        return node

    @asyncio.coroutine
    def project_committed(self, project):

@@ -80,32 +80,32 @@ class IOU(BaseManager):
        """

        # save the configs when the project is committed
        for vm in self._vms.copy().values():
            if vm.project.id == project.id:
        for node in self._nodes.copy().values():
            if node.project.id == project.id:
                try:
                    vm.save_configs()
                    node.save_configs()
                except IOUError as e:
                    log.warning(e)
                    continue

    def get_application_id(self, vm_id):
    def get_application_id(self, node_id):
        """
        Get an unique application identifier for IOU.

        :param vm_id: IOU VM identifier
        :param node_id: Node identifier

        :returns: IOU application identifier
        """

        return self._used_application_ids.get(vm_id, 1)
        return self._used_application_ids.get(node_id, 1)
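A standalone sketch of the application-id pool used above: ids come from a finite free list on create and return to it on close, which is what caps concurrent IOU nodes on a host (the sizes and names here are illustrative):

    free_ids = list(range(1, 513))  # 512 available application ids
    used_ids = {}

    def allocate(node_id):
        try:
            used_ids[node_id] = free_ids.pop(0)
        except IndexError:
            raise RuntimeError("limit of 512 VMs reached on this host")
        return used_ids[node_id]

    def release(node_id):
        # Returned ids go to the front so they are reused first.
        if node_id in used_ids:
            free_ids.insert(0, used_ids.pop(node_id))

    allocate("n1")
    release("n1")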
    @staticmethod
    def get_legacy_vm_workdir(legacy_vm_id, name):
        """
        Returns the name of the legacy working directory (pre 1.3) name for a VM.
        Returns the name of the legacy working directory (pre 1.3) name for a node.

        :param legacy_vm_id: legacy VM identifier (integer)
        :param name: VM name (not used)
        :param legacy_vm_id: legacy node identifier (integer)
        :param name: Node name (not used)

        :returns: working directory name
        """

@@ -19,8 +19,8 @@
Custom exceptions for the IOU module.
"""

from ..vm_error import VMError
from ..node_error import NodeError


class IOUError(VMError):
class IOUError(NodeError):
    pass

@@ -42,7 +42,7 @@ from ..adapters.serial_adapter import SerialAdapter
from ..nios.nio_udp import NIOUDP
from ..nios.nio_tap import NIOTAP
from ..nios.nio_generic_ethernet import NIOGenericEthernet
from ..base_vm import BaseVM
from ..base_node import BaseNode
from .utils.iou_import import nvram_import
from .utils.iou_export import nvram_export
from .ioucon import start_ioucon

@@ -55,22 +55,22 @@ import sys
log = logging.getLogger(__name__)


class IOUVM(BaseVM):
class IOUVM(BaseNode):
    module_name = 'iou'

    """
    IOU VM implementation.

    :param name: IOU VM name
    :param vm_id: IOU VM identifier
    :param node_id: Node identifier
    :param project: Project instance
    :param manager: Manager instance
    :param console: TCP console port
    """

    def __init__(self, name, vm_id, project, manager, console=None):
    def __init__(self, name, node_id, project, manager, console=None):

        super().__init__(name, vm_id, project, manager, console=console)
        super().__init__(name, node_id, project, manager, console=console)

        self._iouyap_process = None
        self._iou_process = None

@@ -202,7 +202,7 @@ class IOUVM(BaseVM):
    def __json__(self):

        iou_vm_info = {"name": self.name,
                       "vm_id": self.id,
                       "node_id": self.id,
                       "vm_directory": self.working_dir,
                       "console": self._console,
                       "project_id": self.project.id,

@@ -315,7 +315,7 @@ class IOUVM(BaseVM):
                                                                     new_nvram=nvram))
        self._nvram = nvram

    @BaseVM.name.setter
    @BaseNode.name.setter
    def name(self, new_name):
        """
        Sets the name of this IOU VM.

@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.


class VMError(Exception):
class NodeError(Exception):

    def __init__(self, message, original_exception=None):

@@ -24,7 +24,7 @@ import zipstream
import zipfile
import json

from uuid import UUID, uuid4
from uuid import UUID
from .port_manager import PortManager
from .notification_manager import NotificationManager
from ..config import Config

@@ -38,8 +38,8 @@ log = logging.getLogger(__name__)
class Project:

    """
    A project contains a list of VM.
    In theory VM are isolated project/project.
    A project contains a list of nodes.
    In theory nodes are isolated project/project.

    :param project_id: force project identifier (None by default auto generate an UUID)
    :param path: path of the project. (None use the standard directory)

@@ -55,8 +55,8 @@ class Project:
            raise aiohttp.web.HTTPBadRequest(text="{} is not a valid UUID".format(project_id))
        self._id = project_id

        self._vms = set()
        self._vms_to_destroy = set()
        self._nodes = set()
        self._nodes_to_destroy = set()
        self.temporary = temporary
        self._used_tcp_ports = set()
        self._used_udp_ports = set()

@@ -155,9 +155,9 @@ class Project:
        self._name = name

    @property
    def vms(self):
    def nodes(self):

        return self._vms
        return self._nodes

    @property
    def temporary(self):

@@ -260,21 +260,21 @@ class Project:

        return os.path.join(self._path, "project-files", module_name)

    def vm_working_directory(self, vm):
    def node_working_directory(self, node):
        """
        Returns a working directory for a specific VM.
        Returns a working directory for a specific node.
        If the directory doesn't exist, the directory is created.

        :param vm: VM instance
        :param node: Node instance

        :returns: VM working directory
        :returns: Node working directory
        """

        workdir = os.path.join(self._path, "project-files", vm.manager.module_name.lower(), vm.id)
        workdir = os.path.join(self._path, "project-files", node.manager.module_name.lower(), node.id)
        try:
            os.makedirs(workdir, exist_ok=True)
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(text="Could not create the VM working directory: {}".format(e))
            raise aiohttp.web.HTTPInternalServerError(text="Could not create the node working directory: {}".format(e))
        return workdir
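A runnable sketch of the per-node directory layout produced by node_working_directory() above: project-files/<module>/<node_id> under the project path (the example path and ids are made up):

    import os

    def node_working_directory(project_path, module_name, node_id):
        workdir = os.path.join(project_path, "project-files", module_name.lower(), node_id)
        os.makedirs(workdir, exist_ok=True)  # created on first access
        return workdir

    print(node_working_directory("/tmp/demo-project", "Qemu", "a1b2c3d4"))
    # -> /tmp/demo-project/project-files/qemu/a1b2c3d4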
    def tmp_working_directory(self):

@@ -297,34 +297,34 @@ class Project:
            raise aiohttp.web.HTTPInternalServerError(text="Could not create the capture working directory: {}".format(e))
        return workdir

    def mark_vm_for_destruction(self, vm):
    def mark_node_for_destruction(self, node):
        """
        :param vm: An instance of VM
        :param node: An instance of Node
        """

        self.remove_vm(vm)
        self._vms_to_destroy.add(vm)
        self.remove_node(node)
        self._nodes_to_destroy.add(node)

    def add_vm(self, vm):
    def add_node(self, node):
        """
        Adds a VM to the project.
        In theory this should be called by the VM manager.
        Adds a node to the project.
        In theory this should be called by the node manager.

        :param vm: VM instance
        :param node: Node instance
        """

        self._vms.add(vm)
        self._nodes.add(node)

    def remove_vm(self, vm):
    def remove_node(self, node):
        """
        Removes a VM from the project.
        In theory this should be called by the VM manager.
        Removes a node from the project.
        In theory this should be called by the node manager.

        :param vm: VM instance
        :param node: Node instance
        """

        if vm in self._vms:
            self._vms.remove(vm)
        if node in self._nodes:
            self._nodes.remove(node)
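A sketch of the node lifecycle these methods implement: a node marked for destruction leaves the active set at once, but its files are only deleted when the project commit runs (strings stand in for node objects):

    active = set()
    to_destroy = set()

    def add_node(node):
        active.add(node)

    def mark_node_for_destruction(node):
        active.discard(node)
        to_destroy.add(node)

    def commit():
        while to_destroy:
            node = to_destroy.pop()  # node.delete() would run here
            print("deleted", node)

    add_node("R1")
    mark_node_for_destruction("R1")
    commit()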
|
||||
|
||||
@asyncio.coroutine
|
||||
def close(self):
|
||||
@ -353,8 +353,8 @@ class Project:
|
||||
"""
|
||||
|
||||
tasks = []
|
||||
for vm in self._vms:
|
||||
tasks.append(asyncio.async(vm.manager.close_vm(vm.id)))
|
||||
for node in self._nodes:
|
||||
tasks.append(asyncio.async(node.manager.close_node(node.id)))
|
||||
|
||||
if tasks:
|
||||
done, _ = yield from asyncio.wait(tasks)
|
||||
@ -362,7 +362,7 @@ class Project:
|
||||
try:
|
||||
future.result()
|
||||
except (Exception, GeneratorExit) as e:
|
||||
log.error("Could not close VM or device {}".format(e), exc_info=1)
|
||||
log.error("Could not close node {}".format(e), exc_info=1)
|
||||
|
||||
if cleanup and os.path.exists(self.path):
|
||||
try:
|
||||
@ -378,7 +378,7 @@ class Project:
|
||||
if self._used_udp_ports:
|
||||
log.warning("Project {} has UDP ports still in use: {}".format(self.id, self._used_udp_ports))
|
||||
|
||||
# clean the remaining ports that have not been cleaned by their respective VM or device.
|
||||
# clean the remaining ports that have not been cleaned by their respective node.
|
||||
port_manager = PortManager.instance()
|
||||
for port in self._used_tcp_ports.copy():
|
||||
port_manager.release_tcp_port(port, self)
|
||||
@ -391,10 +391,10 @@ class Project:
|
||||
Writes project changes on disk
|
||||
"""
|
||||
|
||||
while self._vms_to_destroy:
|
||||
vm = self._vms_to_destroy.pop()
|
||||
yield from vm.delete()
|
||||
self.remove_vm(vm)
|
||||
while self._nodes_to_destroy:
|
||||
node = self._nodes_to_destroy.pop()
|
||||
yield from node.delete()
|
||||
self.remove_node(node)
|
||||
for module in self.compute():
|
||||
yield from module.instance().project_committed(self)
|
||||
|
||||
@ -427,7 +427,7 @@ class Project:
|
||||
|
||||
def compute(self):
|
||||
"""
|
||||
Returns all loaded VM compute.
|
||||
Returns all loaded modules from compute.
|
||||
"""
|
||||
|
||||
# We import it at the last time to avoid circular dependencies
|
||||
@ -571,7 +571,7 @@ class Project:
|
||||
Import a project contain in a zip file
|
||||
|
||||
:param stream: A io.BytesIO of the zipfile
|
||||
:param gns3vm: True move docker, iou and qemu to the GNS3 VM
|
||||
:param gns3vm: True move Docker, IOU and Qemu to the GNS3 VM
|
||||
"""
|
||||
|
||||
with zipfile.ZipFile(stream) as myzip:
|
||||
@ -608,7 +608,7 @@ class Project:
|
||||
vm_directory = os.path.join(self.path, "servers", "vm", "project-files")
|
||||
vm_server_use = False
|
||||
|
||||
for module, device_type in modules_to_vm.items():
|
||||
for module, vm_type in modules_to_vm.items():
|
||||
module_directory = os.path.join(self.path, "project-files", module)
|
||||
if os.path.exists(module_directory):
|
||||
os.makedirs(vm_directory, exist_ok=True)
|
||||
@ -616,7 +616,7 @@ class Project:
|
||||
|
||||
# Patch node to use the GNS3 VM
|
||||
for node in topology["topology"]["nodes"]:
|
||||
if node["type"] == device_type:
|
||||
if node["type"] == vm_type:
|
||||
node["server_id"] = 2
|
||||
vm_server_use = True
|
||||
|
||||
|
@ -94,7 +94,7 @@ class ProjectManager:
|
||||
raise aiohttp.web.HTTPNotFound(text="Project ID {} doesn't exist".format(project_id))
|
||||
del self._projects[project_id]
|
||||
|
||||
def check_hardware_virtualization(self, source_vm):
|
||||
def check_hardware_virtualization(self, source_node):
|
||||
"""
|
||||
Checks if hardware virtualization can be used.
|
||||
|
||||
@ -102,9 +102,9 @@ class ProjectManager:
|
||||
"""
|
||||
|
||||
for project in self._projects.values():
|
||||
for vm in project.vms:
|
||||
if vm == source_vm:
|
||||
for node in project.nodes:
|
||||
if node == source_node:
|
||||
continue
|
||||
if vm.hw_virtualization and vm.__class__.__name__ != source_vm.__class__.__name__:
|
||||
if node.hw_virtualization and node.__class__.__name__ != source_node.__class__.__name__:
|
||||
return False
|
||||
return True
|
||||
|
@ -37,7 +37,7 @@ log = logging.getLogger(__name__)
|
||||
|
||||
class Qemu(BaseManager):
|
||||
|
||||
_VM_CLASS = QemuVM
|
||||
_NODE_CLASS = QemuVM
|
||||
|
||||
@staticmethod
|
||||
@asyncio.coroutine
|
||||
@ -218,10 +218,10 @@ class Qemu(BaseManager):
|
||||
@staticmethod
|
||||
def get_legacy_vm_workdir(legacy_vm_id, name):
|
||||
"""
|
||||
Returns the name of the legacy working directory name for a VM.
|
||||
Returns the name of the legacy working directory name for a node.
|
||||
|
||||
:param legacy_vm_id: legacy VM identifier (integer)
|
||||
:param: VM name (not used)
|
||||
:param: node name (not used)
|
||||
|
||||
:returns: working directory name
|
||||
"""
|
||||
|
@ -19,9 +19,9 @@
|
||||
Custom exceptions for the Qemu module.
|
||||
"""
|
||||
|
||||
from ..vm_error import VMError
|
||||
from ..node_error import NodeError
|
||||
|
||||
|
||||
class QemuError(VMError):
|
||||
class QemuError(NodeError):
|
||||
|
||||
pass
|
||||
|
@ -36,7 +36,7 @@ from ..adapters.ethernet_adapter import EthernetAdapter
from ..nios.nio_udp import NIOUDP
from ..nios.nio_tap import NIOTAP
from ..nios.nio_nat import NIONAT
from ..base_vm import BaseVM
from ..base_node import BaseNode
from ...schemas.qemu import QEMU_OBJECT_SCHEMA, QEMU_PLATFORMS
from ...utils.asyncio import monitor_process
from ...utils.images import md5sum

@ -48,14 +48,14 @@ import logging
log = logging.getLogger(__name__)


class QemuVM(BaseVM):
class QemuVM(BaseNode):
module_name = 'qemu'

"""
QEMU VM implementation.

:param name: Qemu VM name
:param vm_id: Qemu VM identifier
:param node_id: Node identifier
:param project: Project instance
:param manager: Manager instance
:param console: TCP console port

@ -64,9 +64,9 @@ class QemuVM(BaseVM):
:param console: TCP console port
"""

def __init__(self, name, vm_id, project, manager, linked_clone=True, qemu_path=None, console=None, console_type="telnet", platform=None):
def __init__(self, name, node_id, project, manager, linked_clone=True, qemu_path=None, console=None, console_type="telnet", platform=None):

super().__init__(name, vm_id, project, manager, console=console, console_type=console_type)
super().__init__(name, node_id, project, manager, console=console, console_type=console_type)
server_config = manager.config.get_section_config("Server")
self._host = server_config.get("host", "127.0.0.1")
self._monitor_host = server_config.get("monitor_host", "127.0.0.1")

@ -1448,7 +1448,7 @@ class QemuVM(BaseVM):
def __json__(self):
answer = {
"project_id": self.project.id,
"vm_id": self.id,
"node_id": self.id,
"vm_directory": self.working_dir
}
# Qemu has a long list of options. The JSON schema is the single source of information
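A minimal sketch of the pattern that last comment describes: keep a fixed core of ids and let a JSON schema decide which of the many QEMU options get serialized. The schema and attributes below are invented for illustration, not the real QEMU_OBJECT_SCHEMA::

    FAKE_QEMU_SCHEMA = {"properties": {"ram": {}, "cpus": {}, "platform": {}}}

    class FakeQemuVM:
        def __init__(self):
            self.ram = 256
            self.cpus = 1
            self.platform = "x86_64"

        def __json__(self):
            answer = {"project_id": "project-uuid", "node_id": "node-uuid"}
            # One loop over the schema instead of hand-listing every option.
            for prop in FAKE_QEMU_SCHEMA["properties"]:
                answer[prop] = getattr(self, prop)
            return answer

    print(FakeQemuVM().__json__())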
@ -35,7 +35,7 @@ from .virtualbox_error import VirtualBoxError

class VirtualBox(BaseManager):

_VM_CLASS = VirtualBoxVM
_NODE_CLASS = VirtualBoxVM

def __init__(self):

@ -160,7 +160,7 @@ class VirtualBox(BaseManager):
Gets VirtualBox VM list.
"""

vms = []
vbox_vms = []
result = yield from self.execute("list", ["vms"])
for line in result:
if len(line) == 0 or line[0] != '"' or line[-1:] != "}":

@ -182,16 +182,16 @@ class VirtualBox(BaseManager):
break
except ValueError:
continue
vms.append({"vmname": vmname, "ram": ram})
return vms
vbox_vms.append({"vmname": vmname, "ram": ram})
return vbox_vms

@staticmethod
def get_legacy_vm_workdir(legacy_vm_id, name):
"""
Returns the name of the legacy working directory name for a VM.
Returns the name of the legacy working directory for a node.

:param legacy_vm_id: legacy VM identifier (not used)
:param name: VM name
:param legacy_vm_id: legacy node identifier (not used)
:param name: node name

:returns: working directory name
"""
@ -19,9 +19,9 @@
Custom exceptions for the VirtualBox module.
"""

from ..vm_error import VMError
from ..node_error import NodeError


class VirtualBoxError(VMError):
class VirtualBoxError(NodeError):

pass
@ -35,7 +35,7 @@ from .virtualbox_error import VirtualBoxError
from ..nios.nio_udp import NIOUDP
from ..nios.nio_nat import NIONAT
from ..adapters.ethernet_adapter import EthernetAdapter
from ..base_vm import BaseVM
from ..base_node import BaseNode

if sys.platform.startswith('win'):
import msvcrt

@ -45,15 +45,15 @@ import logging
log = logging.getLogger(__name__)


class VirtualBoxVM(BaseVM):
class VirtualBoxVM(BaseNode):

"""
VirtualBox VM implementation.
"""

def __init__(self, name, vm_id, project, manager, vmname, linked_clone, console=None, adapters=0):
def __init__(self, name, node_id, project, manager, vmname, linked_clone, console=None, adapters=0):

super().__init__(name, vm_id, project, manager, console=console)
super().__init__(name, node_id, project, manager, console=console)

self._maximum_adapters = 8
self._linked_clone = linked_clone

@ -75,7 +75,7 @@ class VirtualBoxVM(BaseVM):
def __json__(self):

json = {"name": self.name,
"vm_id": self.id,
"node_id": self.id,
"console": self.console,
"project_id": self.project.id,
"vmname": self.vmname,
@ -43,7 +43,7 @@ from gns3server.compute.vmware.nio_vmnet import NIOVMNET

class VMware(BaseManager):

_VM_CLASS = VMwareVM
_NODE_CLASS = VMwareVM

def __init__(self):

@ -324,8 +324,8 @@ class VMware(BaseManager):
vmnet_interfaces = self._get_vmnet_interfaces()

# remove vmnets already in use
for vm in self._vms.values():
for used_vmnet in vm.vmnets:
for vmware_vm in self._nodes.values():
for used_vmnet in vmware_vm.vmnets:
if used_vmnet in vmnet_interfaces:
log.debug("{} is already in use".format(used_vmnet))
vmnet_interfaces.remove(used_vmnet)

@ -558,7 +558,7 @@ class VMware(BaseManager):
"""

vm_entries = {}
vms = []
vmware_vms = []
try:
log.debug('Reading VMware inventory file "{}"'.format(inventory_path))
pairs = self.parse_vmware_file(inventory_path)

@ -577,8 +577,8 @@ class VMware(BaseManager):
for vm_settings in vm_entries.values():
if "displayname" in vm_settings and "config" in vm_settings:
log.debug('Found VM named "{}" with VMX file "{}"'.format(vm_settings["displayname"], vm_settings["config"]))
vms.append({"vmname": vm_settings["displayname"], "vmx_path": vm_settings["config"]})
return vms
vmware_vms.append({"vmname": vm_settings["displayname"], "vmx_path": vm_settings["config"]})
return vmware_vms

def _get_vms_from_directory(self, directory):
"""

@ -589,7 +589,7 @@ class VMware(BaseManager):
:returns: list of VMs
"""

vms = []
vmware_vms = []
for path, _, filenames in os.walk(directory):
for filename in filenames:
if os.path.splitext(filename)[1] == ".vmx":

@ -599,11 +599,11 @@ class VMware(BaseManager):
pairs = self.parse_vmware_file(vmx_path)
if "displayname" in pairs:
log.debug('Found VM named "{}"'.format(pairs["displayname"]))
vms.append({"vmname": pairs["displayname"], "vmx_path": vmx_path})
vmware_vms.append({"vmname": pairs["displayname"], "vmx_path": vmx_path})
except OSError as e:
log.warning('Could not read VMware VMX file "{}": {}'.format(vmx_path, e))
continue
return vms
return vmware_vms

@staticmethod
def get_vmware_inventory_path():

@ -687,7 +687,7 @@ class VMware(BaseManager):
default_vm_path = pairs["prefvmx.defaultvmpath"]
if not os.path.isdir(default_vm_path):
raise VMwareError('Could not find the default VM directory: "{}"'.format(default_vm_path))
vms = self._get_vms_from_directory(default_vm_path)
vmware_vms = self._get_vms_from_directory(default_vm_path)

# looks for VMX paths in the preferences file in case not all VMs are in the default directory
for key, value in pairs.items():

@ -696,12 +696,12 @@ class VMware(BaseManager):
display_name = "pref.mruVM{}.displayName".format(m.group(1))
if display_name in pairs:
found = False
for vm in vms:
if vm["vmname"] == display_name:
for vmware_vm in vmware_vms:
if vmware_vm["vmname"] == display_name:
found = True
if found is False:
vms.append({"vmname": pairs[display_name], "vmx_path": value})
return vms
vmware_vms.append({"vmname": pairs[display_name], "vmx_path": value})
return vmware_vms

@staticmethod
def _get_linux_vmware_binary():
@ -19,9 +19,9 @@
Custom exceptions for the VMware module.
"""

from ..vm_error import VMError
from ..node_error import NodeError


class VMwareError(VMError):
class VMwareError(NodeError):

pass
@ -34,7 +34,7 @@ from ..nios.nio_udp import NIOUDP
from ..nios.nio_nat import NIONAT
from .nio_vmnet import NIOVMNET
from ..adapters.ethernet_adapter import EthernetAdapter
from ..base_vm import BaseVM
from ..base_node import BaseNode

if sys.platform.startswith('win'):
import msvcrt

@ -44,15 +44,15 @@ import logging
log = logging.getLogger(__name__)


class VMwareVM(BaseVM):
class VMwareVM(BaseNode):

"""
VMware VM implementation.
"""

def __init__(self, name, vm_id, project, manager, vmx_path, linked_clone, console=None):
def __init__(self, name, node_id, project, manager, vmx_path, linked_clone, console=None):

super().__init__(name, vm_id, project, manager, console=console)
super().__init__(name, node_id, project, manager, console=console)

self._linked_clone = linked_clone
self._vmx_pairs = OrderedDict()

@ -75,12 +75,12 @@ class VMwareVM(BaseVM):
self._use_any_adapter = False

if not os.path.exists(vmx_path):
raise VMwareError('VMware VM "{name}" [{id}]: could not find VMX file "{vmx_path}"'.format(name=name, id=vm_id, vmx_path=vmx_path))
raise VMwareError('VMware VM "{name}" [{id}]: could not find VMX file "{vmx_path}"'.format(name=name, id=node_id, vmx_path=vmx_path))

def __json__(self):

json = {"name": self.name,
"vm_id": self.id,
"node_id": self.id,
"console": self.console,
"project_id": self.project.id,
"vmx_path": self.vmx_path,
@ -29,7 +29,7 @@ from .vpcs_vm import VPCSVM

class VPCS(BaseManager):

_VM_CLASS = VPCSVM
_NODE_CLASS = VPCSVM

def __init__(self):

@ -38,55 +38,55 @@ class VPCS(BaseManager):
self._used_mac_ids = {}

@asyncio.coroutine
def create_vm(self, *args, **kwargs):
def create_node(self, *args, **kwargs):
"""
Creates a new VPCS VM.

:returns: VPCSVM instance
"""

vm = yield from super().create_vm(*args, **kwargs)
self._free_mac_ids.setdefault(vm.project.id, list(range(0, 255)))
node = yield from super().create_node(*args, **kwargs)
self._free_mac_ids.setdefault(node.project.id, list(range(0, 255)))
try:
self._used_mac_ids[vm.id] = self._free_mac_ids[vm.project.id].pop(0)
self._used_mac_ids[node.id] = self._free_mac_ids[node.project.id].pop(0)
except IndexError:
raise VPCSError("Cannot create a new VPCS VM (limit of 255 VMs reached on this host)")
return vm
return node

@asyncio.coroutine
def close_vm(self, vm_id, *args, **kwargs):
def close_node(self, node_id, *args, **kwargs):
"""
Closes a VPCS VM.

:returns: VPCSVM instance
"""

vm = self.get_vm(vm_id)
if vm_id in self._used_mac_ids:
i = self._used_mac_ids[vm_id]
self._free_mac_ids[vm.project.id].insert(0, i)
del self._used_mac_ids[vm_id]
yield from super().close_vm(vm_id, *args, **kwargs)
return vm
node = self.get_node(node_id)
if node_id in self._used_mac_ids:
i = self._used_mac_ids[node_id]
self._free_mac_ids[node.project.id].insert(0, i)
del self._used_mac_ids[node_id]
yield from super().close_node(node_id, *args, **kwargs)
return node

def get_mac_id(self, vm_id):
def get_mac_id(self, node_id):
"""
Get a unique VPCS MAC id (offset)

:param vm_id: VPCS VM identifier
:param node_id: VPCS node identifier

:returns: VPCS MAC identifier
"""

return self._used_mac_ids.get(vm_id, 1)
return self._used_mac_ids.get(node_id, 1)

@staticmethod
def get_legacy_vm_workdir(legacy_vm_id, name):
"""
Returns the name of the legacy working directory name for a VM.
Returns the name of the legacy working directory for a node.

:param legacy_vm_id: legacy VM identifier (integer)
:param name: VM name (not used)
:param legacy_vm_id: legacy node identifier (integer)
:param name: node name (not used)

:returns: working directory name
"""
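A standalone sketch of the MAC-offset bookkeeping above: each project owns a pool of 255 offsets, create_node() pops one, and close_node() pushes it back to the front so freed ids are reused first::

    free_mac_ids = {}
    used_mac_ids = {}

    def allocate(project_id, node_id):
        free_mac_ids.setdefault(project_id, list(range(0, 255)))
        try:
            used_mac_ids[node_id] = free_mac_ids[project_id].pop(0)
        except IndexError:
            raise RuntimeError("limit of 255 VPCS nodes reached on this host")
        return used_mac_ids[node_id]

    def release(project_id, node_id):
        if node_id in used_mac_ids:
            free_mac_ids[project_id].insert(0, used_mac_ids.pop(node_id))

    assert allocate("p1", "n1") == 0
    assert allocate("p1", "n2") == 1
    release("p1", "n1")
    assert allocate("p1", "n3") == 0  # the freed offset comes back first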
@ -19,9 +19,9 @@
Custom exceptions for the VPCS module.
"""

from ..vm_error import VMError
from ..node_error import NodeError


class VPCSError(VMError):
class VPCSError(NodeError):

pass
@ -36,29 +36,29 @@ from .vpcs_error import VPCSError
from ..adapters.ethernet_adapter import EthernetAdapter
from ..nios.nio_udp import NIOUDP
from ..nios.nio_tap import NIOTAP
from ..base_vm import BaseVM
from ..base_node import BaseNode

import logging
log = logging.getLogger(__name__)


class VPCSVM(BaseVM):
class VPCSVM(BaseNode):
module_name = 'vpcs'

"""
VPCS VM implementation.

:param name: VPCS VM name
:param vm_id: VPCS VM identifier
:param node_id: Node identifier
:param project: Project instance
:param manager: Manager instance
:param console: TCP console port
:param startup_script: content of the startup script file
"""

def __init__(self, name, vm_id, project, manager, console=None, startup_script=None):
def __init__(self, name, node_id, project, manager, console=None, startup_script=None):

super().__init__(name, vm_id, project, manager, console=console)
super().__init__(name, node_id, project, manager, console=console)
self._process = None
self._vpcs_stdout_file = ""
self._vpcs_version = None

@ -108,7 +108,7 @@ class VPCSVM(BaseVM):
def __json__(self):

return {"name": self.name,
"vm_id": self.id,
"node_id": self.id,
"vm_directory": self.working_dir,
"status": self.status,
"console": self._console,

@ -145,7 +145,7 @@ class VPCSVM(BaseVM):
path = shutil.which("vpcs")
return path

@BaseVM.name.setter
@BaseNode.name.setter
def name(self, new_name):
"""
Sets the name of this VPCS VM.
@ -77,7 +77,7 @@ class Controller:
return
for c in data["computes"]:
compute_id = c.pop("compute_id")
yield from self.addCompute(compute_id, **c)
yield from self.add_compute(compute_id, **c)

def isEnabled(self):
"""

@ -87,7 +87,7 @@ class Controller:
return Config.instance().get_section_config("Server").getboolean("controller")

@asyncio.coroutine
def addCompute(self, compute_id, **kwargs):
def add_compute(self, compute_id, **kwargs):
"""
Add a server to the dictionary of computes controlled by GNS3

@ -126,11 +126,11 @@ class Controller:
project = Project(project_id=project_id, **kwargs)
self._projects[project.id] = project
for compute in self._computes.values():
yield from project.addCompute(compute)
yield from project.add_compute(compute)
return self._projects[project.id]
return self._projects[project_id]

def getProject(self, project_id):
def get_project(self, project_id):
"""
Return a project or raise a 404
"""
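A short sketch of the lookup convention get_project() follows after the rename: an unknown id surfaces to API clients as an HTTP 404 rather than a bare KeyError. aiohttp is a dependency the server already uses; the controller stand-in below is simplified::

    import aiohttp.web

    class FakeController:
        def __init__(self):
            self._projects = {}

        def get_project(self, project_id):
            # Translate the missing key into a proper HTTP error.
            try:
                return self._projects[project_id]
            except KeyError:
                raise aiohttp.web.HTTPNotFound(text="Project ID {} doesn't exist".format(project_id))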
@ -29,18 +29,18 @@ class Link:

def __init__(self, project):
self._id = str(uuid.uuid4())
self._vms = []
self._nodes = []
self._project = project
self._capturing = False
self._capture_file_name = None

@asyncio.coroutine
def addVM(self, vm, adapter_number, port_number):
def add_node(self, node, adapter_number, port_number):
"""
Add a VM to the link
Add a node to the link
"""
self._vms.append({
"vm": vm,
self._nodes.append({
"node": node,
"adapter_number": adapter_number,
"port_number": port_number
})

@ -107,12 +107,12 @@ class Link:
:returns: File name for a capture on this link
"""
capture_file_name = "{}_{}-{}_to_{}_{}-{}".format(
self._vms[0]["vm"].name,
self._vms[0]["adapter_number"],
self._vms[0]["port_number"],
self._vms[1]["vm"].name,
self._vms[1]["adapter_number"],
self._vms[1]["port_number"])
self._nodes[0]["node"].name,
self._nodes[0]["adapter_number"],
self._nodes[0]["port_number"],
self._nodes[1]["node"].name,
self._nodes[1]["adapter_number"],
self._nodes[1]["port_number"])
return re.sub("[^0-9A-Za-z_-]", "", capture_file_name) + ".pcap"

@property

@ -135,14 +135,14 @@ class Link:

def __json__(self):
res = []
for side in self._vms:
for side in self._nodes:
res.append({
"vm_id": side["vm"].id,
"node_id": side["node"].id,
"adapter_number": side["adapter_number"],
"port_number": side["port_number"]
})
return {
"vms": res, "link_id": self._id,
"nodes": res, "link_id": self._id,
"capturing": self._capturing,
"capture_file_name": self._capture_file_name,
"capture_file_path": self.capture_file_path
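default_capture_file_name() above reduces to a format-and-sanitize step. A runnable excerpt with invented node names, showing what the re.sub() strips::

    import re

    nodes = [
        {"name": "PC 1", "adapter_number": 0, "port_number": 0},
        {"name": "R1 (main)", "adapter_number": 1, "port_number": 3},
    ]
    capture_file_name = "{}_{}-{}_to_{}_{}-{}".format(
        nodes[0]["name"], nodes[0]["adapter_number"], nodes[0]["port_number"],
        nodes[1]["name"], nodes[1]["adapter_number"], nodes[1]["port_number"])
    # Spaces and parentheses are dropped; only [0-9A-Za-z_-] survives.
    print(re.sub("[^0-9A-Za-z_-]", "", capture_file_name) + ".pcap")
    # -> PC1_0-0_to_R1main_1-3.pcap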
@ -21,29 +21,29 @@ import copy
import uuid


class VM:
class Node:

def __init__(self, project, compute, vm_id=None, vm_type=None, name=None, console=None, console_type="telnet", properties={}):
def __init__(self, project, compute, node_id=None, node_type=None, name=None, console=None, console_type="telnet", properties={}):
"""
:param project: Project of the VM
:param compute: Hypervisor server where the server will run
:param vm_id: UUID of the vm. Integer id
:param vm_type: Type of emulator
:param name: Name of the vm
:param project: Project of the node
:param compute: Compute server where the node will run
:param node_id: UUID of the node
:param node_type: Type of emulator
:param name: Name of the node
:param console: TCP port of the console
:param console_type: Type of the console (telnet, vnc, serial..)
:param properties: Emulator specific properties of the VM
:param properties: Emulator specific properties of the node
"""

if vm_id is None:
if node_id is None:
self._id = str(uuid.uuid4())
else:
self._id = vm_id
self._id = node_id

self._name = name
self._project = project
self._compute = compute
self._vm_type = vm_type
self._node_type = node_type
self._console = console
self._console_type = console_type
self._properties = properties

@ -57,8 +57,8 @@ class VM:
return self._name

@property
def vm_type(self):
return self._vm_type
def node_type(self):
return self._node_type

@property
def console(self):

@ -90,24 +90,24 @@ class VM:
@asyncio.coroutine
def create(self):
"""
Create the VM on the compute Node
Create the node on the compute server
"""
data = self._vm_data()
data["vm_id"] = self._id
response = yield from self._compute.post("/projects/{}/{}/vms".format(self._project.id, self._vm_type), data=data)
self._parse_vm_response(response)
data = self._node_data()
data["node_id"] = self._id
response = yield from self._compute.post("/projects/{}/{}/nodes".format(self._project.id, self._node_type), data=data)
self._parse_node_response(response)

@asyncio.coroutine
def update(self, name=None, console=None, console_type="telnet", properties={}):
"""
Update the VM on the compute Node
Update the node on the compute server

:param vm_id: UUID of the vm. Integer id
:param vm_type: Type of emulator
:param name: Name of the vm
:param node_id: UUID of the node
:param node_type: Type of emulator
:param name: Name of the node
:param console: TCP port of the console
:param console_type: Type of the console (telnet, vnc, serial..)
:param properties: Emulator specific properties of the VM
:param properties: Emulator specific properties of the node

"""
if name:

@ -119,25 +119,25 @@ class VM:
if properties != {}:
self._properties = properties

data = self._vm_data()
data = self._node_data()
response = yield from self.put(None, data=data)
self._parse_vm_response(response)
self._parse_node_response(response)

def _parse_vm_response(self, response):
def _parse_node_response(self, response):
"""
Update the object with the remote VM object
Update the object with the remote node object
"""
for key, value in response.json.items():
if key == "console":
self._console = value
elif key in ["console_type", "name", "vm_id", "project_id", "vm_directory", "command_line", "status"]:
elif key in ["console_type", "name", "node_id", "project_id", "node_directory", "command_line", "status"]:
pass
else:
self._properties[key] = value

def _vm_data(self):
def _node_data(self):
"""
Prepare VM data to send to the remote controller
Prepare node data to send to the remote controller
"""
data = copy.copy(self._properties)
data["name"] = self._name

@ -157,50 +157,50 @@ class VM:
@asyncio.coroutine
def start(self):
"""
Start a VM
Start a node
"""
yield from self.post("/start")

@asyncio.coroutine
def stop(self):
"""
Stop a VM
Stop a node
"""
yield from self.post("/stop")

@asyncio.coroutine
def suspend(self):
"""
Suspend a VM
Suspend a node
"""
yield from self.post("/suspend")

@asyncio.coroutine
def reload(self):
"""
Suspend a VM
Reload a node
"""
yield from self.post("/reload")

@asyncio.coroutine
def post(self, path, data=None):
"""
HTTP post on the VM
HTTP post on the node
"""
if data:
return (yield from self._compute.post("/projects/{}/{}/vms/{}{}".format(self._project.id, self._vm_type, self._id, path), data=data))
return (yield from self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), data=data))
else:
return (yield from self._compute.post("/projects/{}/{}/vms/{}{}".format(self._project.id, self._vm_type, self._id, path)))
return (yield from self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path)))

@asyncio.coroutine
def put(self, path, data=None):
"""
HTTP post on the VM
HTTP put on the node
"""
if path is None:
path = "/projects/{}/{}/vms/{}".format(self._project.id, self._vm_type, self._id)
path = "/projects/{}/{}/nodes/{}".format(self._project.id, self._node_type, self._id)
else:
path = "/projects/{}/{}/vms/{}{}".format(self._project.id, self._vm_type, self._id, path)
path = "/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path)
if data:
return (yield from self._compute.put(path, data=data))
else:

@ -209,22 +209,22 @@ class VM:
@asyncio.coroutine
def delete(self, path=None):
"""
HTTP post on the VM
HTTP delete on the node
"""
if path is None:
return (yield from self._compute.delete("/projects/{}/{}/vms/{}".format(self._project.id, self._vm_type, self._id)))
return (yield from self._compute.delete("/projects/{}/{}/nodes/{}".format(self._project.id, self._node_type, self._id)))
else:
return (yield from self._compute.delete("/projects/{}/{}/vms/{}{}".format(self._project.id, self._vm_type, self._id, path)))
return (yield from self._compute.delete("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path)))

def __repr__(self):
return "<gns3server.controller.VM {} {}>".format(self._vm_type, self._name)
return "<gns3server.controller.Node {} {}>".format(self._node_type, self._name)

def __json__(self):
return {
"compute_id": self._compute.id,
"project_id": self._project.id,
"vm_id": self._id,
"vm_type": self._vm_type,
"node_id": self._id,
"node_type": self._node_type,
"name": self._name,
"console": self._console,
"console_type": self._console_type,
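For reference, a tiny runnable sketch of the URL shapes the renamed Node class now builds against the compute API (ids shortened for readability; the real values are UUIDs)::

    project_id, node_type, node_id = "p1", "vpcs", "n1"
    create_path = "/projects/{}/{}/nodes".format(project_id, node_type)
    start_path = "/projects/{}/{}/nodes/{}{}".format(project_id, node_type, node_id, "/start")
    assert create_path == "/projects/p1/vpcs/nodes"
    assert start_path == "/projects/p1/vpcs/nodes/n1/start"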
@ -18,10 +18,12 @@
import os
import asyncio
import aiohttp
import shutil

from uuid import UUID, uuid4
from contextlib import contextmanager

from .vm import VM
from .node import Node
from .udp_link import UDPLink
from ..notification_queue import NotificationQueue
from ..config import Config

@ -56,7 +58,7 @@ class Project:

self._temporary = temporary
self._computes = set()
self._vms = {}
self._nodes = {}
self._links = {}
self._listeners = set()

@ -101,42 +103,42 @@ class Project:
return path

@asyncio.coroutine
def addCompute(self, compute):
def add_compute(self, compute):
self._computes.add(compute)
yield from compute.post("/projects", self)

@asyncio.coroutine
def addVM(self, compute, vm_id, **kwargs):
def add_node(self, compute, node_id, **kwargs):
"""
Create a vm or return an existing vm
Create a node or return an existing node

:param kwargs: See the documentation of VM
:param kwargs: See the documentation of Node
"""
if vm_id not in self._vms:
vm = VM(self, compute, vm_id=vm_id, **kwargs)
yield from vm.create()
self._vms[vm.id] = vm
return vm
return self._vms[vm_id]
if node_id not in self._nodes:
node = Node(self, compute, node_id=node_id, **kwargs)
yield from node.create()
self._nodes[node.id] = node
return node
return self._nodes[node_id]

def getVM(self, vm_id):
def get_node(self, node_id):
"""
Return the VM or raise a 404 if the VM is unknown
Return the node or raise a 404 if the node is unknown
"""
try:
return self._vms[vm_id]
return self._nodes[node_id]
except KeyError:
raise aiohttp.web.HTTPNotFound(text="VM ID {} doesn't exist".format(vm_id))
raise aiohttp.web.HTTPNotFound(text="Node ID {} doesn't exist".format(node_id))

@property
def vms(self):
def nodes(self):
"""
:returns: Dictionnary of the VMS
:returns: Dictionary of the nodes
"""
return self._vms
return self._nodes

@asyncio.coroutine
def addLink(self):
def add_link(self):
"""
Create a link. By default the link is empty
"""

@ -144,9 +146,9 @@ class Project:
self._links[link.id] = link
return link

def getLink(self, link_id):
def get_link(self, link_id):
"""
Return the Link or raise a 404 if the VM is unknown
Return the Link or raise a 404 if the link is unknown
"""
try:
return self._links[link_id]

@ -156,7 +158,7 @@ class Project:
@property
def links(self):
"""
:returns: Dictionnary of the Links
:returns: Dictionary of the Links
"""
return self._links

@ -174,7 +176,7 @@ class Project:
def delete(self):
for compute in self._computes:
yield from compute.delete("/projects/{}".format(self._id))
shutil.rmtree(self.path)
shutil.rmtree(self.path, ignore_errors=True)

@contextmanager
def queue(self):
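The add_node() contract above is create-or-return: asking for an id that already exists hands back the cached instance instead of creating a duplicate. A simplified synchronous sketch of that behaviour (the real method is a coroutine)::

    class FakeNode:
        def __init__(self, node_id):
            self.id = node_id

    class FakeProject:
        def __init__(self):
            self._nodes = {}

        def add_node(self, node_id):
            if node_id not in self._nodes:
                node = FakeNode(node_id)
                self._nodes[node.id] = node
            return self._nodes[node_id]

    p = FakeProject()
    assert p.add_node("n1") is p.add_node("n1")  # second call is a cache hit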
@ -26,58 +26,58 @@ class UDPLink(Link):

def __init__(self, project):
super().__init__(project)
self._capture_vm = None
self._capture_node = None

@asyncio.coroutine
def create(self):
"""
Create the link on the VMs
Create the link on the nodes
"""

vm1 = self._vms[0]["vm"]
adapter_number1 = self._vms[0]["adapter_number"]
port_number1 = self._vms[0]["port_number"]
vm2 = self._vms[1]["vm"]
adapter_number2 = self._vms[1]["adapter_number"]
port_number2 = self._vms[1]["port_number"]
node1 = self._nodes[0]["node"]
adapter_number1 = self._nodes[0]["adapter_number"]
port_number1 = self._nodes[0]["port_number"]
node2 = self._nodes[1]["node"]
adapter_number2 = self._nodes[1]["adapter_number"]
port_number2 = self._nodes[1]["port_number"]

# Reserve a UDP port on both sides
response = yield from vm1.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._vm1_port = response.json["udp_port"]
response = yield from vm2.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._vm2_port = response.json["udp_port"]
response = yield from node1.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._node1_port = response.json["udp_port"]
response = yield from node2.compute.post("/projects/{}/ports/udp".format(self._project.id))
self._node2_port = response.json["udp_port"]

# Create the tunnel on both sides
data = {
"lport": self._vm1_port,
"rhost": vm2.compute.host,
"rport": self._vm2_port,
"lport": self._node1_port,
"rhost": node2.compute.host,
"rport": self._node2_port,
"type": "nio_udp"
}
yield from vm1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=data)
yield from node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=data)

data = {
"lport": self._vm2_port,
"rhost": vm1.compute.host,
"rport": self._vm1_port,
"lport": self._node2_port,
"rhost": node1.compute.host,
"rport": self._node1_port,
"type": "nio_udp"
}
yield from vm2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=data)
yield from node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=data)

@asyncio.coroutine
def delete(self):
"""
Delete the link and free the ressources
Delete the link and free the resources
"""
vm1 = self._vms[0]["vm"]
adapter_number1 = self._vms[0]["adapter_number"]
port_number1 = self._vms[0]["port_number"]
vm2 = self._vms[1]["vm"]
adapter_number2 = self._vms[1]["adapter_number"]
port_number2 = self._vms[1]["port_number"]
node1 = self._nodes[0]["node"]
adapter_number1 = self._nodes[0]["adapter_number"]
port_number1 = self._nodes[0]["port_number"]
node2 = self._nodes[1]["node"]
adapter_number2 = self._nodes[1]["adapter_number"]
port_number2 = self._nodes[1]["port_number"]

yield from vm1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1))
yield from vm2.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2))
yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1))
yield from node2.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2))

@asyncio.coroutine
def start_capture(self, data_link_type="DLT_EN10MB", capture_file_name=None):

@ -86,12 +86,12 @@ class UDPLink(Link):
"""
if not capture_file_name:
capture_file_name = self.default_capture_file_name()
self._capture_vm = self._choose_capture_side()
self._capture_node = self._choose_capture_side()
data = {
"capture_file_name": capture_file_name,
"data_link_type": data_link_type
}
yield from self._capture_vm["vm"].post("/adapters/{adapter_number}/ports/{port_number}/start_capture".format(adapter_number=self._capture_vm["adapter_number"], port_number=self._capture_vm["port_number"]), data=data)
yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/start_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]), data=data)
yield from super().start_capture(data_link_type=data_link_type, capture_file_name=capture_file_name)

@asyncio.coroutine

@ -99,9 +99,9 @@ class UDPLink(Link):
"""
Stop capture on a link
"""
if self._capture_vm:
yield from self._capture_vm["vm"].post("/adapters/{adapter_number}/ports/{port_number}/stop_capture".format(adapter_number=self._capture_vm["adapter_number"], port_number=self._capture_vm["port_number"]))
self._capture_vm = None
if self._capture_node:
yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/stop_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]))
self._capture_node = None
yield from super().stop_capture()

def _choose_capture_side(self):

@ -111,17 +111,17 @@ class UDPLink(Link):
The ideal candidate is a node that supports capture on the controller
server

:returns: VM where the capture should run
:returns: Node where the capture should run
"""

# For saving bandwith we use the local node first
for vm in self._vms:
if vm["vm"].compute.id == "local" and vm["vm"].vm_type not in ["qemu", "vpcs"]:
return vm
# use the local node first to save bandwidth
for node in self._nodes:
if node["node"].compute.id == "local" and node["node"].node_type not in ["qemu", "vpcs"]:
return node

for vm in self._vms:
if vm["vm"].vm_type not in ["qemu", "vpcs"]:
return vm
for node in self._nodes:
if node["node"].node_type not in ["qemu", "vpcs"]:
return node

raise aiohttp.web.HTTPConflict(text="Capture is not supported for this link")

@ -130,6 +130,6 @@ class UDPLink(Link):
"""
Return a FileStream of the Pcap from the compute node
"""
if self._capture_vm:
compute = self._capture_vm["vm"].compute
if self._capture_node:
compute = self._capture_node["node"].compute
return compute.streamFile(self._project, "tmp/captures/" + self._capture_file_name)
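The two data dictionaries built in create() above are deliberate mirror images: each side's local port (lport) is the other side's remote port (rport). A runnable sketch with invented ports and addresses::

    node1_port, node2_port = 20000, 20001

    nio_for_node1 = {"lport": node1_port, "rhost": "198.51.100.2", "rport": node2_port, "type": "nio_udp"}
    nio_for_node2 = {"lport": node2_port, "rhost": "198.51.100.1", "rport": node1_port, "type": "nio_udp"}

    # Cross-wired: what one side listens on, the other side sends to.
    assert nio_for_node1["lport"] == nio_for_node2["rport"]
    assert nio_for_node2["lport"] == nio_for_node1["rport"]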
@ -27,7 +27,7 @@ from ....schemas.docker import (
DOCKER_UPDATE_SCHEMA,
DOCKER_LIST_IMAGES_SCHEMA
)
from ....schemas.vm import VM_CAPTURE_SCHEMA
from ....schemas.node import NODE_CAPTURE_SCHEMA
from ....schemas.nio import NIO_SCHEMA


@ -49,7 +49,7 @@ class DockerHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/docker/vms",
r"/projects/{project_id}/docker/nodes",
parameters={
"project_id": "UUID for the project"
},

@ -63,35 +63,33 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def create(request, response):
docker_manager = Docker.instance()
container = yield from docker_manager.create_vm(
request.json.pop("name"),
request.match_info["project_id"],
request.json.get("vm_id"),
image=request.json.pop("image"),
start_command=request.json.get("start_command"),
environment=request.json.get("environment"),
adapters=request.json.get("adapters"),
console=request.json.get("console"),
console_type=request.json.get("console_type"),
console_resolution=request.json.get("console_resolution", "1024x768"),
console_http_port=request.json.get("console_http_port", 80),
console_http_path=request.json.get("console_http_path", "/"),
aux=request.json.get("aux")
)
vm = yield from docker_manager.create_node(request.json.pop("name"),
request.match_info["project_id"],
request.json.get("node_id"),
image=request.json.pop("image"),
start_command=request.json.get("start_command"),
environment=request.json.get("environment"),
adapters=request.json.get("adapters"),
console=request.json.get("console"),
console_type=request.json.get("console_type"),
console_resolution=request.json.get("console_resolution", "1024x768"),
console_http_port=request.json.get("console_http_port", 80),
console_http_path=request.json.get("console_http_path", "/"),
aux=request.json.get("aux"))
for name, value in request.json.items():
if name != "_vm_id":
if hasattr(container, name) and getattr(container, name) != value:
setattr(container, name, value)
if name != "_node_id":
if hasattr(vm, name) and getattr(vm, name) != value:
setattr(vm, name, value)

response.set_status(201)
response.json(container)
response.json(vm)

@classmethod
@Route.post(
r"/projects/{project_id}/docker/vms/{id}/start",
r"/projects/{project_id}/docker/nodes/{node_id}/start",
parameters={
"project_id": "UUID of the project",
"id": "ID of the container"
"node_id": "ID of the container"
},
status_codes={
204: "Instance started",

@ -103,18 +101,16 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def start(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.start()
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.start()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/docker/vms/{id}/stop",
r"/projects/{project_id}/docker/nodes/{node_id}/stop",
parameters={
"project_id": "UUID of the project",
"id": "ID of the container"
"node_id": "ID of the container"
},
status_codes={
204: "Instance stopped",

@ -126,18 +122,16 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def stop(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.stop()
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.stop()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/docker/vms/{id}/reload",
r"/projects/{project_id}/docker/nodes/{node_id}/reload",
parameters={
"project_id": "UUID of the project",
"id": "ID of the container"
"node_id": "ID of the container"
},
status_codes={
204: "Instance restarted",

@ -149,18 +143,16 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def reload(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.restart()
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.restart()
response.set_status(204)

@classmethod
@Route.delete(
r"/projects/{project_id}/docker/vms/{id}",
r"/projects/{project_id}/docker/nodes/{node_id}",
parameters={
"id": "ID for the container",
"project_id": "UUID for the project"
"project_id": "UUID for the project",
"node_id": "ID for the container",
},
status_codes={
204: "Instance deleted",

@ -170,18 +162,16 @@ class DockerHandler:
description="Delete a Docker container")
def delete(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.delete()
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.delete()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/docker/vms/{id}/suspend",
r"/projects/{project_id}/docker/nodes/{node_id}/suspend",
parameters={
"project_id": "UUID of the project",
"id": "ID of the container"
"node_id": "ID of the container"
},
status_codes={
204: "Instance paused",

@ -193,17 +183,15 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def suspend(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.pause()
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.pause()
response.set_status(204)

@Route.post(
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"id": "ID of the container",
"node_id": "ID of the container",
"adapter_number": "Adapter where the nio should be added",
"port_number": "Port on the adapter"
},

@ -217,29 +205,21 @@ class DockerHandler:
output=NIO_SCHEMA)
def create_nio(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_vm(
request.match_info["vm_id"],
project_id=request.match_info["project_id"])
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
nio_type = request.json["type"]
if nio_type not in ("nio_udp",):
raise HTTPConflict(
text="NIO of type {} is not supported".format(nio_type))
nio = docker_manager.create_nio(
int(request.match_info["adapter_number"]), request.json)
adapter = container._ethernet_adapters[
int(request.match_info["adapter_number"])
]
yield from container.adapter_add_nio_binding(
int(request.match_info["adapter_number"]), nio)
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
nio = docker_manager.create_nio(int(request.match_info["adapter_number"]), request.json)
yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
response.set_status(201)
response.json(nio)

@classmethod
@Route.delete(
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"id": "ID of the container",
"node_id": "ID of the container",
"adapter_number": "Adapter where the nio should be added",
"port_number": "Port on the adapter"
},

@ -251,19 +231,16 @@ class DockerHandler:
description="Remove a NIO from a Docker container")
def delete_nio(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_vm(
request.match_info["vm_id"],
project_id=request.match_info["project_id"])
yield from container.adapter_remove_nio_binding(
int(request.match_info["adapter_number"]))
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]))
response.set_status(204)

@classmethod
@Route.put(
r"/projects/{project_id}/docker/vms/{vm_id}",
r"/projects/{project_id}/docker/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Instance updated",

@ -277,7 +254,7 @@ class DockerHandler:
def update(request, response):

docker_manager = Docker.instance()
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
vm.name = request.json.get("name", vm.name)
vm.console = request.json.get("console", vm.console)
vm.aux = request.json.get("aux", vm.aux)

@ -292,10 +269,10 @@ class DockerHandler:
response.json(vm)

@Route.post(
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter to start a packet capture",
"port_number": "Port on the adapter"
},

@ -303,14 +280,14 @@ class DockerHandler:
200: "Capture started",
400: "Invalid request",
404: "Instance doesn't exist",
409: "VM not started"
409: "Node not started"
},
description="Start a packet capture on a IOU VM instance",
input=VM_CAPTURE_SCHEMA)
description="Start a packet capture on a Docker VM instance",
input=NODE_CAPTURE_SCHEMA)
def start_capture(request, response):

docker_manager = Docker.instance()
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
adapter_number = int(request.match_info["adapter_number"])
pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"])

@ -320,10 +297,10 @@ class DockerHandler:
response.json({"pcap_file_path": str(pcap_file_path)})

@Route.post(
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter to stop a packet capture",
"port_number": "Port on the adapter (always 0)"
},

@ -337,7 +314,7 @@ class DockerHandler:
def stop_capture(request, response):

docker_manager = Docker.instance()
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

if not vm.is_running():
raise HTTPConflict(text="Cannot capture traffic on a non started VM")
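For API clients the practical effect of this handler rewrite is the URL rename: every Docker endpoint moves from /docker/vms/{id} to /docker/nodes/{node_id}. A small illustration with invented ids::

    old = "/projects/{project_id}/docker/vms/{id}/start"
    new = "/projects/{project_id}/docker/nodes/{node_id}/start"
    print(new.format(project_id="p1", node_id="c1"))
    # -> /projects/p1/docker/nodes/c1/start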
@ -22,7 +22,7 @@ from ....schemas.dynamips_device import DEVICE_CREATE_SCHEMA
from ....schemas.dynamips_device import DEVICE_UPDATE_SCHEMA
from ....schemas.dynamips_device import DEVICE_OBJECT_SCHEMA
from ....schemas.dynamips_device import DEVICE_NIO_SCHEMA
from ....schemas.vm import VM_CAPTURE_SCHEMA
from ....schemas.node import NODE_CAPTURE_SCHEMA
from ....compute.dynamips import Dynamips


@ -198,7 +198,7 @@ class DynamipsDeviceHandler:
404: "Instance doesn't exist"
},
description="Start a packet capture on a Dynamips device instance",
input=VM_CAPTURE_SCHEMA)
input=NODE_CAPTURE_SCHEMA)
def start_capture(request, response):

dynamips_manager = Dynamips.instance()
@ -21,12 +21,12 @@ import base64

from ....web.route import Route
from ....schemas.nio import NIO_SCHEMA
from ....schemas.vm import VM_LIST_IMAGES_SCHEMA
from ....schemas.dynamips_vm import VM_CREATE_SCHEMA
from ....schemas.dynamips_vm import VM_UPDATE_SCHEMA
from ....schemas.dynamips_vm import VM_OBJECT_SCHEMA
from ....schemas.dynamips_vm import VM_CONFIGS_SCHEMA
from ....schemas.vm import VM_CAPTURE_SCHEMA
from ....schemas.node import NODE_CAPTURE_SCHEMA
from ....schemas.node import NODE_LIST_IMAGES_SCHEMA
from ....compute.dynamips import Dynamips
from ....compute.dynamips.dynamips_error import DynamipsError
from ....compute.project_manager import ProjectManager

@ -46,7 +46,7 @@ class DynamipsVMHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/dynamips/vms",
r"/projects/{project_id}/dynamips/nodes",
parameters={
"project_id": "UUID for the project"
},

@ -65,9 +65,9 @@ class DynamipsVMHandler:
default_chassis = None
if platform in DEFAULT_CHASSIS:
default_chassis = DEFAULT_CHASSIS[platform]
vm = yield from dynamips_manager.create_vm(request.json.pop("name"),
vm = yield from dynamips_manager.create_node(request.json.pop("name"),
request.match_info["project_id"],
request.json.get("vm_id"),
request.json.get("node_id"),
request.json.get("dynamips_id"),
platform,
console=request.json.get("console"),

@ -80,10 +80,10 @@ class DynamipsVMHandler:

@classmethod
@Route.get(
r"/projects/{project_id}/dynamips/vms/{vm_id}",
r"/projects/{project_id}/dynamips/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Success",

@ -95,15 +95,15 @@ class DynamipsVMHandler:
def show(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
response.json(vm)

@classmethod
@Route.put(
r"/projects/{project_id}/dynamips/vms/{vm_id}",
r"/projects/{project_id}/dynamips/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Instance updated",

@ -117,17 +117,16 @@ class DynamipsVMHandler:
def update(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])

vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from dynamips_manager.update_vm_settings(vm, request.json)
response.json(vm)

@classmethod
@Route.delete(
r"/projects/{project_id}/dynamips/vms/{vm_id}",
r"/projects/{project_id}/dynamips/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance deleted",

@ -140,15 +139,15 @@ class DynamipsVMHandler:
# check the project_id exists
ProjectManager.instance().get_project(request.match_info["project_id"])

yield from Dynamips.instance().delete_vm(request.match_info["vm_id"])
yield from Dynamips.instance().delete_node(request.match_info["node_id"])
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/dynamips/vms/{vm_id}/start",
r"/projects/{project_id}/dynamips/nodes/{node_id}/start",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance started",

@ -159,7 +158,7 @@ class DynamipsVMHandler:
def start(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
try:
yield from dynamips_manager.ghost_ios_support(vm)
except GeneratorExit:

@ -169,10 +168,10 @@ class DynamipsVMHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/dynamips/vms/{vm_id}/stop",
r"/projects/{project_id}/dynamips/nodes/{node_id}/stop",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance stopped",

@ -183,16 +182,16 @@ class DynamipsVMHandler:
def stop(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.stop()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/dynamips/vms/{vm_id}/suspend",
r"/projects/{project_id}/dynamips/nodes/{node_id}/suspend",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance suspended",

@ -203,16 +202,16 @@ class DynamipsVMHandler:
def suspend(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.suspend()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/dynamips/vms/{vm_id}/resume",
r"/projects/{project_id}/dynamips/nodes/{node_id}/resume",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance resumed",

@ -223,16 +222,16 @@ class DynamipsVMHandler:
def resume(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.resume()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/dynamips/vms/{vm_id}/reload",
r"/projects/{project_id}/dynamips/nodes/{node_id}/reload",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance reloaded",

@ -243,15 +242,15 @@ class DynamipsVMHandler:
def reload(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.reload()
response.set_status(204)

@Route.post(
r"/projects/{project_id}/dynamips/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/dynamips/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter where the nio should be added",
"port_number": "Port on the adapter"
},

@ -266,7 +265,7 @@ class DynamipsVMHandler:
def create_nio(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
nio = yield from dynamips_manager.create_nio(vm, request.json)
slot_number = int(request.match_info["adapter_number"])
port_number = int(request.match_info["port_number"])

@ -276,10 +275,10 @@ class DynamipsVMHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/dynamips/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/dynamips/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter from where the nio should be removed",
"port_number": "Port on the adapter"
},

@ -292,7 +291,7 @@ class DynamipsVMHandler:
def delete_nio(request, response):

dynamips_manager = Dynamips.instance()
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
slot_number = int(request.match_info["adapter_number"])
port_number = int(request.match_info["port_number"])
nio = yield from vm.slot_remove_nio_binding(slot_number, port_number)

@ -300,10 +299,10 @@ class DynamipsVMHandler:
response.set_status(204)

@Route.post(
r"/projects/{project_id}/dynamips/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
r"/projects/{project_id}/dynamips/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter to start a packet capture",
"port_number": "Port on the adapter"
},

@ -313,11 +312,11 @@ class DynamipsVMHandler:
404: "Instance doesn't exist"
},
description="Start a packet capture on a Dynamips VM instance",
||||
input=VM_CAPTURE_SCHEMA)
|
||||
input=NODE_CAPTURE_SCHEMA)
|
||||
def start_capture(request, response):
|
||||
|
||||
dynamips_manager = Dynamips.instance()
|
||||
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
slot_number = int(request.match_info["adapter_number"])
|
||||
port_number = int(request.match_info["port_number"])
|
||||
pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"])
|
||||
@ -333,10 +332,10 @@ class DynamipsVMHandler:
|
||||
response.json({"pcap_file_path": pcap_file_path})
|
||||
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/dynamips/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
|
||||
r"/projects/{project_id}/dynamips/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance",
|
||||
"node_id": "UUID for the instance",
|
||||
"adapter_number": "Adapter to stop a packet capture",
|
||||
"port_number": "Port on the adapter (always 0)"
|
||||
},
|
||||
@ -349,14 +348,14 @@ class DynamipsVMHandler:
|
||||
def stop_capture(request, response):
|
||||
|
||||
dynamips_manager = Dynamips.instance()
|
||||
vm = dynamips_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
slot_number = int(request.match_info["adapter_number"])
|
||||
port_number = int(request.match_info["port_number"])
|
||||
yield from vm.stop_capture(slot_number, port_number)
|
||||
response.set_status(204)
|
||||
|
||||
@Route.get(
|
||||
r"/projects/{project_id}/dynamips/vms/{vm_id}/configs",
|
||||
r"/projects/{project_id}/dynamips/nodes/{node_id}/configs",
|
||||
status_codes={
|
||||
200: "Configs retrieved",
|
||||
400: "Invalid request",
|
||||
@ -367,8 +366,7 @@ class DynamipsVMHandler:
|
||||
def get_configs(request, response):
|
||||
|
||||
dynamips_manager = Dynamips.instance()
|
||||
vm = dynamips_manager.get_vm(request.match_info["vm_id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
|
||||
startup_config_base64, private_config_base64 = yield from vm.extract_config()
|
||||
module_workdir = vm.project.module_working_directory(dynamips_manager.module_name.lower())
|
||||
@ -411,7 +409,7 @@ class DynamipsVMHandler:
|
||||
response.json(result)
|
||||
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/dynamips/vms/{vm_id}/configs/save",
|
||||
r"/projects/{project_id}/dynamips/nodes/{node_id}/configs/save",
|
||||
status_codes={
|
||||
200: "Configs saved",
|
||||
400: "Invalid request",
|
||||
@ -421,14 +419,12 @@ class DynamipsVMHandler:
|
||||
def save_configs(request, response):
|
||||
|
||||
dynamips_manager = Dynamips.instance()
|
||||
vm = dynamips_manager.get_vm(request.match_info["vm_id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
|
||||
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
yield from vm.save_configs()
|
||||
response.set_status(200)
|
||||
|
||||
@Route.get(
|
||||
r"/projects/{project_id}/dynamips/vms/{vm_id}/idlepc_proposals",
|
||||
r"/projects/{project_id}/dynamips/nodes/{node_id}/idlepc_proposals",
|
||||
status_codes={
|
||||
200: "Idle-PCs retrieved",
|
||||
400: "Invalid request",
|
||||
@ -438,16 +434,14 @@ class DynamipsVMHandler:
|
||||
def get_idlepcs(request, response):
|
||||
|
||||
dynamips_manager = Dynamips.instance()
|
||||
vm = dynamips_manager.get_vm(request.match_info["vm_id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
|
||||
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
yield from vm.set_idlepc("0x0")
|
||||
idlepcs = yield from vm.get_idle_pc_prop()
|
||||
response.set_status(200)
|
||||
response.json(idlepcs)
|
||||
|
||||
@Route.get(
|
||||
r"/projects/{project_id}/dynamips/vms/{vm_id}/auto_idlepc",
|
||||
r"/projects/{project_id}/dynamips/nodes/{node_id}/auto_idlepc",
|
||||
status_codes={
|
||||
200: "Best Idle-pc value found",
|
||||
400: "Invalid request",
|
||||
@ -457,19 +451,18 @@ class DynamipsVMHandler:
|
||||
def get_auto_idlepc(request, response):
|
||||
|
||||
dynamips_manager = Dynamips.instance()
|
||||
vm = dynamips_manager.get_vm(request.match_info["vm_id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
idlepc = yield from dynamips_manager.auto_idlepc(vm)
|
||||
response.set_status(200)
|
||||
response.json({"idlepc": idlepc})
|
||||
|
||||
@Route.get(
|
||||
r"/dynamips/vms",
|
||||
r"/dynamips/nodes",
|
||||
status_codes={
|
||||
200: "List of Dynamips VM retrieved",
|
||||
},
|
||||
description="Retrieve the list of Dynamips VMS",
|
||||
output=VM_LIST_IMAGES_SCHEMA)
output=NODE_LIST_IMAGES_SCHEMA)
def list_vms(request, response):

dynamips_manager = Dynamips.instance()
@ -478,13 +471,13 @@ class DynamipsVMHandler:
response.json(vms)

@Route.post(
r"/dynamips/vms/{path}",
r"/dynamips/nodes/{path}",
status_codes={
204: "Image uploaded",
},
raw=True,
description="Upload Dynamips image.")
def upload_vm(request, response):
description="Upload Dynamips image")
def upload_image(request, response):

dynamips_manager = Dynamips.instance()
yield from dynamips_manager.write_image(request.match_info["path"], request.content)
@ -25,8 +25,8 @@ from ....schemas.iou import IOU_START_SCHEMA
from ....schemas.iou import IOU_UPDATE_SCHEMA
from ....schemas.iou import IOU_OBJECT_SCHEMA
from ....schemas.iou import IOU_CONFIGS_SCHEMA
from ....schemas.vm import VM_LIST_IMAGES_SCHEMA
from ....schemas.vm import VM_CAPTURE_SCHEMA
from ....schemas.node import NODE_LIST_IMAGES_SCHEMA
from ....schemas.node import NODE_CAPTURE_SCHEMA
from ....compute.iou import IOU

@ -38,7 +38,7 @@ class IOUHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/iou/vms",
r"/projects/{project_id}/iou/nodes",
parameters={
"project_id": "UUID for the project"
},
@ -53,10 +53,10 @@ class IOUHandler:
def create(request, response):

iou = IOU.instance()
vm = yield from iou.create_vm(request.json.pop("name"),
request.match_info["project_id"],
request.json.get("vm_id"),
console=request.json.get("console"))
vm = yield from iou.create_node(request.json.pop("name"),
request.match_info["project_id"],
request.json.get("node_id"),
console=request.json.get("console"))

for name, value in request.json.items():
if hasattr(vm, name) and getattr(vm, name) != value:
@ -74,10 +74,10 @@ class IOUHandler:

@classmethod
@Route.get(
r"/projects/{project_id}/iou/vms/{vm_id}",
r"/projects/{project_id}/iou/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Success",
@ -89,15 +89,15 @@ class IOUHandler:
def show(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
response.json(vm)

@classmethod
@Route.put(
r"/projects/{project_id}/iou/vms/{vm_id}",
r"/projects/{project_id}/iou/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Instance updated",
@ -111,7 +111,7 @@ class IOUHandler:
def update(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

for name, value in request.json.items():
if hasattr(vm, name) and getattr(vm, name) != value:
@ -124,10 +124,10 @@ class IOUHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/iou/vms/{vm_id}",
r"/projects/{project_id}/iou/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance deleted",
@ -137,15 +137,15 @@ class IOUHandler:
description="Delete a IOU instance")
|
||||
def delete(request, response):

yield from IOU.instance().delete_vm(request.match_info["vm_id"])
yield from IOU.instance().delete_node(request.match_info["node_id"])
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/iou/vms/{vm_id}/start",
r"/projects/{project_id}/iou/nodes/{node_id}/start",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Instance started",
@ -158,7 +158,7 @@ class IOUHandler:
def start(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

for name, value in request.json.items():
if hasattr(vm, name) and getattr(vm, name) != value:
@ -169,10 +169,10 @@ class IOUHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/iou/vms/{vm_id}/stop",
r"/projects/{project_id}/iou/nodes/{node_id}/stop",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance stopped",
@ -183,16 +183,16 @@ class IOUHandler:
def stop(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.stop()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/iou/vms/{vm_id}/reload",
r"/projects/{project_id}/iou/nodes/{node_id}/reload",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
},
status_codes={
204: "Instance reloaded",
@ -203,15 +203,15 @@ class IOUHandler:
def reload(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.reload()
response.set_status(204)

@Route.post(
r"/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Network adapter where the nio is located",
"port_number": "Port where the nio should be added"
},
@ -226,7 +226,7 @@ class IOUHandler:
def create_nio(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
nio_type = request.json["type"]
if nio_type not in ("nio_udp", "nio_tap", "nio_generic_ethernet"):
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
@ -237,10 +237,10 @@ class IOUHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Network adapter where the nio is located",
"port_number": "Port from where the nio should be removed"
},
@ -253,15 +253,15 @@ class IOUHandler:
def delete_nio(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]))
response.set_status(204)

@Route.post(
r"/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter to start a packet capture",
"port_number": "Port on the adapter"
},
@ -272,25 +272,24 @@ class IOUHandler:
409: "VM not started"
},
description="Start a packet capture on a IOU VM instance",
|
||||
input=VM_CAPTURE_SCHEMA)
input=NODE_CAPTURE_SCHEMA)
def start_capture(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
adapter_number = int(request.match_info["adapter_number"])
port_number = int(request.match_info["port_number"])
pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"])

if not vm.is_running():
raise HTTPConflict(text="Cannot capture traffic on a non started VM")
|
||||
yield from vm.start_capture(adapter_number, port_number, pcap_file_path, request.json["data_link_type"])
|
||||
response.json({"pcap_file_path": str(pcap_file_path)})
|
||||
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
|
||||
r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance",
|
||||
"node_id": "UUID for the instance",
|
||||
"adapter_number": "Adapter to stop a packet capture",
|
||||
"port_number": "Port on the adapter (always 0)"
|
||||
},
|
||||
@ -304,7 +303,7 @@ class IOUHandler:
|
||||
def stop_capture(request, response):
|
||||
|
||||
iou_manager = IOU.instance()
|
||||
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
|
||||
if not vm.is_running():
|
||||
raise HTTPConflict(text="Cannot capture traffic on a non started VM")
|
||||
@ -315,7 +314,7 @@ class IOUHandler:
response.set_status(204)

@Route.get(
r"/projects/{project_id}/iou/vms/{vm_id}/configs",
r"/projects/{project_id}/iou/nodes/{node_id}/configs",
status_codes={
200: "Configs retrieved",
400: "Invalid request",
@ -326,7 +325,7 @@ class IOUHandler:
def get_configs(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

startup_config_content, private_config_content = vm.extract_configs()
result = {}
@ -352,7 +351,7 @@ class IOUHandler:
response.json(result)

@Route.post(
r"/projects/{project_id}/iou/vms/{vm_id}/configs/save",
r"/projects/{project_id}/iou/nodes/{node_id}/configs/save",
status_codes={
200: "Configs saved",
400: "Invalid request",
@ -362,32 +361,32 @@ class IOUHandler:
def save_configs(request, response):

iou_manager = IOU.instance()
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
vm.save_configs()
response.set_status(200)

@Route.get(
r"/iou/vms",
r"/iou/nodes",
status_codes={
200: "List of IOU VM retrieved",
|
||||
},
|
||||
description="Retrieve the list of IOU VMS",
|
||||
output=VM_LIST_IMAGES_SCHEMA)
def list_vms(request, response):
output=NODE_LIST_IMAGES_SCHEMA)
def list_iou_nodes(request, response):

iou_manager = IOU.instance()
vms = yield from iou_manager.list_images()
iou_nodes = yield from iou_manager.list_images()
response.set_status(200)
response.json(vms)
response.json(iou_nodes)

@Route.post(
r"/iou/vms/{path}",
r"/iou/nodes/{path}",
status_codes={
204: "Image uploaded",
},
raw=True,
description="Upload IOU image.")
def upload_vm(request, response):
def upload_image(request, response):

iou_manager = IOU.instance()
yield from iou_manager.write_image(request.match_info["path"], request.content)
@ -29,7 +29,7 @@ from ....schemas.qemu import QEMU_BINARY_FILTER_SCHEMA
from ....schemas.qemu import QEMU_BINARY_LIST_SCHEMA
from ....schemas.qemu import QEMU_CAPABILITY_LIST_SCHEMA
from ....schemas.qemu import QEMU_IMAGE_CREATE_SCHEMA
from ....schemas.vm import VM_LIST_IMAGES_SCHEMA
from ....schemas.node import NODE_LIST_IMAGES_SCHEMA
from ....compute.qemu import Qemu
from ....config import Config

@ -42,7 +42,7 @@ class QEMUHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/qemu/vms",
r"/projects/{project_id}/qemu/nodes",
parameters={
"project_id": "UUID for the project"
},
@ -57,14 +57,14 @@ class QEMUHandler:
def create(request, response):

qemu = Qemu.instance()
vm = yield from qemu.create_vm(request.json.pop("name"),
request.match_info["project_id"],
request.json.pop("vm_id", None),
linked_clone=request.json.get("linked_clone", True),
qemu_path=request.json.pop("qemu_path", None),
console=request.json.pop("console", None),
console_type=request.json.pop("console_type", "telnet"),
platform=request.json.pop("platform", None))
vm = yield from qemu.create_node(request.json.pop("name"),
request.match_info["project_id"],
request.json.pop("node_id", None),
linked_clone=request.json.get("linked_clone", True),
qemu_path=request.json.pop("qemu_path", None),
console=request.json.pop("console", None),
console_type=request.json.pop("console_type", "telnet"),
platform=request.json.pop("platform", None))

for name, value in request.json.items():
if hasattr(vm, name) and getattr(vm, name) != value:
@ -75,10 +75,10 @@ class QEMUHandler:

@classmethod
@Route.get(
r"/projects/{project_id}/qemu/vms/{vm_id}",
r"/projects/{project_id}/qemu/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Success",
@ -90,15 +90,15 @@ class QEMUHandler:
def show(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
response.json(vm)

@classmethod
@Route.put(
r"/projects/{project_id}/qemu/vms/{vm_id}",
r"/projects/{project_id}/qemu/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Instance updated",
@ -112,7 +112,7 @@ class QEMUHandler:
def update(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

for name, value in request.json.items():
if hasattr(vm, name) and getattr(vm, name) != value:
@ -122,10 +122,10 @@ class QEMUHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/qemu/vms/{vm_id}",
r"/projects/{project_id}/qemu/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance deleted",
@ -135,15 +135,15 @@ class QEMUHandler:
description="Delete a Qemu VM instance")
def delete(request, response):

yield from Qemu.instance().delete_vm(request.match_info["vm_id"])
yield from Qemu.instance().delete_node(request.match_info["node_id"])
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/qemu/vms/{vm_id}/start",
r"/projects/{project_id}/qemu/nodes/{node_id}/start",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Instance started",
@ -155,7 +155,7 @@ class QEMUHandler:
def start(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
if sys.platform.startswith("linux") and qemu_manager.config.get_section_config("Qemu").getboolean("enable_kvm", True) \
and "-no-kvm" not in vm.options:
pm = ProjectManager.instance()
@ -166,10 +166,10 @@ class QEMUHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/qemu/vms/{vm_id}/stop",
r"/projects/{project_id}/qemu/nodes/{node_id}/stop",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"vm_node": "UUID for the instance"
|
||||
},
status_codes={
204: "Instance stopped",
@ -180,16 +180,16 @@ class QEMUHandler:
def stop(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.stop()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/qemu/vms/{vm_id}/reload",
r"/projects/{project_id}/qemu/nodes/{node_id}/reload",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
},
status_codes={
204: "Instance reloaded",
@ -200,16 +200,16 @@ class QEMUHandler:
def reload(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.reload()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/qemu/vms/{vm_id}/suspend",
r"/projects/{project_id}/qemu/nodes/{node_id}/suspend",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
},
status_codes={
204: "Instance suspended",
@ -220,16 +220,16 @@ class QEMUHandler:
def suspend(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.suspend()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/qemu/vms/{vm_id}/resume",
r"/projects/{project_id}/qemu/nodes/{node_id}/resume",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
},
status_codes={
204: "Instance resumed",
@ -240,15 +240,15 @@ class QEMUHandler:
def resume(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.resume()
response.set_status(204)

@Route.post(
r"/projects/{project_id}/qemu/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Network adapter where the nio is located",
"port_number": "Port on the adapter (always 0)"
},
@ -263,7 +263,7 @@ class QEMUHandler:
def create_nio(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
nio_type = request.json["type"]
if nio_type not in ("nio_udp", "nio_tap", "nio_nat"):
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
@ -274,10 +274,10 @@ class QEMUHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/qemu/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Network adapter where the nio is located",
"port_number": "Port on the adapter (always 0)"
},
@ -290,7 +290,7 @@ class QEMUHandler:
def delete_nio(request, response):

qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]))
response.set_status(204)

@ -363,27 +363,27 @@ class QEMUHandler:
response.set_status(201)

@Route.get(
r"/qemu/vms",
r"/qemu/nodes",
status_codes={
200: "List of Qemu images retrieved",
},
description="Retrieve the list of Qemu images",
output=VM_LIST_IMAGES_SCHEMA)
def list_vms(request, response):
output=NODE_LIST_IMAGES_SCHEMA)
def list_vm_nodes(request, response):

qemu_manager = Qemu.instance()
vms = yield from qemu_manager.list_images()
vm_nodes = yield from qemu_manager.list_images()
response.set_status(200)
response.json(vms)
response.json(vm_nodes)

@Route.post(
r"/qemu/vms/{path:.+}",
r"/qemu/nodes/{path:.+}",
status_codes={
204: "Image uploaded",
},
raw=True,
description="Upload Qemu image.")
def upload_vm(request, response):
def upload_image(request, response):

qemu_manager = Qemu.instance()
yield from qemu_manager.write_image(request.match_info["path"], request.content)
@ -23,7 +23,7 @@ from ....schemas.nio import NIO_SCHEMA
from ....schemas.virtualbox import VBOX_CREATE_SCHEMA
from ....schemas.virtualbox import VBOX_UPDATE_SCHEMA
from ....schemas.virtualbox import VBOX_OBJECT_SCHEMA
from ....schemas.vm import VM_CAPTURE_SCHEMA
from ....schemas.node import NODE_CAPTURE_SCHEMA
from ....compute.virtualbox import VirtualBox
from ....compute.project_manager import ProjectManager

@ -40,16 +40,16 @@ class VirtualBoxHandler:
status_codes={
200: "Success",
},
description="Get all VirtualBox VMs available")
description="Get all available VirtualBox VMs")
def index(request, response):

vbox_manager = VirtualBox.instance()
vms = yield from vbox_manager.list_images()
vms = yield from vbox_manager.list_vms()
response.json(vms)

@classmethod
@Route.post(
r"/projects/{project_id}/virtualbox/vms",
r"/projects/{project_id}/virtualbox/nodes",
parameters={
"project_id": "UUID for the project"
},
@ -64,13 +64,13 @@ class VirtualBoxHandler:
def create(request, response):

vbox_manager = VirtualBox.instance()
vm = yield from vbox_manager.create_vm(request.json.pop("name"),
request.match_info["project_id"],
request.json.get("vm_id"),
request.json.pop("vmname"),
request.json.pop("linked_clone"),
console=request.json.get("console", None),
adapters=request.json.get("adapters", 0))
vm = yield from vbox_manager.create_node(request.json.pop("name"),
request.match_info["project_id"],
request.json.get("node_id"),
request.json.pop("vmname"),
request.json.pop("linked_clone"),
console=request.json.get("console", None),
adapters=request.json.get("adapters", 0))

if "enable_remote_console" in request.json:
yield from vm.set_enable_remote_console(request.json.pop("enable_remote_console"))
@ -81,7 +81,7 @@ class VirtualBoxHandler:
yield from vm.set_ram(ram)

for name, value in request.json.items():
if name != "vm_id":
if name != "node_id":
if hasattr(vm, name) and getattr(vm, name) != value:
setattr(vm, name, value)

@ -90,10 +90,10 @@ class VirtualBoxHandler:

@classmethod
@Route.get(
r"/projects/{project_id}/virtualbox/vms/{vm_id}",
r"/projects/{project_id}/virtualbox/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Success",
@ -105,15 +105,15 @@ class VirtualBoxHandler:
def show(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
response.json(vm)

@classmethod
@Route.put(
r"/projects/{project_id}/virtualbox/vms/{vm_id}",
r"/projects/{project_id}/virtualbox/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Instance updated",
@ -127,7 +127,7 @@ class VirtualBoxHandler:
def update(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

if "vmname" in request.json:
vmname = request.json.pop("vmname")
@ -155,10 +155,10 @@ class VirtualBoxHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/virtualbox/vms/{vm_id}",
r"/projects/{project_id}/virtualbox/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance deleted",
@ -171,15 +171,15 @@ class VirtualBoxHandler:
# check the project_id exists
ProjectManager.instance().get_project(request.match_info["project_id"])

yield from VirtualBox.instance().delete_vm(request.match_info["vm_id"])
yield from VirtualBox.instance().delete_node(request.match_info["node_id"])
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/start",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/start",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance started",
@ -190,7 +190,7 @@ class VirtualBoxHandler:
def start(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
if (yield from vm.check_hw_virtualization()):
pm = ProjectManager.instance()
if pm.check_hardware_virtualization(vm) is False:
@ -200,10 +200,10 @@ class VirtualBoxHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/stop",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/stop",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance stopped",
@ -214,16 +214,16 @@ class VirtualBoxHandler:
def stop(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.stop()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/suspend",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/suspend",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance suspended",
@ -234,16 +234,16 @@ class VirtualBoxHandler:
def suspend(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.suspend()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/resume",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/resume",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance resumed",
@ -254,16 +254,16 @@ class VirtualBoxHandler:
def resume(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.resume()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/reload",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/reload",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance reloaded",
@ -274,15 +274,15 @@ class VirtualBoxHandler:
def reload(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.reload()
response.set_status(204)

@Route.post(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter where the nio should be added",
"port_number": "Port on the adapter (always 0)"
},
@ -297,7 +297,7 @@ class VirtualBoxHandler:
def create_nio(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
nio_type = request.json["type"]
if nio_type not in ("nio_udp", "nio_nat"):
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
@ -308,10 +308,10 @@ class VirtualBoxHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter from where the nio should be removed",
"port_number": "Port on the adapter (always 0)"
},
@ -324,15 +324,15 @@ class VirtualBoxHandler:
def delete_nio(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]))
response.set_status(204)

@Route.post(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter to start a packet capture",
"port_number": "Port on the adapter (always 0)"
},
@ -342,21 +342,21 @@ class VirtualBoxHandler:
404: "Instance doesn't exist"
},
description="Start a packet capture on a VirtualBox VM instance",
input=VM_CAPTURE_SCHEMA)
input=NODE_CAPTURE_SCHEMA)
def start_capture(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
adapter_number = int(request.match_info["adapter_number"])
pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"])
yield from vm.start_capture(adapter_number, pcap_file_path)
response.json({"pcap_file_path": pcap_file_path})

@Route.post(
r"/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter to stop a packet capture",
"port_number": "Port on the adapter (always 0)"
},
@ -369,6 +369,6 @@ class VirtualBoxHandler:
def stop_capture(request, response):

vbox_manager = VirtualBox.instance()
vm = vbox_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
vm.stop_capture(int(request.match_info["adapter_number"]))
response.set_status(204)
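
Across all five modules the lifecycle actions now share one URL shape: POST /projects/{project_id}/{module}/nodes/{node_id}/{action} with an empty body, answered with 204 in most cases (200 where the handler also returns a body). A hedged helper a client might build on top of that uniformity; the server address is an assumption and requests is a third-party library:

import requests

BASE = "http://localhost:3080/v2/compute"  # assumed server address

def node_action(module, project_id, node_id, action):
    """POST an empty-body lifecycle action (start/stop/suspend/resume/reload)
    to the renamed /nodes/ routes and fail loudly on any non-2xx answer."""
    url = "{}/projects/{}/{}/nodes/{}/{}".format(
        BASE, project_id, module, node_id, action)
    r = requests.post(url)
    r.raise_for_status()  # accepts both 200 and 204 replies
    return r.status_code

# Usage with placeholder UUIDs:
# node_action("virtualbox", "11111111-...", "66666666-...", "stop")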
@ -22,7 +22,7 @@ from ....web.route import Route
|
||||
from ....schemas.vmware import VMWARE_CREATE_SCHEMA
|
||||
from ....schemas.vmware import VMWARE_UPDATE_SCHEMA
|
||||
from ....schemas.vmware import VMWARE_OBJECT_SCHEMA
|
||||
from ....schemas.vm import VM_CAPTURE_SCHEMA
|
||||
from ....schemas.node import NODE_CAPTURE_SCHEMA
|
||||
from ....schemas.nio import NIO_SCHEMA
|
||||
from ....compute.vmware import VMware
|
||||
from ....compute.project_manager import ProjectManager
|
||||
@ -49,7 +49,7 @@ class VMwareHandler:
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/vmware/vms",
|
||||
r"/projects/{project_id}/vmware/nodes",
|
||||
parameters={
|
||||
"project_id": "UUID for the project"
|
||||
},
|
||||
@ -64,15 +64,15 @@ class VMwareHandler:
|
||||
def create(request, response):
|
||||
|
||||
vmware_manager = VMware.instance()
|
||||
vm = yield from vmware_manager.create_vm(request.json.pop("name"),
|
||||
request.match_info["project_id"],
|
||||
request.json.get("vm_id"),
|
||||
request.json.pop("vmx_path"),
|
||||
request.json.pop("linked_clone"),
|
||||
console=request.json.get("console", None))
|
||||
vm = yield from vmware_manager.create_node(request.json.pop("name"),
|
||||
request.match_info["project_id"],
|
||||
request.json.get("node_id"),
|
||||
request.json.pop("vmx_path"),
|
||||
request.json.pop("linked_clone"),
|
||||
console=request.json.get("console", None))
|
||||
|
||||
for name, value in request.json.items():
|
||||
if name != "vm_id":
|
||||
if name != "node_id":
|
||||
if hasattr(vm, name) and getattr(vm, name) != value:
|
||||
setattr(vm, name, value)
|
||||
|
||||
@ -81,10 +81,10 @@ class VMwareHandler:
|
||||
|
||||
@classmethod
|
||||
@Route.get(
|
||||
r"/projects/{project_id}/vmware/vms/{vm_id}",
|
||||
r"/projects/{project_id}/vmware/nodes/{node_id}",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance"
|
||||
"node_id": "UUID for the instance"
|
||||
},
|
||||
status_codes={
|
||||
200: "Success",
|
||||
@ -96,15 +96,15 @@ class VMwareHandler:
|
||||
def show(request, response):
|
||||
|
||||
vmware_manager = VMware.instance()
|
||||
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
response.json(vm)
|
||||
|
||||
@classmethod
|
||||
@Route.put(
|
||||
r"/projects/{project_id}/vmware/vms/{vm_id}",
|
||||
r"/projects/{project_id}/vmware/nodes/{node_id}",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance"
|
||||
"node_id": "UUID for the instance"
|
||||
},
|
||||
status_codes={
|
||||
200: "Instance updated",
|
||||
@ -118,7 +118,7 @@ class VMwareHandler:
|
||||
def update(request, response):
|
||||
|
||||
vmware_manager = VMware.instance()
|
||||
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
|
||||
for name, value in request.json.items():
|
||||
if hasattr(vm, name) and getattr(vm, name) != value:
|
||||
@ -128,10 +128,10 @@ class VMwareHandler:
|
||||
|
||||
@classmethod
|
||||
@Route.delete(
|
||||
r"/projects/{project_id}/vmware/vms/{vm_id}",
|
||||
r"/projects/{project_id}/vmware/nodes/{node_id}",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance"
|
||||
"node_id": "UUID for the instance"
|
||||
},
|
||||
status_codes={
|
||||
204: "Instance deleted",
|
||||
@ -143,15 +143,15 @@ class VMwareHandler:
|
||||
|
||||
# check the project_id exists
|
||||
ProjectManager.instance().get_project(request.match_info["project_id"])
|
||||
yield from VMware.instance().delete_vm(request.match_info["vm_id"])
|
||||
yield from VMware.instance().delete_node(request.match_info["node_id"])
|
||||
response.set_status(204)
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/vmware/vms/{vm_id}/start",
|
||||
r"/projects/{project_id}/vmware/nodes/{node_id}/start",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance"
|
||||
"node_id": "UUID for the instance"
|
||||
},
|
||||
status_codes={
|
||||
204: "Instance started",
|
||||
@ -162,7 +162,7 @@ class VMwareHandler:
|
||||
def start(request, response):
|
||||
|
||||
vmware_manager = VMware.instance()
|
||||
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
|
||||
if vm.check_hw_virtualization():
|
||||
pm = ProjectManager.instance()
|
||||
if pm.check_hardware_virtualization(vm) is False:
|
||||
@ -172,10 +172,10 @@ class VMwareHandler:
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/vmware/vms/{vm_id}/stop",
|
||||
r"/projects/{project_id}/vmware/nodes/{node_id}/stop",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance"
|
||||
"node_id": "UUID for the instance"
|
||||
},
|
||||
status_codes={
|
||||
204: "Instance stopped",
|
||||
@ -186,16 +186,16 @@ class VMwareHandler:
|
def stop(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.stop()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/vmware/vms/{vm_id}/suspend",
r"/projects/{project_id}/vmware/nodes/{node_id}/suspend",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance suspended",

@ -206,16 +206,16 @@ class VMwareHandler:
def suspend(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.suspend()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/vmware/vms/{vm_id}/resume",
r"/projects/{project_id}/vmware/nodes/{node_id}/resume",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance resumed",

@ -226,16 +226,16 @@ class VMwareHandler:
def resume(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.resume()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/vmware/vms/{vm_id}/reload",
r"/projects/{project_id}/vmware/nodes/{node_id}/reload",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance reloaded",

@ -246,15 +246,15 @@ class VMwareHandler:
def reload(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.reload()
response.set_status(204)

@Route.post(
r"/projects/{project_id}/vmware/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter where the nio should be added",
"port_number": "Port on the adapter (always 0)"
},

@ -269,7 +269,7 @@ class VMwareHandler:
def create_nio(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
nio_type = request.json["type"]
if nio_type not in ("nio_udp", "nio_vmnet", "nio_nat"):
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))

@ -280,10 +280,10 @@ class VMwareHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/vmware/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter from where the nio should be removed",
"port_number": "Port on the adapter (always 0)"
},

@ -296,15 +296,15 @@ class VMwareHandler:
def delete_nio(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]))
response.set_status(204)

@Route.post(
r"/projects/{project_id}/vmware/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter to start a packet capture",
"port_number": "Port on the adapter (always 0)"
},

@ -314,21 +314,21 @@ class VMwareHandler:
404: "Instance doesn't exist",
},
description="Start a packet capture on a VMware VM instance",
input=VM_CAPTURE_SCHEMA)
input=NODE_CAPTURE_SCHEMA)
def start_capture(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
adapter_number = int(request.match_info["adapter_number"])
pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"])
yield from vm.start_capture(adapter_number, pcap_file_path)
response.json({"pcap_file_path": pcap_file_path})
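For context, a minimal client-side sketch of the capture request this hunk handles: the handler joins the submitted file name to the project's capture working directory and replies with the resulting pcap path. The /v2/compute URL prefix, the 3080 port, and both UUIDs are assumptions, not part of the diff.

    import json
    import urllib.request

    PROJECT_ID = "00000000-0000-0000-0000-000000000000"  # placeholder project UUID
    NODE_ID = "11111111-1111-1111-1111-111111111111"     # placeholder node UUID

    # POST the capture file name to the renamed route (prefix is assumed).
    url = ("http://127.0.0.1:3080/v2/compute/projects/{}/vmware/nodes/{}"
           "/adapters/0/ports/0/start_capture").format(PROJECT_ID, NODE_ID)
    body = json.dumps({"capture_file_name": "capture.pcap"}).encode()
    req = urllib.request.Request(url, data=body,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp))  # e.g. {"pcap_file_path": "/path/to/capture.pcap"}
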
@Route.post(
r"/projects/{project_id}/vmware/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Adapter to stop a packet capture",
"port_number": "Port on the adapter (always 0)"
},

@ -341,17 +341,17 @@ class VMwareHandler:
def stop_capture(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
adapter_number = int(request.match_info["adapter_number"])
yield from vm.stop_capture(adapter_number)
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/vmware/vms/{vm_id}/interfaces/vmnet",
r"/projects/{project_id}/vmware/nodes/{node_id}/interfaces/vmnet",
parameters={
"project_id": "The UUID of the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
},
status_codes={
201: "VMnet interface allocated",

@ -360,7 +360,7 @@ class VMwareHandler:
def allocate_vmnet(request, response):

vmware_manager = VMware.instance()
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
vmware_manager.refresh_vmnet_list(ubridge=False)
vmnet = vmware_manager.allocate_vmnet()
vm.vmnets.append(vmnet)

@ -32,7 +32,7 @@ class VPCSHandler:

@classmethod
@Route.post(
r"/projects/{project_id}/vpcs/vms",
r"/projects/{project_id}/vpcs/nodes",
parameters={
"project_id": "UUID for the project"
},

@ -47,20 +47,20 @@ class VPCSHandler:
def create(request, response):

vpcs = VPCS.instance()
vm = yield from vpcs.create_vm(request.json["name"],
request.match_info["project_id"],
request.json.get("vm_id"),
console=request.json.get("console"),
startup_script=request.json.get("startup_script"))
node = yield from vpcs.create_node(request.json["name"],
request.match_info["project_id"],
request.json.get("node_id"),
console=request.json.get("console"),
startup_script=request.json.get("startup_script"))
response.set_status(201)
response.json(vm)
response.json(node)

@classmethod
@Route.get(
r"/projects/{project_id}/vpcs/vms/{vm_id}",
r"/projects/{project_id}/vpcs/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Success",

@ -72,15 +72,15 @@ class VPCSHandler:
def show(request, response):

vpcs_manager = VPCS.instance()
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
response.json(vm)

@classmethod
@Route.put(
r"/projects/{project_id}/vpcs/vms/{vm_id}",
r"/projects/{project_id}/vpcs/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
200: "Instance updated",

@ -94,7 +94,7 @@ class VPCSHandler:
def update(request, response):

vpcs_manager = VPCS.instance()
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
vm.name = request.json.get("name", vm.name)
vm.console = request.json.get("console", vm.console)
vm.startup_script = request.json.get("startup_script", vm.startup_script)

@ -102,10 +102,10 @@ class VPCSHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/vpcs/vms/{vm_id}",
r"/projects/{project_id}/vpcs/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance deleted",

@ -115,15 +115,15 @@ class VPCSHandler:
description="Delete a VPCS instance")
def delete(request, response):

yield from VPCS.instance().delete_vm(request.match_info["vm_id"])
yield from VPCS.instance().delete_node(request.match_info["node_id"])
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/vpcs/vms/{vm_id}/start",
r"/projects/{project_id}/vpcs/nodes/{node_id}/start",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance started",

@ -135,16 +135,16 @@ class VPCSHandler:
def start(request, response):

vpcs_manager = VPCS.instance()
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.start()
response.json(vm)

@classmethod
@Route.post(
r"/projects/{project_id}/vpcs/vms/{vm_id}/stop",
r"/projects/{project_id}/vpcs/nodes/{node_id}/stop",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
"node_id": "UUID for the instance"
},
status_codes={
204: "Instance stopped",

@ -155,16 +155,16 @@ class VPCSHandler:
def stop(request, response):

vpcs_manager = VPCS.instance()
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.stop()
response.set_status(204)

@classmethod
@Route.post(
r"/projects/{project_id}/vpcs/vms/{vm_id}/reload",
r"/projects/{project_id}/vpcs/nodes/{node_id}/reload",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
},
status_codes={
204: "Instance reloaded",

@ -175,15 +175,15 @@ class VPCSHandler:
def reload(request, response):

vpcs_manager = VPCS.instance()
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.reload()
response.set_status(204)

@Route.post(
r"/projects/{project_id}/vpcs/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Network adapter where the nio is located",
"port_number": "Port where the nio should be added"
},

@ -198,7 +198,7 @@ class VPCSHandler:
def create_nio(request, response):

vpcs_manager = VPCS.instance()
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
nio_type = request.json["type"]
if nio_type not in ("nio_udp", "nio_tap"):
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))

@ -209,10 +209,10 @@ class VPCSHandler:

@classmethod
@Route.delete(
r"/projects/{project_id}/vpcs/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"node_id": "UUID for the instance",
"adapter_number": "Network adapter where the nio is located",
"port_number": "Port from where the nio should be removed"
},

@ -225,6 +225,6 @@ class VPCSHandler:
def delete_nio(request, response):

vpcs_manager = VPCS.instance()
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
vm.port_remove_nio_binding(int(request.match_info["port_number"]))
response.set_status(204)

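The create handler above reads name, node_id, console, and startup_script from the request body, and generates a UUID when node_id is omitted. A sketch of a matching payload; the route prefix, UUID, and console port are illustrative, not taken from the diff.

    # Hypothetical body for POST /v2/compute/projects/{project_id}/vpcs/nodes
    payload = {
        "name": "PC 1",
        "node_id": "00010203-0405-0607-0809-0a0b0c0d0e0f",  # optional; generated if omitted
        "console": 5000,
        "startup_script": "set pcname PC 1",
    }
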
@ -18,5 +18,5 @@
from .compute_handler import ComputeHandler
from .project_handler import ProjectHandler
from .version_handler import VersionHandler
from .vm_handler import VMHandler
from .node_handler import NodeHandler
from .link_handler import LinkHandler

@ -44,7 +44,7 @@ class ComputeHandler:
output=COMPUTE_OBJECT_SCHEMA)
def create(request, response):

compute = yield from Controller.instance().addCompute(**request.json)
compute = yield from Controller.instance().add_compute(**request.json)
response.set_status(201)
response.json(compute)

@ -16,11 +16,9 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import aiohttp
import asyncio

from ....web.route import Route
from ....schemas.link import LINK_OBJECT_SCHEMA, LINK_CAPTURE_SCHEMA
from ....controller.project import Project
from ....controller import Controller


@ -45,12 +43,10 @@ class LinkHandler:
def create(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
link = yield from project.addLink()
for vm in request.json["vms"]:
yield from link.addVM(project.getVM(vm["vm_id"]),
vm["adapter_number"],
vm["port_number"])
project = controller.get_project(request.match_info["project_id"])
link = yield from project.add_link()
for node in request.json["nodes"]:
yield from link.add_node(project.get_node(node["node_id"]), node["adapter_number"], node["port_number"])
yield from link.create()
response.set_status(201)
response.json(link)

@ -72,8 +68,8 @@ class LinkHandler:
def start_capture(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
link = project.getLink(request.match_info["link_id"])
project = controller.get_project(request.match_info["project_id"])
link = project.get_link(request.match_info["link_id"])
yield from link.start_capture(data_link_type=request.json.get("data_link_type", "DLT_EN10MB"), capture_file_name=request.json.get("capture_file_name"))
response.set_status(201)
response.json(link)

@ -93,8 +89,8 @@ class LinkHandler:
def stop_capture(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
link = project.getLink(request.match_info["link_id"])
project = controller.get_project(request.match_info["project_id"])
link = project.get_link(request.match_info["link_id"])
yield from link.stop_capture()
response.set_status(201)
response.json(link)

@ -114,8 +110,8 @@ class LinkHandler:
def delete(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
link = project.getLink(request.match_info["link_id"])
project = controller.get_project(request.match_info["project_id"])
link = project.get_link(request.match_info["link_id"])
yield from link.delete()
response.set_status(204)
response.json(link)

@ -136,8 +132,8 @@ class LinkHandler:
def pcap(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
link = project.getLink(request.match_info["link_id"])
project = controller.get_project(request.match_info["project_id"])
link = project.get_link(request.match_info["link_id"])

if link.capture_file_path is None:
raise aiohttp.web.HTTPNotFound(text="pcap file not found")

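After this change the controller expects a "nodes" array instead of "vms" when creating a link; the shape below follows the LINK_OBJECT_SCHEMA changes further down in this commit (node_id, adapter_number, and port_number are required per entry). The UUIDs and route prefix are placeholders.

    # Hypothetical body for POST /v2/projects/{project_id}/links
    payload = {
        "nodes": [
            {"node_id": "11111111-1111-1111-1111-111111111111",
             "adapter_number": 0, "port_number": 0},
            {"node_id": "22222222-2222-2222-2222-222222222222",
             "adapter_number": 0, "port_number": 0},
        ]
    }
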
@ -16,19 +16,18 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from ....web.route import Route
from ....schemas.vm import VM_OBJECT_SCHEMA, VM_UPDATE_SCHEMA
from ....controller.project import Project
from ....schemas.node import NODE_OBJECT_SCHEMA, NODE_UPDATE_SCHEMA
from ....controller import Controller


class VMHandler:
class NodeHandler:
"""
API entry point for VM
API entry point for node
"""

@classmethod
@Route.post(
r"/projects/{project_id}/vms",
r"/projects/{project_id}/nodes",
parameters={
"project_id": "UUID for the project"
},

@ -36,155 +35,155 @@ class VMHandler:
201: "Instance created",
400: "Invalid request"
},
description="Create a new VM instance",
input=VM_OBJECT_SCHEMA,
output=VM_OBJECT_SCHEMA)
description="Create a new node instance",
input=NODE_OBJECT_SCHEMA,
output=NODE_OBJECT_SCHEMA)
def create(request, response):

controller = Controller.instance()
compute = controller.getCompute(request.json.pop("compute_id"))
project = controller.getProject(request.match_info["project_id"])
vm = yield from project.addVM(compute, request.json.pop("vm_id", None), **request.json)
project = controller.get_project(request.match_info["project_id"])
node = yield from project.add_node(compute, request.json.pop("node_id", None), **request.json)
response.set_status(201)
response.json(vm)
response.json(node)

@classmethod
@Route.get(
r"/projects/{project_id}/vms",
r"/projects/{project_id}/nodes",
parameters={
"project_id": "UUID for the project"
},
status_codes={
200: "List of VMS",
200: "List of nodes",
},
description="List VMs of a project")
def list_vms(request, response):
description="List nodes of a project")
def list_nodes(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
response.json([ v for v in project.vms.values() ])
project = controller.get_project(request.match_info["project_id"])
response.json([ v for v in project.nodes.values() ])

@classmethod
@Route.put(
r"/projects/{project_id}/vms/{vm_id}",
r"/projects/{project_id}/nodes/{node_id}",
status_codes={
201: "Instance created",
400: "Invalid request"
},
description="Update a VM instance",
input=VM_UPDATE_SCHEMA,
output=VM_OBJECT_SCHEMA)
description="Update a node instance",
input=NODE_UPDATE_SCHEMA,
output=NODE_OBJECT_SCHEMA)
def update(request, response):
project = Controller.instance().getProject(request.match_info["project_id"])
vm = project.getVM(request.match_info["vm_id"])
project = Controller.instance().get_project(request.match_info["project_id"])
node = project.get_node(request.match_info["node_id"])

# Ignore this, because we use it only in create
request.json.pop("vm_id", None)
request.json.pop("vm_type", None)
request.json.pop("node_id", None)
request.json.pop("node_type", None)
request.json.pop("compute_id", None)

yield from vm.update(**request.json)
yield from node.update(**request.json)
response.set_status(201)
response.json(vm)
response.json(node)

@classmethod
@Route.post(
r"/projects/{project_id}/vms/{vm_id}/start",
r"/projects/{project_id}/nodes/{node_id}/start",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the VM"
"node_id": "UUID for the node"
},
status_codes={
201: "Instance created",
400: "Invalid request"
},
description="Start a VM instance",
output=VM_OBJECT_SCHEMA)
description="Start a node instance",
output=NODE_OBJECT_SCHEMA)
def start(request, response):

project = Controller.instance().getProject(request.match_info["project_id"])
vm = project.getVM(request.match_info["vm_id"])
yield from vm.start()
project = Controller.instance().get_project(request.match_info["project_id"])
node = project.get_node(request.match_info["node_id"])
yield from node.start()
response.set_status(201)
response.json(vm)
response.json(node)

@classmethod
@Route.post(
r"/projects/{project_id}/vms/{vm_id}/stop",
r"/projects/{project_id}/nodes/{node_id}/stop",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the VM"
"node_id": "UUID for the node"
},
status_codes={
201: "Instance created",
400: "Invalid request"
},
description="Start a VM instance",
output=VM_OBJECT_SCHEMA)
description="Start a node instance",
output=NODE_OBJECT_SCHEMA)
def stop(request, response):

project = Controller.instance().getProject(request.match_info["project_id"])
vm = project.getVM(request.match_info["vm_id"])
yield from vm.stop()
project = Controller.instance().get_project(request.match_info["project_id"])
node = project.get_node(request.match_info["node_id"])
yield from node.stop()
response.set_status(201)
response.json(vm)
response.json(node)

@classmethod
@Route.post(
r"/projects/{project_id}/vms/{vm_id}/suspend",
r"/projects/{project_id}/nodes/{node_id}/suspend",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the VM"
"node_id": "UUID for the node"
},
status_codes={
201: "Instance created",
400: "Invalid request"
},
description="Start a VM instance",
output=VM_OBJECT_SCHEMA)
description="Start a node instance",
output=NODE_OBJECT_SCHEMA)
def suspend(request, response):

project = Controller.instance().getProject(request.match_info["project_id"])
vm = project.getVM(request.match_info["vm_id"])
yield from vm.suspend()
project = Controller.instance().get_project(request.match_info["project_id"])
node = project.get_node(request.match_info["node_id"])
yield from node.suspend()
response.set_status(201)
response.json(vm)
response.json(node)

@classmethod
@Route.post(
r"/projects/{project_id}/vms/{vm_id}/reload",
r"/projects/{project_id}/nodes/{node_id}/reload",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the VM"
"node_id": "UUID for the node"
},
status_codes={
201: "Instance created",
400: "Invalid request"
},
description="Reload a VM instance",
output=VM_OBJECT_SCHEMA)
description="Reload a node instance",
output=NODE_OBJECT_SCHEMA)
def reload(request, response):

project = Controller.instance().getProject(request.match_info["project_id"])
vm = project.getVM(request.match_info["vm_id"])
yield from vm.reload()
project = Controller.instance().get_project(request.match_info["project_id"])
node = project.get_node(request.match_info["node_id"])
yield from node.reload()
response.set_status(201)
response.json(vm)
response.json(node)

@classmethod
@Route.delete(
r"/projects/{project_id}/vms/{vm_id}",
r"/projects/{project_id}/nodes/{node_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the VM"
"node_id": "UUID for the node"
},
status_codes={
201: "Instance deleted",
400: "Invalid request"
},
description="Delete a VM instance")
description="Delete a node instance")
def delete(request, response):
project = Controller.instance().getProject(request.match_info["project_id"])
vm = project.getVM(request.match_info["vm_id"])
yield from vm.destroy()
project = Controller.instance().get_project(request.match_info["project_id"])
node = project.get_node(request.match_info["node_id"])
yield from node.destroy()
response.set_status(201)

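NodeHandler.create pops compute_id and node_id before forwarding the remaining body to project.add_node, and NODE_OBJECT_SCHEMA (later in this commit) requires name, node_type, and compute_id. A sketch of a minimal request body; the "local" compute identifier is a guess, not taken from the diff.

    # Hypothetical body for POST /v2/projects/{project_id}/nodes
    payload = {
        "name": "PC 1",
        "node_type": "vpcs",
        "compute_id": "local",  # assumed compute identifier
    }
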
@ -75,7 +75,7 @@ class ProjectHandler:
})
def get(request, response):
controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
project = controller.get_project(request.match_info["project_id"])
response.json(project)

@classmethod
@ -92,7 +92,7 @@ class ProjectHandler:
def commit(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
project = controller.get_project(request.match_info["project_id"])
yield from project.commit()
response.set_status(204)

@ -110,7 +110,7 @@ class ProjectHandler:
def close(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
project = controller.get_project(request.match_info["project_id"])
yield from project.close()
controller.removeProject(project)
response.set_status(204)

@ -129,7 +129,7 @@ class ProjectHandler:
def delete(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
project = controller.get_project(request.match_info["project_id"])
yield from project.delete()
controller.removeProject(project)
response.set_status(204)

@ -148,7 +148,7 @@ class ProjectHandler:
def notification(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
project = controller.get_project(request.match_info["project_id"])

response.content_type = "application/json"
response.set_status(200)

@ -179,7 +179,7 @@ class ProjectHandler:
def notification_ws(request, response):

controller = Controller.instance()
project = controller.getProject(request.match_info["project_id"])
project = controller.get_project(request.match_info["project_id"])

ws = aiohttp.web.WebSocketResponse()
yield from ws.prepare(request)

@ -61,7 +61,7 @@ class IndexHandler:
def project(request, response):
controller = Controller.instance()
response.template("project.html",
project=controller.getProject(request.match_info["project_id"]))
project=controller.get_project(request.match_info["project_id"]))

@classmethod
@Route.get(

@ -21,7 +21,7 @@ DOCKER_CREATE_SCHEMA = {
"description": "Request validation to create a new Docker container",
"type": "object",
"properties": {
"vm_id": {
"node_id": {
"description": "Docker VM instance identifier",
"type": "string",
"minLength": 36,

@ -158,7 +158,7 @@ DOCKER_OBJECT_SCHEMA = {
"type": "string",
"minLength": 1,
},
"vm_id": {
"node_id": {
"description": "Docker container instance UUID",
"type": "string",
"minLength": 36,

@ -235,7 +235,7 @@ DOCKER_OBJECT_SCHEMA = {
}
},
"additionalProperties": False,
"required": ["vm_id", "project_id", "image", "container_id", "adapters", "aux", "console", "console_type", "console_resolution", "start_command", "environment", "vm_directory"]
"required": ["node_id", "project_id", "image", "container_id", "adapters", "aux", "console", "console_type", "console_resolution", "start_command", "environment", "vm_directory"]
}

@ -21,8 +21,8 @@ VM_CREATE_SCHEMA = {
"description": "Request validation to create a new Dynamips VM instance",
"type": "object",
"properties": {
"vm_id": {
"description": "Dynamips VM instance identifier",
"node_id": {
"description": "Node identifier",
"oneOf": [
{"type": "string",
"minLength": 36,

@ -509,15 +509,15 @@ VM_OBJECT_SCHEMA = {
"description": "ID to use with Dynamips",
"type": "integer"
},
"vm_id": {
"description": "Dynamips router instance UUID",
"node_id": {
"description": "Node instance UUID",
"type": "string",
"minLength": 36,
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
},
"vm_directory": {
"decription": "Path to the VM working directory",
"node_directory": {
"decription": "Path to the node working directory",
"type": "string"
},
"project_id": {

@ -749,7 +749,7 @@ VM_OBJECT_SCHEMA = {
},
},
"additionalProperties": False,
"required": ["name", "vm_id", "project_id", "dynamips_id", "console", "console_type"]
"required": ["name", "node_id", "project_id", "dynamips_id", "console", "console_type"]
}

VM_CONFIGS_SCHEMA = {

@ -26,7 +26,7 @@ IOU_CREATE_SCHEMA = {
"type": "string",
"minLength": 1,
},
"vm_id": {
"node_id": {
"description": "IOU VM identifier",
"oneOf": [
{"type": "string",

@ -187,7 +187,7 @@ IOU_OBJECT_SCHEMA = {
"type": "string",
"minLength": 1,
},
"vm_id": {
"node_id": {
"description": "IOU VM UUID",
"type": "string",
"minLength": 36,

@ -261,7 +261,7 @@ IOU_OBJECT_SCHEMA = {
}
},
"additionalProperties": False,
"required": ["name", "vm_id", "console", "project_id", "path", "md5sum", "serial_adapters", "ethernet_adapters",
"required": ["name", "node_id", "console", "project_id", "path", "md5sum", "serial_adapters", "ethernet_adapters",
"ram", "nvram", "l1_keepalives", "startup_config", "private_config", "use_default_iou_values",
"command_line"]
}

@ -28,13 +28,13 @@ LINK_OBJECT_SCHEMA = {
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
},
"vms": {
"nodes": {
"description": "List of the VMS",
"type": "array",
"items": {
"type": "object",
"properties": {
"vm_id": {
"node_id": {
"description": "VM identifier",
"type": "string",
"minLength": 36,

@ -50,7 +50,7 @@ LINK_OBJECT_SCHEMA = {
"type": "integer"
}
},
"required": ["vm_id", "adapter_number", "port_number"],
"required": ["node_id", "adapter_number", "port_number"],
"additionalProperties": False
}
},

@ -67,7 +67,7 @@ LINK_OBJECT_SCHEMA = {
"type": ["string", "null"]
}
},
"required": ["vms"],
"required": ["nodes"],
"additionalProperties": False
}

@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.


VM_LIST_IMAGES_SCHEMA = {
NODE_LIST_IMAGES_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "List of disk images",
"type": "array",

@ -43,7 +43,7 @@ VM_LIST_IMAGES_SCHEMA = {
}


VM_CAPTURE_SCHEMA = {
NODE_CAPTURE_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Request validation to start a packet capture on a port",
"type": "object",

@ -63,9 +63,9 @@ VM_CAPTURE_SCHEMA = {
}


VM_OBJECT_SCHEMA = {
NODE_OBJECT_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "A VM object",
"description": "A node object",
"type": "object",
"properties": {
"compute_id": {

@ -76,19 +76,19 @@ VM_OBJECT_SCHEMA = {
"description": "Project identifier",
"type": "string"
},
"vm_id": {
"description": "VM identifier",
"node_id": {
"description": "Node identifier",
"type": "string",
"minLength": 36,
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
},
"vm_type": {
"description": "Type of VM",
"node_type": {
"description": "Type of node",
"enum": ["docker", "dynamips", "vpcs", "virtualbox", "vmware", "iou", "qemu"]
},
"name": {
"description": "VM name",
"description": "Node name",
"type": "string",
"minLength": 1,
},

@ -108,8 +108,8 @@ VM_OBJECT_SCHEMA = {
}
},
"additionalProperties": False,
"required": ["name", "vm_type", "compute_id"]
"required": ["name", "node_type", "compute_id"]
}

VM_UPDATE_SCHEMA = VM_OBJECT_SCHEMA
del VM_UPDATE_SCHEMA["required"]
NODE_UPDATE_SCHEMA = NODE_OBJECT_SCHEMA
del NODE_UPDATE_SCHEMA["required"]

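Since gns3-server validates request bodies against these draft-04 schemas with the jsonschema package, the rename can be sanity-checked in isolation. Note that, as in the VM version, NODE_UPDATE_SCHEMA is the same dict object as NODE_OBJECT_SCHEMA, so the del also strips "required" from the latter at import time. A small sketch, assuming NODE_OBJECT_SCHEMA is importable from gns3server.schemas.node:

    from jsonschema import validate, ValidationError
    from gns3server.schemas.node import NODE_OBJECT_SCHEMA

    node = {"name": "PC 1", "node_type": "vpcs", "compute_id": "local"}
    validate(node, NODE_OBJECT_SCHEMA)  # a well-formed node object passes

    try:
        validate({"name": "PC 1", "vm_id": "whatever"}, NODE_OBJECT_SCHEMA)
    except ValidationError:
        pass  # renamed keys such as vm_id are rejected (additionalProperties is False)
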
@ -23,7 +23,7 @@ QEMU_CREATE_SCHEMA = {
"description": "Request validation to create a new QEMU VM instance",
"type": "object",
"properties": {
"vm_id": {
"node_id": {
"description": "QEMU VM identifier",
"oneOf": [
{"type": "string",

@ -382,7 +382,7 @@ QEMU_OBJECT_SCHEMA = {
"description": "Request validation for a QEMU VM instance",
"type": "object",
"properties": {
"vm_id": {
"node_id": {
"description": "QEMU VM uuid",
"type": "string",
"minLength": 1,

@ -564,7 +564,7 @@ QEMU_OBJECT_SCHEMA = {
}
},
"additionalProperties": False,
"required": ["vm_id",
"required": ["node_id",
"project_id",
"name",
"usage",

@ -21,7 +21,7 @@ VBOX_CREATE_SCHEMA = {
"description": "Request validation to create a new VirtualBox VM instance",
"type": "object",
"properties": {
"vm_id": {
"node_id": {
"description": "VirtualBox VM instance identifier",
"oneOf": [
{"type": "string",

@ -158,7 +158,7 @@ VBOX_OBJECT_SCHEMA = {
"type": "string",
"minLength": 1,
},
"vm_id": {
"node_id": {
"description": "VirtualBox VM instance UUID",
"type": "string",
"minLength": 36,

@ -222,5 +222,5 @@ VBOX_OBJECT_SCHEMA = {
},
},
"additionalProperties": False,
"required": ["name", "vm_id", "project_id", "vm_directory"]
"required": ["name", "node_id", "project_id", "vm_directory"]
}

@ -21,7 +21,7 @@ VMWARE_CREATE_SCHEMA = {
"description": "Request validation to create a new VMware VM instance",
"type": "object",
"properties": {
"vm_id": {
"node_id": {
"description": "VMware VM instance identifier",
"type": "string",
"minLength": 36,

@ -151,7 +151,7 @@ VMWARE_OBJECT_SCHEMA = {
"type": "string",
"minLength": 1,
},
"vm_id": {
"node_id": {
"description": "VMware VM instance UUID",
"type": "string",
"minLength": 36,

@ -213,5 +213,5 @@ VMWARE_OBJECT_SCHEMA = {
},
},
"additionalProperties": False,
"required": ["name", "vm_id", "project_id"]
"required": ["name", "node_id", "project_id"]
}

@ -26,7 +26,7 @@ VPCS_CREATE_SCHEMA = {
"type": "string",
"minLength": 1,
},
"vm_id": {
"node_id": {
"description": "VPCS VM identifier",
"oneOf": [
{"type": "string",

@ -97,7 +97,7 @@ VPCS_OBJECT_SCHEMA = {
"type": "string",
"minLength": 1,
},
"vm_id": {
"node_id": {
"description": "VPCS VM UUID",
"type": "string",
"minLength": 36,

@ -143,5 +143,5 @@ VPCS_OBJECT_SCHEMA = {
}
},
"additionalProperties": False,
"required": ["name", "vm_id", "status", "console", "console_type", "project_id", "startup_script_path", "command_line"]
"required": ["name", "node_id", "status", "console", "console_type", "project_id", "startup_script_path", "command_line"]
}

@ -19,21 +19,21 @@ socket.onmessage = function (event) {
Compute status
</h1>
The purpose of this page is to help for GNS3 debug. This can be dropped
in futur GNS3 versions.
in future GNS3 versions.

<h2>Opened projects</h2>
<table border="1">
<tr>
<th>Name</th>
<th>ID</td>
<th>VMs</th>
<th>Nodes</th>
<th>Clients connected</th>
</tr>
{% for project in project_manager.projects %}
<tr>
<td>{{project.name}}</td>
<td>{{project.id}}</td>
<td>{{project.vms|length}}</td>
<td>{{project.nodes|length}}</td>
<td>{{project.listeners|length}}</td>
</tr>
{% endfor %}

@ -12,14 +12,14 @@ in futur GNS3 versions.
<tr>
<th>Name</th>
<th>ID</td>
<th>VMs</th>
<th>Nodes</th>
<th>Links</th>
</tr>
{% for project in controller.projects.values() %}
<tr>
<td><a href="/projects/{{project.id}}">{{project.name}}</a></td>
<td><a href="/projects/{{project.id}}">{{project.id}}</a></td>
<td>{{project.vms|length}}</td>
<td>{{project.nodes|length}}</td>
<td>{{project.links|length}}</td>
</tr>
{% endfor %}

@ -22,7 +22,7 @@ socket.onmessage = function (event) {
The purpose of this page is to help for GNS3 debug. This can be dropped
in futur GNS3 versions.

<h2>VMs</h2>
<h2>Nodes</h2>
<table border="1">
<tr>
<th>Name</th>

@ -30,12 +30,12 @@ in futur GNS3 versions.
<th>Compute</th>
<th>Console</th>
</tr>
{% for vm in project.vms.values() %}
{% for node in project.nodes.values() %}
<tr>
<td>{{vm.name}}</td>
<td>{{vm.id}}</td>
<td>{{vm.compute.id}}</td>
<td><a href="{{vm.console_type}}://{{vm.host}}:{{vm.console}}">Console</a>
<td>{{node.name}}</td>
<td>{{node.id}}</td>
<td>{{node.compute.id}}</td>
<td><a href="{{node.console_type}}://{{node.host}}:{{node.console}}">Console</a>
</tr>
{% endfor %}
</table>

@ -36,16 +36,16 @@ class TelnetServer(threading.Thread):
"""
Mini Telnet Server.

:param vm_name: Virtual machine name
:param pipe_path: path to VM pipe (UNIX socket on Linux/UNIX, Named Pipe on Windows)
:param node_name: node name
:param pipe_path: path to node pipe (UNIX socket on Linux/UNIX, Named Pipe on Windows)
:param host: server host
:param port: server port
"""

def __init__(self, vm_name, pipe_path, host, port):
def __init__(self, node_name, pipe_path, host, port):

threading.Thread.__init__(self)
self._vm_name = vm_name
self._node_name = node_name
self._pipe = pipe_path
self._host = host
self._port = port

@ -101,7 +101,7 @@ class TelnetServer(threading.Thread):
return False

if not self._alive:
log.info("Telnet server for {} is exiting".format(self._vm_name))
log.info("Telnet server for {} is exiting".format(self._node_name))
return True

for sock_fileno in rlist:

@ -117,7 +117,7 @@ class TelnetServer(threading.Thread):
log.error("could not accept new client: {}".format(e))
continue

new_client = TelnetClient(self._vm_name, sock, host, port)
new_client = TelnetClient(self._node_name, sock, host, port)
self._clients[sock.fileno()] = new_client

if self._use_thread and not self._reader_thread:

@ -258,13 +258,13 @@ class TelnetClient(object):
"""
Represents a Telnet client connection.

:param vm_name: VM name
:param node_name: Node name
:param sock: socket connection
:param host: IP of the Telnet client
:param port: port of the Telnet client
"""

def __init__(self, vm_name, sock, host, port):
def __init__(self, node_name, sock, host, port):

self._active = True
self._sock = sock

@ -276,7 +276,7 @@ class TelnetClient(object):
IAC, WILL, BINARY,
IAC, DO, BINARY]))

welcome_msg = "{} console is now available... Press RETURN to get started.\r\n".format(vm_name)
welcome_msg = "{} console is now available... Press RETURN to get started.\r\n".format(node_name)
sock.send(welcome_msg.encode('utf-8'))

def is_active(self):

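A minimal usage sketch for the renamed constructor; the node name, pipe path, and port below are placeholders. Since TelnetServer subclasses threading.Thread, the serving loop is started with start():

    # Hypothetical wiring of the mini Telnet server to a node's console pipe.
    server = TelnetServer("PC 1",           # node_name, shown in the welcome banner
                          "/tmp/pc1.pipe",  # pipe_path (UNIX socket / named pipe)
                          "127.0.0.1",      # host to listen on
                          2301)             # TCP port for Telnet clients
    server.start()  # run() executes in a background thread
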
@ -25,7 +25,7 @@ import traceback

log = logging.getLogger(__name__)

from ..compute.vm_error import VMError
from ..compute.node_error import NodeError
from ..controller.controller_error import ControllerError
from ..ubridge.ubridge_error import UbridgeError
from .response import Response

@ -67,7 +67,7 @@ class Route(object):
_routes = []
_documentation = {}

_vm_locks = {}
_node_locks = {}

@classmethod
def get(cls, path, *args, **kw):

@ -197,8 +197,8 @@ class Route(object):
response = Response(request=request, route=route)
response.set_status(409)
response.json({"message": str(e), "status": 409})
except (VMError, UbridgeError) as e:
log.error("VM error detected: {type}".format(type=type(e)), exc_info=1)
except (NodeError, UbridgeError) as e:
log.error("Node error detected: {type}".format(type=type(e)), exc_info=1)
response = Response(request=request, route=route)
response.set_status(409)
response.json({"message": str(e), "status": 409})

@ -234,39 +234,39 @@ class Route(object):
return response

@asyncio.coroutine
def vm_concurrency(request):
def node_concurrency(request):
"""
To avoid strange effect we prevent concurrency
between the same instance of the vm
between the same instance of the node
"""

if "vm_id" in request.match_info or "device_id" in request.match_info:
vm_id = request.match_info.get("vm_id")
if vm_id is None:
vm_id = request.match_info["device_id"]
if "node_id" in request.match_info or "device_id" in request.match_info:
node_id = request.match_info.get("node_id")
if node_id is None:
node_id = request.match_info["device_id"]

if "compute" in request.path:
type = "compute"
else:
type = "controller"
lock_key = "{}:{}:{}".format(type, request.match_info["project_id"], vm_id)
cls._vm_locks.setdefault(lock_key, {"lock": asyncio.Lock(), "concurrency": 0})
cls._vm_locks[lock_key]["concurrency"] += 1
lock_key = "{}:{}:{}".format(type, request.match_info["project_id"], node_id)
cls._node_locks.setdefault(lock_key, {"lock": asyncio.Lock(), "concurrency": 0})
cls._node_locks[lock_key]["concurrency"] += 1

with (yield from cls._vm_locks[lock_key]["lock"]):
with (yield from cls._node_locks[lock_key]["lock"]):
response = yield from control_schema(request)
cls._vm_locks[lock_key]["concurrency"] -= 1
cls._node_locks[lock_key]["concurrency"] -= 1

# No more waiting requests, garbage collect the lock
if cls._vm_locks[lock_key]["concurrency"] <= 0:
del cls._vm_locks[lock_key]
if cls._node_locks[lock_key]["concurrency"] <= 0:
del cls._node_locks[lock_key]
else:
response = yield from control_schema(request)
return response

cls._routes.append((method, route, vm_concurrency))
cls._routes.append((method, route, node_concurrency))

return vm_concurrency
return node_concurrency
return register

@classmethod

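The node_concurrency wrapper above implements a small per-resource lock registry: one asyncio.Lock per (type, project, node) key, reference-counted so that idle locks are garbage-collected. A standalone sketch of the same pattern in modern async/await form; the names are ours, not the project's:

    import asyncio

    _locks = {}  # key -> {"lock": asyncio.Lock(), "concurrency": waiter count}

    async def run_serialized(key, handler):
        entry = _locks.setdefault(key, {"lock": asyncio.Lock(), "concurrency": 0})
        entry["concurrency"] += 1
        async with entry["lock"]:
            result = await handler()  # only one request per key runs at a time
        entry["concurrency"] -= 1
        if entry["concurrency"] <= 0:
            del _locks[key]  # no request is waiting: drop the lock
        return result
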
@ -53,7 +53,7 @@ def test_json(vm, project):
'image': 'ubuntu',
'name': 'test',
'project_id': project.id,
'vm_id': vm.id,
'node_id': vm.id,
'adapters': 1,
'console': vm.console,
'console_type': 'telnet',

@ -44,13 +44,13 @@ def test_get_application_id(loop, project, iou):
vm1_id = str(uuid.uuid4())
vm2_id = str(uuid.uuid4())
vm3_id = str(uuid.uuid4())
loop.run_until_complete(iou.create_vm("PC 1", project.id, vm1_id))
loop.run_until_complete(iou.create_vm("PC 2", project.id, vm2_id))
loop.run_until_complete(iou.create_node("PC 1", project.id, vm1_id))
loop.run_until_complete(iou.create_node("PC 2", project.id, vm2_id))
assert iou.get_application_id(vm1_id) == 1
assert iou.get_application_id(vm1_id) == 1
assert iou.get_application_id(vm2_id) == 2
loop.run_until_complete(iou.delete_vm(vm1_id))
loop.run_until_complete(iou.create_vm("PC 3", project.id, vm3_id))
loop.run_until_complete(iou.delete_node(vm1_id))
loop.run_until_complete(iou.create_node("PC 3", project.id, vm3_id))
assert iou.get_application_id(vm3_id) == 1


@ -60,9 +60,9 @@ def test_get_application_id_multiple_project(loop, iou):
vm3_id = str(uuid.uuid4())
project1 = ProjectManager.instance().create_project(project_id=str(uuid.uuid4()))
project2 = ProjectManager.instance().create_project(project_id=str(uuid.uuid4()))
loop.run_until_complete(iou.create_vm("PC 1", project1.id, vm1_id))
loop.run_until_complete(iou.create_vm("PC 2", project1.id, vm2_id))
loop.run_until_complete(iou.create_vm("PC 2", project2.id, vm3_id))
loop.run_until_complete(iou.create_node("PC 1", project1.id, vm1_id))
loop.run_until_complete(iou.create_node("PC 2", project1.id, vm2_id))
loop.run_until_complete(iou.create_node("PC 2", project2.id, vm3_id))
assert iou.get_application_id(vm1_id) == 1
assert iou.get_application_id(vm2_id) == 2
assert iou.get_application_id(vm3_id) == 3

@ -71,9 +71,9 @@ def test_get_application_id_multiple_project(loop, iou):
def test_get_application_id_no_id_available(loop, project, iou):
with pytest.raises(IOUError):
for i in range(1, 513):
vm_id = str(uuid.uuid4())
loop.run_until_complete(iou.create_vm("PC {}".format(i), project.id, vm_id))
assert iou.get_application_id(vm_id) == i
node_id = str(uuid.uuid4())
loop.run_until_complete(iou.create_node("PC {}".format(i), project.id, node_id))
assert iou.get_application_id(node_id) == i


def test_get_images_directory(iou, tmpdir):

@ -148,7 +148,7 @@ def test_termination_callback(vm, async_run):
async_run(queue.get(0))  # Ping

(action, event, kwargs) = async_run(queue.get(0))
assert action == "vm.stopped"
assert action == "node.stopped"
assert event == vm


@ -167,7 +167,7 @@ def test_termination_callback_error(vm, tmpdir, async_run):
async_run(queue.get(0))  # Ping

(action, event, kwargs) = queue.get_nowait()
assert action == "vm.stopped"
assert action == "node.stopped"
assert event == vm

(action, event, kwargs) = queue.get_nowait()

tests/compute/test_base_node.py (new file, 123 lines)
@ -0,0 +1,123 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import pytest
import aiohttp
import asyncio
import os
from tests.utils import asyncio_patch


from unittest.mock import patch, MagicMock
from gns3server.compute.vpcs.vpcs_vm import VPCSVM
from gns3server.compute.docker.docker_vm import DockerVM
from gns3server.compute.vpcs.vpcs_error import VPCSError
from gns3server.compute.node_error import NodeError
from gns3server.compute.vpcs import VPCS


@pytest.fixture(scope="module")
def manager(port_manager):
m = VPCS.instance()
m.port_manager = port_manager
return m


@pytest.fixture(scope="function")
def node(project, manager):
return VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)


def test_temporary_directory(project, manager):
node = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
assert isinstance(node.temporary_directory, str)


def test_console(project, manager):
node = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
node.console = 5011
assert node.console == 5011
node.console = None
assert node.console is None


def test_change_console_port(node, port_manager):
port1 = port_manager.get_free_tcp_port(node.project)
port2 = port_manager.get_free_tcp_port(node.project)
port_manager.release_tcp_port(port1, node.project)
port_manager.release_tcp_port(port2, node.project)
node.console = port1
node.console = port2
assert node.console == port2
port_manager.reserve_tcp_port(port1, node.project)


def test_console_vnc_invalid(project, manager):
node = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
node.console_type = "vnc"
with pytest.raises(NodeError):
node.console = 2012


def test_close(node, loop, port_manager):
assert node.console is not None

aux = port_manager.get_free_tcp_port(node.project)
port_manager.release_tcp_port(aux, node.project)

node.aux = aux
port = node.console
assert loop.run_until_complete(asyncio.async(node.close()))
# Raise an exception if the port is not free
port_manager.reserve_tcp_port(port, node.project)
# Raise an exception if the port is not free
port_manager.reserve_tcp_port(aux, node.project)
assert node.console is None
assert node.aux is None

# Called twice closed should return False
assert loop.run_until_complete(asyncio.async(node.close())) is False


def test_aux(project, manager, port_manager):
aux = port_manager.get_free_tcp_port(project)
port_manager.release_tcp_port(aux, project)

node = DockerVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager, "ubuntu", aux=aux)
assert node.aux == aux
node.aux = None
assert node.aux is None


def test_allocate_aux(project, manager):
node = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
assert node.aux is None

# Docker has an aux port by default
node = DockerVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager, "ubuntu")
assert node.aux is not None


def test_change_aux_port(node, port_manager):
port1 = port_manager.get_free_tcp_port(node.project)
port2 = port_manager.get_free_tcp_port(node.project)
port_manager.release_tcp_port(port1, node.project)
port_manager.release_tcp_port(port2, node.project)
node.aux = port1
node.aux = port2
assert node.aux == port2
port_manager.reserve_tcp_port(port1, node.project)

@ -1,123 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import pytest
import aiohttp
import asyncio
import os
from tests.utils import asyncio_patch


from unittest.mock import patch, MagicMock
from gns3server.compute.vpcs.vpcs_vm import VPCSVM
from gns3server.compute.docker.docker_vm import DockerVM
from gns3server.compute.vpcs.vpcs_error import VPCSError
from gns3server.compute.vm_error import VMError
from gns3server.compute.vpcs import VPCS


@pytest.fixture(scope="module")
def manager(port_manager):
    m = VPCS.instance()
    m.port_manager = port_manager
    return m


@pytest.fixture(scope="function")
def vm(project, manager):
    return VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)


def test_temporary_directory(project, manager):
    vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    assert isinstance(vm.temporary_directory, str)


def test_console(project, manager):
    vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    vm.console = 5011
    assert vm.console == 5011
    vm.console = None
    assert vm.console is None


def test_change_console_port(vm, port_manager):
    port1 = port_manager.get_free_tcp_port(vm.project)
    port2 = port_manager.get_free_tcp_port(vm.project)
    port_manager.release_tcp_port(port1, vm.project)
    port_manager.release_tcp_port(port2, vm.project)
    vm.console = port1
    vm.console = port2
    assert vm.console == port2
    port_manager.reserve_tcp_port(port1, vm.project)


def test_console_vnc_invalid(project, manager):
    vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    vm.console_type = "vnc"
    with pytest.raises(VMError):
        vm.console = 2012


def test_close(vm, loop, port_manager):
    assert vm.console is not None

    aux = port_manager.get_free_tcp_port(vm.project)
    port_manager.release_tcp_port(aux, vm.project)

    vm.aux = aux
    port = vm.console
    assert loop.run_until_complete(asyncio.async(vm.close()))
    # Raise an exception if the port is not free
    port_manager.reserve_tcp_port(port, vm.project)
    # Raise an exception if the port is not free
    port_manager.reserve_tcp_port(aux, vm.project)
    assert vm.console is None
    assert vm.aux is None

    # Called twice closed should return False
    assert loop.run_until_complete(asyncio.async(vm.close())) is False


def test_aux(project, manager, port_manager):
    aux = port_manager.get_free_tcp_port(project)
    port_manager.release_tcp_port(aux, project)

    vm = DockerVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager, "ubuntu", aux=aux)
    assert vm.aux == aux
    vm.aux = None
    assert vm.aux is None


def test_allocate_aux(project, manager):
    vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    assert vm.aux is None

    # Docker has an aux port by default
    vm = DockerVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager, "ubuntu")
    assert vm.aux is not None


def test_change_aux_port(vm, port_manager):
    port1 = port_manager.get_free_tcp_port(vm.project)
    port2 = port_manager.get_free_tcp_port(vm.project)
    port_manager.release_tcp_port(port1, vm.project)
    port_manager.release_tcp_port(port2, vm.project)
    vm.aux = port1
    vm.aux = port2
    assert vm.aux == port2
    port_manager.reserve_tcp_port(port1, vm.project)
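Both the deleted VM tests above and their node-based replacements at the top of this diff lean on one contract of the compute's port manager: get_free_tcp_port() hands out a reserved port, release_tcp_port() frees it, and reserve_tcp_port() raises if the port is still held, which is how test_close proves that close() really released the console and aux ports. A minimal sketch of that contract, using a hypothetical toy class rather than the real PortManager:

# Toy stand-in for the compute's port manager (illustrative only).
# reserve_tcp_port() failing on a busy port is the property the tests
# above rely on when they assert that close() freed console/aux.
class ToyPortManager:

    def __init__(self, start=2001):
        self._next = start
        self._used = set()

    def get_free_tcp_port(self, project):
        port = self._next
        self._next += 1
        self._used.add(port)
        return port

    def release_tcp_port(self, port, project):
        self._used.discard(port)

    def reserve_tcp_port(self, port, project):
        if port in self._used:
            raise ValueError("TCP port {} already in use".format(port))
        self._used.add(port)


manager = ToyPortManager()
port = manager.get_free_tcp_port(project=None)
manager.release_tcp_port(port, project=None)
manager.reserve_tcp_port(port, project=None)  # succeeds only because the port was released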
@ -23,7 +23,7 @@ from unittest.mock import patch

from gns3server.compute.vpcs import VPCS
from gns3server.compute.qemu import Qemu
from gns3server.compute.vm_error import VMError
from gns3server.compute.node_error import NodeError
from gns3server.utils import force_unix_path


@ -43,48 +43,48 @@ def qemu(port_manager):
    return qemu


def test_create_vm_new_topology(loop, project, vpcs):
    vm_id = str(uuid.uuid4())
    vm = loop.run_until_complete(vpcs.create_vm("PC 1", project.id, vm_id))
    assert vm in project.vms
def test_create_node_new_topology(loop, project, vpcs):
    node_id = str(uuid.uuid4())
    node = loop.run_until_complete(vpcs.create_node("PC 1", project.id, node_id))
    assert node in project.nodes


def test_create_twice_same_vm_new_topology(loop, project, vpcs):
    project._vms = set()
    vm_id = str(uuid.uuid4())
    vm = loop.run_until_complete(vpcs.create_vm("PC 1", project.id, vm_id, console=2222))
    assert vm in project.vms
    assert len(project.vms) == 1
    vm = loop.run_until_complete(vpcs.create_vm("PC 2", project.id, vm_id, console=2222))
    assert len(project.vms) == 1
def test_create_twice_same_node_new_topology(loop, project, vpcs):
    project._nodes = set()
    node_id = str(uuid.uuid4())
    node = loop.run_until_complete(vpcs.create_node("PC 1", project.id, node_id, console=2222))
    assert node in project.nodes
    assert len(project.nodes) == 1
    node = loop.run_until_complete(vpcs.create_node("PC 2", project.id, node_id, console=2222))
    assert len(project.nodes) == 1


def test_create_vm_new_topology_without_uuid(loop, project, vpcs):
    vm = loop.run_until_complete(vpcs.create_vm("PC 1", project.id, None))
    assert vm in project.vms
    assert len(vm.id) == 36
def test_create_node_new_topology_without_uuid(loop, project, vpcs):
    node = loop.run_until_complete(vpcs.create_node("PC 1", project.id, None))
    assert node in project.nodes
    assert len(node.id) == 36


def test_create_vm_old_topology(loop, project, tmpdir, vpcs):
def test_create_node_old_topology(loop, project, tmpdir, vpcs):

    with patch("gns3server.compute.project.Project.is_local", return_value=True):
        # Create an old topology directory
        project_dir = str(tmpdir / "testold")
        vm_dir = os.path.join(project_dir, "testold-files", "vpcs", "pc-1")
        node_dir = os.path.join(project_dir, "testold-files", "vpcs", "pc-1")
        project.path = project_dir
        project.name = "testold"
        os.makedirs(vm_dir, exist_ok=True)
        with open(os.path.join(vm_dir, "startup.vpc"), "w+") as f:
        os.makedirs(node_dir, exist_ok=True)
        with open(os.path.join(node_dir, "startup.vpc"), "w+") as f:
            f.write("1")

        vm_id = 1
        vm = loop.run_until_complete(vpcs.create_vm("PC 1", project.id, vm_id))
        assert len(vm.id) == 36
        node_id = 1
        node = loop.run_until_complete(vpcs.create_node("PC 1", project.id, node_id))
        assert len(node.id) == 36

        assert os.path.exists(os.path.join(project_dir, "testold-files")) is False

        vm_dir = os.path.join(project_dir, "project-files", "vpcs", vm.id)
        with open(os.path.join(vm_dir, "startup.vpc")) as f:
        node_dir = os.path.join(project_dir, "project-files", "vpcs", node.id)
        with open(os.path.join(node_dir, "startup.vpc")) as f:
            assert f.read() == "1"


@ -116,9 +116,9 @@ def test_get_abs_image_path_non_local(qemu, tmpdir):
    # If non local we can't use path outside images directory
    with patch("gns3server.config.Config.get_section_config", return_value={"images_path": str(tmpdir / "images"), "local": False}):
        assert qemu.get_abs_image_path(path1) == path1
        with pytest.raises(VMError):
        with pytest.raises(NodeError):
            qemu.get_abs_image_path(path2)
        with pytest.raises(VMError):
        with pytest.raises(NodeError):
            qemu.get_abs_image_path("C:\\test2.bin")

    with patch("gns3server.config.Config.get_section_config", return_value={"images_path": str(tmpdir / "images"), "local": True}):

@ -41,9 +41,9 @@ def manager(port_manager):


@pytest.fixture(scope="function")
def vm(project, manager, loop):
    vm = manager.create_vm("test", project.id, "00010203-0405-0607-0809-0a0b0c0d0e0f")
    return loop.run_until_complete(asyncio.async(vm))
def node(project, manager, loop):
    node = manager.create_node("test", project.id, "00010203-0405-0607-0809-0a0b0c0d0e0f")
    return loop.run_until_complete(asyncio.async(node))


def test_affect_uuid():
@ -123,44 +123,44 @@ def test_json(tmpdir):
    assert p.__json__() == {"name": p.name, "project_id": p.id, "temporary": False}


def test_vm_working_directory(tmpdir, vm):
def test_vm_working_directory(tmpdir, node):
    directory = Config.instance().get_section_config("Server").get("project_directory")

    with patch("gns3server.compute.project.Project.is_local", return_value=True):
        p = Project(project_id=str(uuid4()))
        assert p.vm_working_directory(vm) == os.path.join(directory, p.id, 'project-files', vm.module_name, vm.id)
        assert os.path.exists(p.vm_working_directory(vm))
        assert p.node_working_directory(node) == os.path.join(directory, p.id, 'project-files', node.module_name, node.id)
        assert os.path.exists(p.node_working_directory(node))


def test_mark_vm_for_destruction(vm):
def test_mark_node_for_destruction(node):
    project = Project(project_id=str(uuid4()))
    project.add_vm(vm)
    project.mark_vm_for_destruction(vm)
    assert len(project._vms_to_destroy) == 1
    assert len(project.vms) == 0
    project.add_node(node)
    project.mark_node_for_destruction(node)
    assert len(project._nodes_to_destroy) == 1
    assert len(project.nodes) == 0


def test_commit(manager, loop):
    project = Project(project_id=str(uuid4()))
    vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    project.add_vm(vm)
    directory = project.vm_working_directory(vm)
    project.mark_vm_for_destruction(vm)
    assert len(project._vms_to_destroy) == 1
    node = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    project.add_node(node)
    directory = project.node_working_directory(node)
    project.mark_node_for_destruction(node)
    assert len(project._nodes_to_destroy) == 1
    assert os.path.exists(directory)
    loop.run_until_complete(asyncio.async(project.commit()))
    assert len(project._vms_to_destroy) == 0
    assert len(project._nodes_to_destroy) == 0
    assert os.path.exists(directory) is False
    assert len(project.vms) == 0
    assert len(project.nodes) == 0


def test_commit_permission_issue(manager, loop):
    project = Project(project_id=str(uuid4()))
    vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    project.add_vm(vm)
    directory = project.vm_working_directory(vm)
    project.mark_vm_for_destruction(vm)
    assert len(project._vms_to_destroy) == 1
    node = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    project.add_node(node)
    directory = project.node_working_directory(node)
    project.mark_node_for_destruction(node)
    assert len(project._nodes_to_destroy) == 1
    assert os.path.exists(directory)
    os.chmod(directory, 0)
    with pytest.raises(aiohttp.web.HTTPInternalServerError):
@ -188,17 +188,17 @@ def test_project_delete_permission_issue(loop):

def test_project_add_vm(manager):
    project = Project(project_id=str(uuid4()))
    vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    project.add_vm(vm)
    assert len(project.vms) == 1
    node = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
    project.add_node(node)
    assert len(project.nodes) == 1


def test_project_close(loop, vm, project):
def test_project_close(loop, node, project):

    with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.close") as mock:
        loop.run_until_complete(asyncio.async(project.close()))
        assert mock.called
    assert vm.id not in vm.manager._vms
    assert node.id not in node.manager._nodes


def test_project_close_temporary_project(loop, manager):

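The commit tests above encode a two-phase delete: mark_vm_for_destruction() / mark_node_for_destruction() removes the object from the active set immediately, while its working directory on disk is only wiped when the project commits. A small sketch of that bookkeeping, as a toy model rather than the actual Project class:

import os
import shutil
import tempfile

class ToyProject:
    """Toy two-phase delete: drop from the active set now, wipe disk on commit."""

    def __init__(self):
        self._nodes = set()
        self._nodes_to_destroy = set()

    def add_node(self, node):
        self._nodes.add(node)

    def mark_node_for_destruction(self, node):
        self._nodes.discard(node)          # gone from the project right away
        self._nodes_to_destroy.add(node)   # ...but remembered until commit

    def commit(self, working_dir_of):
        for node in self._nodes_to_destroy:
            shutil.rmtree(working_dir_of(node))  # disk state removed only here
        self._nodes_to_destroy.clear()


project = ToyProject()
workdir = tempfile.mkdtemp()
project.add_node("n1")
project.mark_node_for_destruction("n1")
assert len(project._nodes) == 0 and len(project._nodes_to_destroy) == 1
project.commit(lambda node: workdir)
assert os.path.exists(workdir) is False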
@ -33,13 +33,13 @@ def test_get_mac_id(loop, project, port_manager):
    vm1_id = str(uuid.uuid4())
    vm2_id = str(uuid.uuid4())
    vm3_id = str(uuid.uuid4())
    loop.run_until_complete(vpcs.create_vm("PC 1", project.id, vm1_id))
    loop.run_until_complete(vpcs.create_vm("PC 2", project.id, vm2_id))
    loop.run_until_complete(vpcs.create_node("PC 1", project.id, vm1_id))
    loop.run_until_complete(vpcs.create_node("PC 2", project.id, vm2_id))
    assert vpcs.get_mac_id(vm1_id) == 0
    assert vpcs.get_mac_id(vm1_id) == 0
    assert vpcs.get_mac_id(vm2_id) == 1
    loop.run_until_complete(vpcs.delete_vm(vm1_id))
    loop.run_until_complete(vpcs.create_vm("PC 3", project.id, vm3_id))
    loop.run_until_complete(vpcs.delete_node(vm1_id))
    loop.run_until_complete(vpcs.create_node("PC 3", project.id, vm3_id))
    assert vpcs.get_mac_id(vm3_id) == 0


@ -53,9 +53,9 @@ def test_get_mac_id_multiple_project(loop, port_manager):
    vm3_id = str(uuid.uuid4())
    project1 = ProjectManager.instance().create_project(project_id=str(uuid.uuid4()))
    project2 = ProjectManager.instance().create_project(project_id=str(uuid.uuid4()))
    loop.run_until_complete(vpcs.create_vm("PC 1", project1.id, vm1_id))
    loop.run_until_complete(vpcs.create_vm("PC 2", project1.id, vm2_id))
    loop.run_until_complete(vpcs.create_vm("PC 2", project2.id, vm3_id))
    loop.run_until_complete(vpcs.create_node("PC 1", project1.id, vm1_id))
    loop.run_until_complete(vpcs.create_node("PC 2", project1.id, vm2_id))
    loop.run_until_complete(vpcs.create_node("PC 2", project2.id, vm3_id))
    assert vpcs.get_mac_id(vm1_id) == 0
    assert vpcs.get_mac_id(vm2_id) == 1
    assert vpcs.get_mac_id(vm3_id) == 0
@ -68,6 +68,6 @@ def test_get_mac_id_no_id_available(loop, project, port_manager):
    vpcs.port_manager = port_manager
    with pytest.raises(VPCSError):
        for i in range(0, 256):
            vm_id = str(uuid.uuid4())
            loop.run_until_complete(vpcs.create_vm("PC {}".format(i), project.id, vm_id))
            assert vpcs.get_mac_id(vm_id) == i
            node_id = str(uuid.uuid4())
            loop.run_until_complete(vpcs.create_node("PC {}".format(i), project.id, node_id))
            assert vpcs.get_mac_id(node_id) == i

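The three get_mac_id tests pin down an allocator: the lowest free ID per manager is handed out, lookups are stable, IDs are recycled after a node is deleted, and a 257th allocation fails. A sketch of logic consistent with those assertions (not the actual VPCS manager implementation):

class MacIdAllocator:
    """Lowest-free-ID allocator matching the behaviour the tests assert."""

    def __init__(self, limit=256):
        self._limit = limit
        self._ids = {}  # node_id -> mac_id

    def get_mac_id(self, node_id):
        if node_id in self._ids:
            return self._ids[node_id]  # stable on repeated lookups
        used = set(self._ids.values())
        for candidate in range(self._limit):
            if candidate not in used:
                self._ids[node_id] = candidate
                return candidate
        raise RuntimeError("No mac id available (all {} in use)".format(self._limit))

    def release(self, node_id):
        self._ids.pop(node_id, None)  # frees the ID for the next node


alloc = MacIdAllocator()
assert alloc.get_mac_id("pc1") == 0
assert alloc.get_mac_id("pc2") == 1
alloc.release("pc1")
assert alloc.get_mac_id("pc3") == 0  # recycled, as test_get_mac_id expects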
@ -112,7 +112,7 @@ def test_start(loop, vm, async_run):
    assert vm.is_running()
    assert vm.command_line == ' '.join(mock_exec.call_args[0])
    (action, event, kwargs) = async_run(queue.get(0))
    assert action == "vm.started"
    assert action == "node.started"
    assert event == vm


@ -177,7 +177,7 @@ def test_stop(loop, vm, async_run):
    async_run(queue.get(0))  # Started

    (action, event, kwargs) = async_run(queue.get(0))
    assert action == "vm.stopped"
    assert action == "node.stopped"
    assert event == vm


@ -75,16 +75,16 @@ def test_isEnabled(controller):


def test_addCompute(controller, controller_config_path, async_run):
    async_run(controller.addCompute("test1"))
    async_run(controller.add_compute("test1"))
    assert len(controller.computes) == 1
    async_run(controller.addCompute("test1"))
    async_run(controller.add_compute("test1"))
    assert len(controller.computes) == 1
    async_run(controller.addCompute("test2"))
    async_run(controller.add_compute("test2"))
    assert len(controller.computes) == 2


def test_addComputeConfigFile(controller, controller_config_path, async_run):
    async_run(controller.addCompute("test1"))
    async_run(controller.add_compute("test1"))
    assert len(controller.computes) == 1
    with open(controller_config_path) as f:
        data = json.load(f)
@ -101,7 +101,7 @@ def test_addComputeConfigFile(controller, controller_config_path, async_run):


def test_getCompute(controller, async_run):
    compute = async_run(controller.addCompute("test1"))
    compute = async_run(controller.add_compute("test1"))

    assert controller.getCompute("test1") == compute
    with pytest.raises(aiohttp.web.HTTPNotFound):
@ -145,9 +145,9 @@ def test_getProject(controller, async_run):
    uuid1 = str(uuid.uuid4())

    project = async_run(controller.addProject(project_id=uuid1))
    assert controller.getProject(uuid1) == project
    assert controller.get_project(uuid1) == project
    with pytest.raises(aiohttp.web.HTTPNotFound):
        assert controller.getProject("dsdssd")
        assert controller.get_project("dsdssd")


def test_emit(controller, async_run):
@ -22,7 +22,7 @@ from unittest.mock import MagicMock
|


from gns3server.controller.link import Link
from gns3server.controller.vm import VM
from gns3server.controller.node import Node
from gns3server.controller.compute import Compute
from gns3server.controller.project import Project

@ -41,23 +41,23 @@ def compute():

@pytest.fixture
def link(async_run, project, compute):
    vm1 = VM(project, compute)
    vm2 = VM(project, compute)
    node1 = Node(project, compute)
    node2 = Node(project, compute)

    link = Link(project)
    async_run(link.addVM(vm1, 0, 4))
    async_run(link.addVM(vm2, 1, 3))
    async_run(link.add_node(node1, 0, 4))
    async_run(link.add_node(node2, 1, 3))
    return link


def test_addVM(async_run, project, compute):
    vm1 = VM(project, compute)
def test_addNode(async_run, project, compute):
    node1 = Node(project, compute)

    link = Link(project)
    async_run(link.addVM(vm1, 0, 4))
    assert link._vms == [
    async_run(link.add_node(node1, 0, 4))
    assert link._nodes == [
        {
            "vm": vm1,
            "node": node1,
            "adapter_number": 0,
            "port_number": 4
        }
@ -65,22 +65,22 @@ def test_addVM(async_run, project, compute):


def test_json(async_run, project, compute):
    vm1 = VM(project, compute)
    vm2 = VM(project, compute)
    node1 = Node(project, compute)
    node2 = Node(project, compute)

    link = Link(project)
    async_run(link.addVM(vm1, 0, 4))
    async_run(link.addVM(vm2, 1, 3))
    async_run(link.add_node(node1, 0, 4))
    async_run(link.add_node(node2, 1, 3))
    assert link.__json__() == {
        "link_id": link.id,
        "vms": [
        "nodes": [
            {
                "vm_id": vm1.id,
                "node_id": node1.id,
                "adapter_number": 0,
                "port_number": 4
            },
            {
                "vm_id": vm2.id,
                "node_id": node2.id,
                "adapter_number": 1,
                "port_number": 3
            }
@ -109,10 +109,10 @@ def test_start_streaming_pcap(link, async_run, tmpdir, project):


def test_default_capture_file_name(project, compute, async_run):
    vm1 = VM(project, compute, name="Hello@")
    vm2 = VM(project, compute, name="w0.rld")
    node1 = Node(project, compute, name="Hello@")
    node2 = Node(project, compute, name="w0.rld")

    link = Link(project)
    async_run(link.addVM(vm1, 0, 4))
    async_run(link.addVM(vm2, 1, 3))
    async_run(link.add_node(node1, 0, 4))
    async_run(link.add_node(node2, 1, 3))
    assert link.default_capture_file_name() == "Hello_0-4_to_w0rld_1-3.pcap"

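test_default_capture_file_name fixes the naming scheme exactly: non-word characters are stripped from both node names and the two endpoints are joined as name_adapter-port_to_name_adapter-port.pcap. A plausible reconstruction of that formatting (hedged; the real method lives on Link):

import re

def default_capture_file_name(name1, adapter1, port1, name2, adapter2, port2):
    # Drop anything that is not a letter, digit or underscore from the names,
    # then join both endpoints, reproducing "Hello_0-4_to_w0rld_1-3.pcap".
    clean1 = re.sub(r"\W", "", name1)
    clean2 = re.sub(r"\W", "", name2)
    return "{}_{}-{}_to_{}_{}-{}.pcap".format(clean1, adapter1, port1,
                                              clean2, adapter2, port2)

assert default_capture_file_name("Hello@", 0, 4, "w0.rld", 1, 3) == "Hello_0-4_to_w0rld_1-3.pcap"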
171
tests/controller/test_node.py
Normal file
@ -0,0 +1,171 @@
#!/usr/bin/env python
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import pytest
import uuid
import asyncio
from unittest.mock import MagicMock


from tests.utils import AsyncioMagicMock

from gns3server.controller.node import Node
from gns3server.controller.project import Project


@pytest.fixture
def compute():
    s = AsyncioMagicMock()
    s.id = "http://test.com:42"
    return s


@pytest.fixture
def node(compute):
    project = Project(str(uuid.uuid4()))
    node = Node(project, compute,
                name="demo",
                node_id=str(uuid.uuid4()),
                node_type="vpcs",
                console_type="vnc",
                properties={"startup_script": "echo test"})
    return node


def test_json(node, compute):
    assert node.__json__() == {
        "compute_id": compute.id,
        "project_id": node.project.id,
        "node_id": node.id,
        "node_type": node.node_type,
        "name": "demo",
        "console": node.console,
        "console_type": node.console_type,
        "properties": node.properties
    }


def test_init_without_uuid(project, compute):
    node = Node(project, compute,
                node_type="vpcs",
                console_type="vnc")
    assert node.id is not None


def test_create(node, compute, project, async_run):
    node._console = 2048

    response = MagicMock()
    response.json = {"console": 2048}
    compute.post = AsyncioMagicMock(return_value=response)

    async_run(node.create())
    data = {
        "console": 2048,
        "console_type": "vnc",
        "node_id": node.id,
        "startup_script": "echo test",
        "name": "demo"
    }
    compute.post.assert_called_with("/projects/{}/vpcs/nodes".format(node.project.id), data=data)
    assert node._console == 2048
    assert node._properties == {"startup_script": "echo test"}


def test_update(node, compute, project, async_run):
    response = MagicMock()
    response.json = {"console": 2048}
    compute.put = AsyncioMagicMock(return_value=response)

    async_run(node.update(console=2048, console_type="vnc", properties={"startup_script": "echo test"}, name="demo"))
    data = {
        "console": 2048,
        "console_type": "vnc",
        "startup_script": "echo test",
        "name": "demo"
    }
    compute.put.assert_called_with("/projects/{}/vpcs/nodes/{}".format(node.project.id, node.id), data=data)
    assert node._console == 2048
    assert node._properties == {"startup_script": "echo test"}


def test_start(node, compute, project, async_run):

    compute.post = AsyncioMagicMock()

    async_run(node.start())
    compute.post.assert_called_with("/projects/{}/vpcs/nodes/{}/start".format(node.project.id, node.id))


def test_stop(node, compute, project, async_run):

    compute.post = AsyncioMagicMock()

    async_run(node.stop())
    compute.post.assert_called_with("/projects/{}/vpcs/nodes/{}/stop".format(node.project.id, node.id))


def test_suspend(node, compute, project, async_run):

    compute.post = AsyncioMagicMock()

    async_run(node.suspend())
    compute.post.assert_called_with("/projects/{}/vpcs/nodes/{}/suspend".format(node.project.id, node.id))


def test_reload(node, compute, project, async_run):

    compute.post = AsyncioMagicMock()

    async_run(node.reload())
    compute.post.assert_called_with("/projects/{}/vpcs/nodes/{}/reload".format(node.project.id, node.id))


def test_create_without_console(node, compute, project, async_run):
    """
    Properties that are None should not be sent, because a None value can
    mean the emulator does not support that property.
    """

    response = MagicMock()
    response.json = {"console": 2048, "test_value": "success"}
    compute.post = AsyncioMagicMock(return_value=response)

    async_run(node.create())
    data = {
        "console_type": "vnc",
        "node_id": node.id,
        "startup_script": "echo test",
        "name": "demo"
    }
    compute.post.assert_called_with("/projects/{}/vpcs/nodes".format(node.project.id), data=data)
    assert node._console == 2048
    assert node._properties == {"test_value": "success", "startup_script": "echo test"}


def test_delete(node, compute, async_run):
    async_run(node.destroy())
    compute.delete.assert_called_with("/projects/{}/vpcs/nodes/{}".format(node.project.id, node.id))


def test_post(node, compute, async_run):
    async_run(node.post("/test", {"a": "b"}))
    compute.post.assert_called_with("/projects/{}/vpcs/nodes/{}/test".format(node.project.id, node.id), data={"a": "b"})


def test_delete(node, compute, async_run):
    async_run(node.delete("/test"))
    compute.delete.assert_called_with("/projects/{}/vpcs/nodes/{}/test".format(node.project.id, node.id))
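test_create_without_console above documents the payload rule for node creation: fields whose value is None are omitted, so the compute can distinguish "unsupported by this emulator" from an explicit value. A minimal sketch of that filtering step (hypothetical helper, not the actual Node.create() body):

def build_create_payload(node_id, name, console, console_type, properties):
    # Collect the always-relevant fields plus emulator-specific properties,
    # then drop every None: a None can mean the emulator lacks the feature.
    payload = {"node_id": node_id, "name": name,
               "console": console, "console_type": console_type}
    payload.update(properties)
    return {key: value for key, value in payload.items() if value is not None}

# console=None disappears from the payload, mirroring the assertion above.
data = build_create_payload("1234", "demo", None, "vnc", {"startup_script": "echo test"})
assert "console" not in data and data["startup_script"] == "echo test"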
@ -77,10 +77,10 @@ def test_addVM(async_run):
    response.json = {"console": 2048}
    compute.post = AsyncioMagicMock(return_value=response)

    vm = async_run(project.addVM(compute, None, name="test", vm_type="vpcs", properties={"startup_config": "test.cfg"}))
    vm = async_run(project.add_node(compute, None, name="test", node_type="vpcs", properties={"startup_config": "test.cfg"}))

    compute.post.assert_called_with('/projects/{}/vpcs/vms'.format(project.id),
                                    data={'vm_id': vm.id,
    compute.post.assert_called_with('/projects/{}/vpcs/nodes'.format(project.id),
                                    data={'node_id': vm.id,
                                          'console_type': 'telnet',
                                          'startup_config': 'test.cfg',
                                          'name': 'test'})
@ -94,11 +94,11 @@ def test_getVM(async_run):
    response.json = {"console": 2048}
    compute.post = AsyncioMagicMock(return_value=response)

    vm = async_run(project.addVM(compute, None, name="test", vm_type="vpcs", properties={"startup_config": "test.cfg"}))
    assert project.getVM(vm.id) == vm
    vm = async_run(project.add_node(compute, None, name="test", node_type="vpcs", properties={"startup_config": "test.cfg"}))
    assert project.get_node(vm.id) == vm

    with pytest.raises(aiohttp.web_exceptions.HTTPNotFound):
        project.getVM("test")
        project.get_node("test")


def test_addLink(async_run):
@ -109,12 +109,12 @@ def test_addLink(async_run):
    response.json = {"console": 2048}
    compute.post = AsyncioMagicMock(return_value=response)

    vm1 = async_run(project.addVM(compute, None, name="test1", vm_type="vpcs", properties={"startup_config": "test.cfg"}))
    vm2 = async_run(project.addVM(compute, None, name="test2", vm_type="vpcs", properties={"startup_config": "test.cfg"}))
    link = async_run(project.addLink())
    async_run(link.addVM(vm1, 3, 1))
    async_run(link.addVM(vm2, 4, 2))
    assert len(link._vms) == 2
    vm1 = async_run(project.add_node(compute, None, name="test1", node_type="vpcs", properties={"startup_config": "test.cfg"}))
    vm2 = async_run(project.add_node(compute, None, name="test2", node_type="vpcs", properties={"startup_config": "test.cfg"}))
    link = async_run(project.add_link())
    async_run(link.add_node(vm1, 3, 1))
    async_run(link.add_node(vm2, 4, 2))
    assert len(link._nodes) == 2


def test_getLink(async_run):
@ -125,11 +125,11 @@ def test_getLink(async_run):
    response.json = {"console": 2048}
    compute.post = AsyncioMagicMock(return_value=response)

    link = async_run(project.addLink())
    assert project.getLink(link.id) == link
    link = async_run(project.add_link())
    assert project.get_link(link.id) == link

    with pytest.raises(aiohttp.web_exceptions.HTTPNotFound):
        project.getLink("test")
        project.get_link("test")


def test_emit(async_run):

@ -22,9 +22,8 @@ from unittest.mock import MagicMock
from tests.utils import asyncio_patch

from gns3server.controller.project import Project
from gns3server.controller.compute import Compute
from gns3server.controller.udp_link import UDPLink
from gns3server.controller.vm import VM
from gns3server.controller.node import Node


@pytest.fixture
@ -36,12 +35,12 @@ def test_create(async_run, project):
    compute1 = MagicMock()
    compute2 = MagicMock()

    vm1 = VM(project, compute1, vm_type="vpcs")
    vm2 = VM(project, compute2, vm_type="vpcs")
    node1 = Node(project, compute1, node_type="vpcs")
    node2 = Node(project, compute2, node_type="vpcs")

    link = UDPLink(project)
    async_run(link.addVM(vm1, 0, 4))
    async_run(link.addVM(vm2, 3, 1))
    async_run(link.add_node(node1, 0, 4))
    async_run(link.add_node(node2, 3, 1))

    @asyncio.coroutine
    def compute1_callback(path, data={}):
@ -69,13 +68,13 @@ def test_create(async_run, project):
    compute2.host = "example.org"
    async_run(link.create())

    compute1.post.assert_any_call("/projects/{}/vpcs/vms/{}/adapters/0/ports/4/nio".format(project.id, vm1.id), data={
    compute1.post.assert_any_call("/projects/{}/vpcs/nodes/{}/adapters/0/ports/4/nio".format(project.id, node1.id), data={
        "lport": 1024,
        "rhost": compute2.host,
        "rport": 2048,
        "type": "nio_udp"
    })
    compute2.post.assert_any_call("/projects/{}/vpcs/vms/{}/adapters/3/ports/1/nio".format(project.id, vm2.id), data={
    compute2.post.assert_any_call("/projects/{}/vpcs/nodes/{}/adapters/3/ports/1/nio".format(project.id, node2.id), data={
        "lport": 2048,
        "rhost": compute1.host,
        "rport": 1024,
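The two assert_any_call checks show the shape of the UDP tunnel the link builds: each side POSTs a NIO whose lport is its own allocated port and whose rhost/rport point at the peer's compute. A sketch of the mirrored payload construction (illustrative only):

def udp_nio_payloads(lport1, host1, lport2, host2):
    # The two payloads are mirror images: my lport is the peer's rport.
    side1 = {"lport": lport1, "rhost": host2, "rport": lport2, "type": "nio_udp"}
    side2 = {"lport": lport2, "rhost": host1, "rport": lport1, "type": "nio_udp"}
    return side1, side2

side1, side2 = udp_nio_payloads(1024, "example.com", 2048, "example.org")
assert side1 == {"lport": 1024, "rhost": "example.org", "rport": 2048, "type": "nio_udp"}
assert side2 == {"lport": 2048, "rhost": "example.com", "rport": 1024, "type": "nio_udp"}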
@ -87,17 +86,17 @@ def test_delete(async_run, project):
|
    compute1 = MagicMock()
    compute2 = MagicMock()

    vm1 = VM(project, compute1, vm_type="vpcs")
    vm2 = VM(project, compute2, vm_type="vpcs")
    node1 = Node(project, compute1, node_type="vpcs")
    node2 = Node(project, compute2, node_type="vpcs")

    link = UDPLink(project)
    async_run(link.addVM(vm1, 0, 4))
    async_run(link.addVM(vm2, 3, 1))
    async_run(link.add_node(node1, 0, 4))
    async_run(link.add_node(node2, 3, 1))

    async_run(link.delete())

    compute1.delete.assert_any_call("/projects/{}/vpcs/vms/{}/adapters/0/ports/4/nio".format(project.id, vm1.id))
    compute2.delete.assert_any_call("/projects/{}/vpcs/vms/{}/adapters/3/ports/1/nio".format(project.id, vm2.id))
    compute1.delete.assert_any_call("/projects/{}/vpcs/nodes/{}/adapters/0/ports/4/nio".format(project.id, node1.id))
    compute2.delete.assert_any_call("/projects/{}/vpcs/nodes/{}/adapters/3/ports/1/nio".format(project.id, node2.id))


def test_choose_capture_side(async_run, project):
||||
@ -108,51 +107,51 @@ def test_choose_capture_side(async_run, project):
    compute2 = MagicMock()
    compute2.id = "local"

    vm_vpcs = VM(project, compute1, vm_type="vpcs")
    vm_iou = VM(project, compute2, vm_type="iou")
    node_vpcs = Node(project, compute1, node_type="vpcs")
    node_iou = Node(project, compute2, node_type="iou")

    link = UDPLink(project)
    async_run(link.addVM(vm_vpcs, 0, 4))
    async_run(link.addVM(vm_iou, 3, 1))
    async_run(link.add_node(node_vpcs, 0, 4))
    async_run(link.add_node(node_iou, 3, 1))

    assert link._choose_capture_side()["vm"] == vm_iou
    assert link._choose_capture_side()["node"] == node_iou

    vm_vpcs = VM(project, compute1, vm_type="vpcs")
    vm_vpcs2 = VM(project, compute1, vm_type="vpcs")
    node_vpcs = Node(project, compute1, node_type="vpcs")
    node_vpcs2 = Node(project, compute1, node_type="vpcs")

    link = UDPLink(project)
    async_run(link.addVM(vm_vpcs, 0, 4))
    async_run(link.addVM(vm_vpcs2, 3, 1))
    async_run(link.add_node(node_vpcs, 0, 4))
    async_run(link.add_node(node_vpcs2, 3, 1))

    # VPCS doesn't support capture
    with pytest.raises(aiohttp.web.HTTPConflict):
        link._choose_capture_side()["vm"]
        link._choose_capture_side()["node"]

    # Capture should run on the local node
    vm_iou = VM(project, compute1, vm_type="iou")
    vm_iou2 = VM(project, compute2, vm_type="iou")
    node_iou = Node(project, compute1, node_type="iou")
    node_iou2 = Node(project, compute2, node_type="iou")

    link = UDPLink(project)
    async_run(link.addVM(vm_iou, 0, 4))
    async_run(link.addVM(vm_iou2, 3, 1))
    async_run(link.add_node(node_iou, 0, 4))
    async_run(link.add_node(node_iou2, 3, 1))

    assert link._choose_capture_side()["vm"] == vm_iou2
    assert link._choose_capture_side()["node"] == node_iou2


def test_capture(async_run, project):
    compute1 = MagicMock()

    vm_vpcs = VM(project, compute1, vm_type="vpcs", name="V1")
    vm_iou = VM(project, compute1, vm_type="iou", name="I1")
    node_vpcs = Node(project, compute1, node_type="vpcs", name="V1")
    node_iou = Node(project, compute1, node_type="iou", name="I1")

    link = UDPLink(project)
    async_run(link.addVM(vm_vpcs, 0, 4))
    async_run(link.addVM(vm_iou, 3, 1))
    async_run(link.add_node(node_vpcs, 0, 4))
    async_run(link.add_node(node_iou, 3, 1))

    capture = async_run(link.start_capture())
    assert link.capturing

    compute1.post.assert_any_call("/projects/{}/iou/vms/{}/adapters/3/ports/1/start_capture".format(project.id, vm_iou.id), data={
    compute1.post.assert_any_call("/projects/{}/iou/nodes/{}/adapters/3/ports/1/start_capture".format(project.id, node_iou.id), data={
        "capture_file_name": link.default_capture_file_name(),
        "data_link_type": "DLT_EN10MB"
    })
@ -160,18 +159,18 @@ def test_capture(async_run, project):
    capture = async_run(link.stop_capture())
    assert link.capturing is False

    compute1.post.assert_any_call("/projects/{}/iou/vms/{}/adapters/3/ports/1/stop_capture".format(project.id, vm_iou.id))
    compute1.post.assert_any_call("/projects/{}/iou/nodes/{}/adapters/3/ports/1/stop_capture".format(project.id, node_iou.id))


def test_read_pcap_from_source(project, async_run):
    compute1 = MagicMock()

    link = UDPLink(project)
    async_run(link.addVM(compute1, 0, 4))
    async_run(link.addVM(compute1, 3, 1))
    async_run(link.add_node(compute1, 0, 4))
    async_run(link.add_node(compute1, 3, 1))

    capture = async_run(link.start_capture())
    assert link._capture_vm is not None
    assert link._capture_node is not None

    async_run(link.read_pcap_from_source())
    link._capture_vm["vm"].compute.streamFile.assert_called_with(project, "tmp/captures/" + link._capture_file_name)
    link._capture_node["node"].compute.streamFile.assert_called_with(project, "tmp/captures/" + link._capture_file_name)
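test_choose_capture_side fixes the selection policy for captures: endpoints that cannot capture (VPCS here) are skipped, a node on the local compute is preferred, and a link with no capable endpoint is a conflict. A sketch of that policy; the capability set below is an assumption made for illustration, not the server's actual list:

from types import SimpleNamespace

CAPTURE_CAPABLE = {"iou", "dynamips", "docker"}  # assumed; vpcs cannot capture

def choose_capture_side(nodes):
    # nodes are dicts shaped like Link._nodes: {"node": ..., "adapter_number": ..., "port_number": ...}
    candidates = [n for n in nodes if n["node"].node_type in CAPTURE_CAPABLE]
    if not candidates:
        raise RuntimeError("Cannot capture on this link")  # HTTPConflict in the server
    for candidate in candidates:
        if candidate["node"].compute.id == "local":
            return candidate  # prefer local: no streaming over the network
    return candidates[0]


local = SimpleNamespace(id="local")
remote = SimpleNamespace(id="http://test.com:42")
side_a = {"node": SimpleNamespace(node_type="vpcs", compute=remote), "adapter_number": 0, "port_number": 4}
side_b = {"node": SimpleNamespace(node_type="iou", compute=local), "adapter_number": 3, "port_number": 1}
assert choose_capture_side([side_a, side_b]) is side_b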
@ -1,171 +0,0 @@
#!/usr/bin/env python
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import pytest
import uuid
import asyncio
from unittest.mock import MagicMock


from tests.utils import AsyncioMagicMock

from gns3server.controller.vm import VM
from gns3server.controller.project import Project


@pytest.fixture
def compute():
    s = AsyncioMagicMock()
    s.id = "http://test.com:42"
    return s


@pytest.fixture
def vm(compute):
    project = Project(str(uuid.uuid4()))
    vm = VM(project, compute,
            name="demo",
            vm_id=str(uuid.uuid4()),
            vm_type="vpcs",
            console_type="vnc",
            properties={"startup_script": "echo test"})
    return vm


def test_json(vm, compute):
    assert vm.__json__() == {
        "compute_id": compute.id,
        "project_id": vm.project.id,
        "vm_id": vm.id,
        "vm_type": vm.vm_type,
        "name": "demo",
        "console": vm.console,
        "console_type": vm.console_type,
        "properties": vm.properties
    }


def test_init_without_uuid(project, compute):
    vm = VM(project, compute,
            vm_type="vpcs",
            console_type="vnc")
    assert vm.id is not None


def test_create(vm, compute, project, async_run):
    vm._console = 2048

    response = MagicMock()
    response.json = {"console": 2048}
    compute.post = AsyncioMagicMock(return_value=response)

    async_run(vm.create())
    data = {
        "console": 2048,
        "console_type": "vnc",
        "vm_id": vm.id,
        "startup_script": "echo test",
        "name": "demo"
    }
    compute.post.assert_called_with("/projects/{}/vpcs/vms".format(vm.project.id), data=data)
    assert vm._console == 2048
    assert vm._properties == {"startup_script": "echo test"}


def test_update(vm, compute, project, async_run):
    response = MagicMock()
    response.json = {"console": 2048}
    compute.put = AsyncioMagicMock(return_value=response)

    async_run(vm.update(console=2048, console_type="vnc", properties={"startup_script": "echo test"}, name="demo"))
    data = {
        "console": 2048,
        "console_type": "vnc",
        "startup_script": "echo test",
        "name": "demo"
    }
    compute.put.assert_called_with("/projects/{}/vpcs/vms/{}".format(vm.project.id, vm.id), data=data)
    assert vm._console == 2048
    assert vm._properties == {"startup_script": "echo test"}


def test_start(vm, compute, project, async_run):

    compute.post = AsyncioMagicMock()

    async_run(vm.start())
    compute.post.assert_called_with("/projects/{}/vpcs/vms/{}/start".format(vm.project.id, vm.id))


def test_stop(vm, compute, project, async_run):

    compute.post = AsyncioMagicMock()

    async_run(vm.stop())
    compute.post.assert_called_with("/projects/{}/vpcs/vms/{}/stop".format(vm.project.id, vm.id))


def test_suspend(vm, compute, project, async_run):

    compute.post = AsyncioMagicMock()

    async_run(vm.suspend())
    compute.post.assert_called_with("/projects/{}/vpcs/vms/{}/suspend".format(vm.project.id, vm.id))


def test_reload(vm, compute, project, async_run):

    compute.post = AsyncioMagicMock()

    async_run(vm.reload())
    compute.post.assert_called_with("/projects/{}/vpcs/vms/{}/reload".format(vm.project.id, vm.id))


def test_create_without_console(vm, compute, project, async_run):
    """
    Properties that are None should not be sent, because a None value can
    mean the emulator does not support that property.
    """

    response = MagicMock()
    response.json = {"console": 2048, "test_value": "success"}
    compute.post = AsyncioMagicMock(return_value=response)

    async_run(vm.create())
    data = {
        "console_type": "vnc",
        "vm_id": vm.id,
        "startup_script": "echo test",
        "name": "demo"
    }
    compute.post.assert_called_with("/projects/{}/vpcs/vms".format(vm.project.id), data=data)
    assert vm._console == 2048
    assert vm._properties == {"test_value": "success", "startup_script": "echo test"}


def test_delete(vm, compute, async_run):
    async_run(vm.destroy())
    compute.delete.assert_called_with("/projects/{}/vpcs/vms/{}".format(vm.project.id, vm.id))


def test_post(vm, compute, async_run):
    async_run(vm.post("/test", {"a": "b"}))
    compute.post.assert_called_with("/projects/{}/vpcs/vms/{}/test".format(vm.project.id, vm.id), data={"a": "b"})


def test_delete(vm, compute, async_run):
    async_run(vm.delete("/test"))
    compute.delete.assert_called_with("/projects/{}/vpcs/vms/{}/test".format(vm.project.id, vm.id))
@ -46,7 +46,7 @@ def mock_connection():
def vm(http_compute, project, base_params):
    with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "nginx"}]) as mock_list:
        with asyncio_patch("gns3server.compute.docker.Docker.query", return_value={"Id": "8bd8153ea8f5"}) as mock:
            response = http_compute.post("/projects/{project_id}/docker/vms".format(project_id=project.id), base_params)
            response = http_compute.post("/projects/{project_id}/docker/nodes".format(project_id=project.id), base_params)
    if response.status != 201:
        print(response.body)
    assert response.status == 201
@ -56,9 +56,9 @@ def vm(http_compute, project, base_params):
def test_docker_create(http_compute, project, base_params):
    with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "nginx"}]) as mock_list:
        with asyncio_patch("gns3server.compute.docker.Docker.query", return_value={"Id": "8bd8153ea8f5"}) as mock:
            response = http_compute.post("/projects/{project_id}/docker/vms".format(project_id=project.id), base_params)
            response = http_compute.post("/projects/{project_id}/docker/nodes".format(project_id=project.id), base_params)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/docker/vms"
    assert response.route == "/projects/{project_id}/docker/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["container_id"] == "8bd8153ea8f5"
@ -70,63 +70,63 @@ def test_docker_create(http_compute, project, base_params):

def test_docker_start(http_compute, vm):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.start", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
        response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/start".format(project_id=vm["project_id"], node_id=vm["node_id"]))
    assert mock.called
    assert response.status == 204


def test_docker_stop(http_compute, vm):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.stop", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
        response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/stop".format(project_id=vm["project_id"], node_id=vm["node_id"]))
    assert mock.called
    assert response.status == 204


def test_docker_reload(http_compute, vm):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.restart", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/reload".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
        response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/reload".format(project_id=vm["project_id"], node_id=vm["node_id"]))
    assert mock.called
    assert response.status == 204


def test_docker_delete(http_compute, vm):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.delete", return_value=True) as mock:
        response = http_compute.delete("/projects/{project_id}/docker/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
        response = http_compute.delete("/projects/{project_id}/docker/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]))
    assert mock.called
    assert response.status == 204


def test_docker_reload(http_compute, vm):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.pause", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/suspend".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
        response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/suspend".format(project_id=vm["project_id"], node_id=vm["node_id"]))
    assert mock.called
    assert response.status == 204


def test_docker_nio_create_udp(http_compute, vm):
    response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
                                                                                                          "lport": 4242,
                                                                                                          "rport": 4343,
                                                                                                          "rhost": "127.0.0.1"},
    response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                              "lport": 4242,
                                                                                                              "rport": 4343,
                                                                                                              "rhost": "127.0.0.1"},
                                 example=True)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


def test_docker_delete_nio(http_compute, vm):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.adapter_remove_nio_binding") as mock:
        response = http_compute.delete("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.delete("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204
    assert response.route == "/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_docker_update(http_compute, vm, tmpdir, free_console_port):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.update") as mock:
        response = http_compute.put("/projects/{project_id}/docker/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"name": "test",
                                                                                                                                         "console": free_console_port,
                                                                                                                                         "start_command": "yes",
                                                                                                                                         "environment": "GNS3=1\nGNS4=0"},
        response = http_compute.put("/projects/{project_id}/docker/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"name": "test",
                                                                                                                                                 "console": free_console_port,
                                                                                                                                                 "start_command": "yes",
                                                                                                                                                 "environment": "GNS3=1\nGNS4=0"},
                                    example=True)
    assert mock.called
    assert response.status == 200
@ -142,7 +142,7 @@ def test_docker_start_capture(http_compute, vm, tmpdir, project):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.start_capture") as start_capture:

        params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}
        response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=params, example=True)
        response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], node_id=vm["node_id"]), body=params, example=True)

        assert response.status == 200

@ -156,7 +156,7 @@ def test_docker_start_capture_not_started(http_compute, vm, tmpdir):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.start_capture") as start_capture:

        params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}
        response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=params)
        response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], node_id=vm["node_id"]), body=params)

        assert not start_capture.called
        assert response.status == 409
@ -167,7 +167,7 @@ def test_docker_stop_capture(http_compute, vm, tmpdir, project):
    with patch("gns3server.compute.docker.docker_vm.DockerVM.is_running", return_value=True) as mock:
        with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.stop_capture") as stop_capture:

            response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
            response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)

            assert response.status == 204

@ -179,7 +179,7 @@ def test_docker_stop_capture_not_started(http_compute, vm, tmpdir):
    with patch("gns3server.compute.docker.docker_vm.DockerVM.is_running", return_value=False) as mock:
        with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.stop_capture") as stop_capture:

            response = http_compute.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
            response = http_compute.post("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], node_id=vm["node_id"]))

            assert not stop_capture.called
            assert response.status == 409

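Every endpoint in this file follows the same mechanical rename: /projects/{project_id}/docker/vms/{vm_id}... becomes /projects/{project_id}/docker/nodes/{node_id}..., with payloads untouched. A tiny helper makes the new scheme explicit (illustrative, not server code):

def node_url(project_id, emulator, node_id, action=""):
    # URL scheme after this commit: "nodes" replaces "vms" for every emulator.
    url = "/projects/{}/{}/nodes/{}".format(project_id, emulator, node_id)
    return url + "/" + action if action else url

assert node_url("p1", "docker", "n1", "start") == "/projects/p1/docker/nodes/n1/start"
assert node_url("p1", "docker", "n1") == "/projects/p1/docker/nodes/n1"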
@ -28,7 +28,7 @@ from tests.utils import asyncio_patch
#
# dynamips_path = "/fake/dynamips"
# with asyncio_patch("gns3server.compute.dynamips.nodes.router.Router.create", return_value=True) as mock:
#     response = http_compute.post("/projects/{project_id}/dynamips/vms".format(project_id=project.id), {"name": "My router",
#     response = http_compute.post("/projects/{project_id}/dynamips/nodes".format(project_id=project.id), {"name": "My router",
#                                                                                                          "platform": "c3745",
#                                                                                                          "image": "somewhere",
#                                                                                                          "ram": 128})
@ -42,7 +42,7 @@ from tests.utils import asyncio_patch
# def test_dynamips_vm_create(http_compute, project):
#
#     with asyncio_patch("gns3server.compute.dynamips.nodes.router.Router.create", return_value=True):
#         response = http_compute.post("/projects/{project_id}/dynamips/vms".format(project_id=project.id), {"name": "My router",
#         response = http_compute.post("/projects/{project_id}/dynamips/nodes".format(project_id=project.id), {"name": "My router",
#                                                                                                             "platform": "c3745",
#                                                                                                             "image": "somewhere",
#                                                                                                             "ram": 128},
@ -54,37 +54,37 @@ from tests.utils import asyncio_patch
#
#
# def test_dynamips_vm_get(http_compute, project, vm):
#     response = http_compute.get("/projects/{project_id}/dynamips/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
#     response = http_compute.get("/projects/{project_id}/dynamips/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
#     assert response.status == 200
#     assert response.route == "/projects/{project_id}/dynamips/vms/{vm_id}"
#     assert response.route == "/projects/{project_id}/dynamips/nodes/{node_id}"
#     assert response.json["name"] == "My router"
#     assert response.json["project_id"] == project.id
#
#
# def test_dynamips_vm_start(http_compute, vm):
#     with asyncio_patch("gns3server.compute.dynamips.nodes.router.Router.start", return_value=True) as mock:
#         response = http_compute.post("/projects/{project_id}/dynamips/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
#         response = http_compute.post("/projects/{project_id}/dynamips/nodes/{node_id}/start".format(project_id=vm["project_id"], node_id=vm["node_id"]))
#     assert mock.called
#     assert response.status == 204
#
#
# def test_dynamips_vm_stop(http_compute, vm):
#     with asyncio_patch("gns3server.compute.dynamips.nodes.router.Router.stop", return_value=True) as mock:
#         response = http_compute.post("/projects/{project_id}/dynamips/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
#         response = http_compute.post("/projects/{project_id}/dynamips/nodes/{node_id}/stop".format(project_id=vm["project_id"], node_id=vm["node_id"]))
#     assert mock.called
#     assert response.status == 204
#
#
# def test_dynamips_vm_suspend(http_compute, vm):
#     with asyncio_patch("gns3server.compute.dynamips.nodes.router.Router.suspend", return_value=True) as mock:
#         response = http_compute.post("/projects/{project_id}/dynamips/vms/{vm_id}/suspend".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
#         response = http_compute.post("/projects/{project_id}/dynamips/nodes/{node_id}/suspend".format(project_id=vm["project_id"], node_id=vm["node_id"]))
#     assert mock.called
#     assert response.status == 204
#
#
# def test_dynamips_vm_resume(http_compute, vm):
#     with asyncio_patch("gns3server.compute.dynamips.nodes.router.Router.resume", return_value=True) as mock:
#         response = http_compute.post("/projects/{project_id}/dynamips/vms/{vm_id}/resume".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
#         response = http_compute.post("/projects/{project_id}/dynamips/nodes/{node_id}/resume".format(project_id=vm["project_id"], node_id=vm["node_id"]))
#     assert mock.called
#     assert response.status == 204

@ -92,8 +92,8 @@ from tests.utils import asyncio_patch
|
||||
# def test_vbox_nio_create_udp(http_compute, vm):
|
||||
#
|
||||
# with asyncio_patch('gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.adapter_add_nio_binding') as mock:
|
||||
# response = http_compute.post("/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/0/nio".format(project_id=vm["project_id"],
|
||||
# vm_id=vm["vm_id"]), {"type": "nio_udp",
|
||||
# response = http_compute.post("/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/0/nio".format(project_id=vm["project_id"],
|
||||
# node_id=vm["node_id"]), {"type": "nio_udp",
|
||||
# "lport": 4242,
|
||||
# "rport": 4343,
|
||||
# "rhost": "127.0.0.1"},
|
||||
@ -104,25 +104,25 @@ from tests.utils import asyncio_patch
|
||||
# assert args[0] == 0
|
||||
#
|
||||
# assert response.status == 201
|
||||
# assert response.route == "/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/{adapter_id:\d+}/nio"
|
||||
# assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_id:\d+}/nio"
|
||||
# assert response.json["type"] == "nio_udp"
|
||||
#
|
||||
#
|
||||
# def test_vbox_delete_nio(http_compute, vm):
|
||||
#
|
||||
# with asyncio_patch('gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.adapter_remove_nio_binding') as mock:
|
||||
# response = http_compute.delete("/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
||||
# response = http_compute.delete("/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
|
||||
#
|
||||
# assert mock.called
|
||||
# args, kwgars = mock.call_args
|
||||
# assert args[0] == 0
|
||||
#
|
||||
# assert response.status == 204
|
||||
# assert response.route == "/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/{adapter_id:\d+}/nio"
|
||||
# assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_id:\d+}/nio"
|
||||
#
|
||||
#
|
||||
# def test_vbox_update(http_compute, vm, free_console_port):
|
||||
# response = http_compute.put("/projects/{project_id}/virtualbox/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"name": "test",
|
||||
# response = http_compute.put("/projects/{project_id}/virtualbox/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"name": "test",
|
||||
# "console": free_console_port})
|
||||
# assert response.status == 200
|
||||
# assert response.json["name"] == "test"
|
||||
@ -154,14 +154,14 @@ def fake_file(tmpdir):
|
||||
def test_vms(http_compute, tmpdir, fake_dynamips, fake_file):
|
||||
|
||||
with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir), example=True):
|
||||
response = http_compute.get("/dynamips/vms")
|
||||
response = http_compute.get("/dynamips/nodes")
|
||||
assert response.status == 200
|
||||
assert response.json == [{"filename": "7200.bin", "path": "7200.bin"}]
|
||||
|
||||
|
||||
def test_upload_vm(http_compute, tmpdir):
|
||||
with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir),):
|
||||
response = http_compute.post("/dynamips/vms/test2", body="TEST", raw=True)
|
||||
response = http_compute.post("/dynamips/nodes/test2", body="TEST", raw=True)
|
||||
assert response.status == 204
|
||||
|
||||
with open(str(tmpdir / "test2")) as f:
|
||||
@ -178,5 +178,5 @@ def test_upload_vm_permission_denied(http_compute, tmpdir):
|
||||
os.chmod(str(tmpdir / "test2.tmp"), 0)
|
||||
|
||||
with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir),):
|
||||
response = http_compute.post("/dynamips/vms/test2", body="TEST", raw=True)
|
||||
response = http_compute.post("/dynamips/nodes/test2", body="TEST", raw=True)
|
||||
assert response.status == 409
|
||||
|
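Both Dynamips hunks above move the flat image endpoints from /dynamips/vms to /dynamips/nodes. A minimal client sketch with the requests library; the server address, prefix and image name are hypothetical placeholders, not part of the diff:

    import requests

    BASE = "http://localhost:3080/v2/compute"  # assumed server address and prefix

    # list the available Dynamips images (collection renamed from /dynamips/vms)
    images = requests.get("{}/dynamips/nodes".format(BASE)).json()
    # each entry looks like {"filename": "7200.bin", "path": "7200.bin"}

    # upload a raw image body; 204 means the file was written to the images directory
    r = requests.post("{}/dynamips/nodes/c7200.bin".format(BASE), data=b"...image bytes...")  # hypothetical image name
    assert r.status_code == 204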
@ -47,21 +47,21 @@ def base_params(tmpdir, fake_iou_bin):

@pytest.fixture
def vm(http_compute, project, base_params):
    response = http_compute.post("/projects/{project_id}/iou/vms".format(project_id=project.id), base_params)
    response = http_compute.post("/projects/{project_id}/iou/nodes".format(project_id=project.id), base_params)
    assert response.status == 201
    return response.json


def startup_config_file(project, vm):
    directory = os.path.join(project.path, "project-files", "iou", vm["vm_id"])
    directory = os.path.join(project.path, "project-files", "iou", vm["node_id"])
    os.makedirs(directory, exist_ok=True)
    return os.path.join(directory, "startup-config.cfg")


def test_iou_create(http_compute, project, base_params):
    response = http_compute.post("/projects/{project_id}/iou/vms".format(project_id=project.id), base_params)
    response = http_compute.post("/projects/{project_id}/iou/nodes".format(project_id=project.id), base_params)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/iou/vms"
    assert response.route == "/projects/{project_id}/iou/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["serial_adapters"] == 2

@ -82,9 +82,9 @@ def test_iou_create_with_params(http_compute, project, base_params):

    params["use_default_iou_values"] = True
    params["iourc_content"] = "test"

    response = http_compute.post("/projects/{project_id}/iou/vms".format(project_id=project.id), params, example=True)
    response = http_compute.post("/projects/{project_id}/iou/nodes".format(project_id=project.id), params, example=True)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/iou/vms"
    assert response.route == "/projects/{project_id}/iou/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["serial_adapters"] == 4
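Read as a client contract, the create tests above say the collection moved from /iou/vms to /iou/nodes and the 201 body now carries a node_id. A minimal sketch with the requests library; the base URL, prefix and UUID are hypothetical placeholders, and base_params (defined earlier in the test file) is elided:

    import requests

    BASE = "http://localhost:3080/v2/compute"            # assumed server address and prefix
    project_id = "11111111-2222-3333-4444-555555555555"  # hypothetical project UUID

    # create an IOU node on the renamed collection endpoint
    r = requests.post("{}/projects/{}/iou/nodes".format(BASE, project_id),
                      json={"name": "PC TEST 1"})        # real payloads also carry the IOU image path
    assert r.status_code == 201
    node_id = r.json()["node_id"]                        # "node_id" replaces the old "vm_id" key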
@ -104,18 +104,18 @@ def test_iou_create_with_params(http_compute, project, base_params):

def test_iou_create_startup_config_already_exist(http_compute, project, base_params):
    """We don't erase a startup-config if one already exists at project creation"""

    vm_id = str(uuid.uuid4())
    startup_config_file_path = startup_config_file(project, {'vm_id': vm_id})
    node_id = str(uuid.uuid4())
    startup_config_file_path = startup_config_file(project, {'node_id': node_id})
    with open(startup_config_file_path, 'w+') as f:
        f.write("echo hello")

    params = base_params
    params["vm_id"] = vm_id
    params["node_id"] = node_id
    params["startup_config_content"] = "hostname test"

    response = http_compute.post("/projects/{project_id}/iou/vms".format(project_id=project.id), params, example=True)
    response = http_compute.post("/projects/{project_id}/iou/nodes".format(project_id=project.id), params, example=True)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/iou/vms"
    assert response.route == "/projects/{project_id}/iou/nodes"

    assert "startup-config.cfg" in response.json["startup_config"]
    with open(startup_config_file(project, response.json)) as f:

@ -123,9 +123,9 @@ def test_iou_create_startup_config_already_exist(http_compute, project, base_par


def test_iou_get(http_compute, project, vm):
    response = http_compute.get("/projects/{project_id}/iou/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    response = http_compute.get("/projects/{project_id}/iou/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 200
    assert response.route == "/projects/{project_id}/iou/vms/{vm_id}"
    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["serial_adapters"] == 2
@ -137,7 +137,7 @@ def test_iou_get(http_compute, project, vm):

def test_iou_start(http_compute, vm):
    with asyncio_patch("gns3server.compute.iou.iou_vm.IOUVM.start", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
        response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/start".format(project_id=vm["project_id"], node_id=vm["node_id"]))
        assert mock.called
        assert response.status == 200
        assert response.json["name"] == "PC TEST 1"

@ -147,11 +147,11 @@ def test_iou_start_with_iourc(http_compute, vm, tmpdir):

    body = {"iourc_content": "test"}

    with asyncio_patch("gns3server.compute.iou.iou_vm.IOUVM.start", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=body, example=True)
        response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/start".format(project_id=vm["project_id"], node_id=vm["node_id"]), body=body, example=True)
        assert mock.called
        assert response.status == 200

    response = http_compute.get("/projects/{project_id}/iou/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
    response = http_compute.get("/projects/{project_id}/iou/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]))
    assert response.status == 200
    with open(response.json["iourc_path"]) as f:
        assert f.read() == "test"

@ -159,21 +159,21 @@ def test_iou_start_with_iourc(http_compute, vm, tmpdir):

def test_iou_stop(http_compute, vm):
    with asyncio_patch("gns3server.compute.iou.iou_vm.IOUVM.stop", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/stop".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_iou_reload(http_compute, vm):
    with asyncio_patch("gns3server.compute.iou.iou_vm.IOUVM.reload", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/reload".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/reload".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_iou_delete(http_compute, vm):
    with asyncio_patch("gns3server.compute.iou.IOU.delete_vm", return_value=True) as mock:
        response = http_compute.delete("/projects/{project_id}/iou/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    with asyncio_patch("gns3server.compute.iou.IOU.delete_node", return_value=True) as mock:
        response = http_compute.delete("/projects/{project_id}/iou/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204

@ -191,7 +191,7 @@ def test_iou_update(http_compute, vm, tmpdir, free_console_port, project):

              "use_default_iou_values": True,
              "iourc_content": "test"
              }
    response = http_compute.put("/projects/{project_id}/iou/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), params, example=True)
    response = http_compute.put("/projects/{project_id}/iou/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), params, example=True)
    assert response.status == 200
    assert response.json["name"] == "test"
    assert response.json["console"] == free_console_port

@ -209,55 +209,55 @@ def test_iou_update(http_compute, vm, tmpdir, free_console_port, project):


def test_iou_nio_create_udp(http_compute, vm):
    response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
                                                                                                         "lport": 4242,
                                                                                                         "rport": 4343,
                                                                                                         "rhost": "127.0.0.1"},
    response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                             "lport": 4242,
                                                                                                             "rport": 4343,
                                                                                                             "rhost": "127.0.0.1"},
                                 example=True)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


def test_iou_nio_create_ethernet(http_compute, vm, ethernet_device):
    response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_generic_ethernet",
                                                                                                         "ethernet_device": ethernet_device,
                                                                                                         },
    response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_generic_ethernet",
                                                                                                             "ethernet_device": ethernet_device,
                                                                                                             },
                                 example=True)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_generic_ethernet"
    assert response.json["ethernet_device"] == ethernet_device


def test_iou_nio_create_ethernet_different_port(http_compute, vm, ethernet_device):
    response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/0/ports/3/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_generic_ethernet",
                                                                                                         "ethernet_device": ethernet_device,
                                                                                                         },
    response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/0/ports/3/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_generic_ethernet",
                                                                                                             "ethernet_device": ethernet_device,
                                                                                                             },
                                 example=False)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_generic_ethernet"
    assert response.json["ethernet_device"] == ethernet_device


def test_iou_nio_create_tap(http_compute, vm, ethernet_device):
    with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=True):
        response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_tap",
                                                                                                             "tap_device": ethernet_device})
        response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_tap",
                                                                                                                 "tap_device": ethernet_device})
        assert response.status == 201
        assert response.route == "/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
        assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
        assert response.json["type"] == "nio_tap"


def test_iou_delete_nio(http_compute, vm):
    http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
                                                                                              "lport": 4242,
                                                                                              "rport": 4343,
                                                                                              "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/iou/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                  "lport": 4242,
                                                                                                  "rport": 4343,
                                                                                                  "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204
    assert response.route == "/projects/{project_id}/iou/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_iou_start_capture(http_compute, vm, tmpdir, project):

@ -266,7 +266,7 @@ def test_iou_start_capture(http_compute, vm, tmpdir, project):

    with asyncio_patch("gns3server.compute.iou.iou_vm.IOUVM.start_capture") as start_capture:

        params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}
        response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=params, example=True)
        response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], node_id=vm["node_id"]), body=params, example=True)

        assert response.status == 200

@ -280,7 +280,7 @@ def test_iou_start_capture_not_started(http_compute, vm, tmpdir):

    with asyncio_patch("gns3server.compute.iou.iou_vm.IOUVM.start_capture") as start_capture:

        params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}
        response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=params)
        response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], node_id=vm["node_id"]), body=params)

        assert not start_capture.called
        assert response.status == 409

@ -291,7 +291,7 @@ def test_iou_stop_capture(http_compute, vm, tmpdir, project):

    with patch("gns3server.compute.iou.iou_vm.IOUVM.is_running", return_value=True) as mock:
        with asyncio_patch("gns3server.compute.iou.iou_vm.IOUVM.stop_capture") as stop_capture:

            response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
            response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)

            assert response.status == 204

@ -303,7 +303,7 @@ def test_iou_stop_capture_not_started(http_compute, vm, tmpdir):

    with patch("gns3server.compute.iou.iou_vm.IOUVM.is_running", return_value=False) as mock:
        with asyncio_patch("gns3server.compute.iou.iou_vm.IOUVM.stop_capture") as stop_capture:

            response = http_compute.post("/projects/{project_id}/iou/vms/{vm_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
            response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], node_id=vm["node_id"]))

            assert not stop_capture.called
            assert response.status == 409
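The four capture tests above pin down one payload and two failure modes: starting or stopping a capture on a node that is not running answers 409. A minimal client sketch under the same hypothetical placeholders:

    import requests

    BASE = "http://localhost:3080/v2/compute"            # assumed server address and prefix
    project_id = "11111111-2222-3333-4444-555555555555"  # hypothetical UUIDs
    node_id = "66666666-7777-8888-9999-000000000000"

    port_url = "{}/projects/{}/iou/nodes/{}/adapters/0/ports/0".format(BASE, project_id, node_id)
    params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}

    # 200 when the node is running, 409 when it is not
    r = requests.post(port_url + "/start_capture", json=params)

    # likewise 204 on a running node, 409 otherwise
    r = requests.post(port_url + "/stop_capture")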
@ -311,7 +311,7 @@ def test_iou_stop_capture_not_started(http_compute, vm, tmpdir):


def test_get_configs_without_configs_file(http_compute, vm):

    response = http_compute.get("/projects/{project_id}/iou/vms/{vm_id}/configs".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    response = http_compute.get("/projects/{project_id}/iou/nodes/{node_id}/configs".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 200
    assert "startup_config" not in response.json
    assert "private_config" not in response.json

@ -323,7 +323,7 @@ def test_get_configs_with_startup_config_file(http_compute, project, vm):

    with open(path, "w+") as f:
        f.write("TEST")

    response = http_compute.get("/projects/{project_id}/iou/vms/{vm_id}/configs".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    response = http_compute.get("/projects/{project_id}/iou/nodes/{node_id}/configs".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 200
    assert response.json["startup_config_content"] == "TEST"

@ -331,14 +331,14 @@ def test_get_configs_with_startup_config_file(http_compute, project, vm):

def test_vms(http_compute, tmpdir, fake_iou_bin):

    with patch("gns3server.compute.IOU.get_images_directory", return_value=str(tmpdir)):
        response = http_compute.get("/iou/vms", example=True)
        response = http_compute.get("/iou/nodes", example=True)
    assert response.status == 200
    assert response.json == [{"filename": "iou.bin", "path": "iou.bin"}]


def test_upload_vm(http_compute, tmpdir):
    with patch("gns3server.compute.IOU.get_images_directory", return_value=str(tmpdir),):
        response = http_compute.post("/iou/vms/test2", body="TEST", raw=True)
        response = http_compute.post("/iou/nodes/test2", body="TEST", raw=True)
    assert response.status == 204

    with open(str(tmpdir / "test2")) as f:
@ -58,15 +58,15 @@ def base_params(tmpdir, fake_qemu_bin):

@pytest.fixture
def vm(http_compute, project, base_params):
    response = http_compute.post("/projects/{project_id}/qemu/vms".format(project_id=project.id), base_params)
    response = http_compute.post("/projects/{project_id}/qemu/nodes".format(project_id=project.id), base_params)
    assert response.status == 201
    return response.json


def test_qemu_create(http_compute, project, base_params, fake_qemu_bin):
    response = http_compute.post("/projects/{project_id}/qemu/vms".format(project_id=project.id), base_params)
    response = http_compute.post("/projects/{project_id}/qemu/nodes".format(project_id=project.id), base_params)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/qemu/vms"
    assert response.route == "/projects/{project_id}/qemu/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["qemu_path"] == fake_qemu_bin

@ -77,9 +77,9 @@ def test_qemu_create_platform(http_compute, project, base_params, fake_qemu_bin)

    base_params["qemu_path"] = None
    base_params["platform"] = "x86_64"

    response = http_compute.post("/projects/{project_id}/qemu/vms".format(project_id=project.id), base_params)
    response = http_compute.post("/projects/{project_id}/qemu/nodes".format(project_id=project.id), base_params)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/qemu/vms"
    assert response.route == "/projects/{project_id}/qemu/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["qemu_path"] == fake_qemu_bin

@ -91,10 +91,10 @@ def test_qemu_create_with_params(http_compute, project, base_params, fake_qemu_v

    params["ram"] = 1024
    params["hda_disk_image"] = "linux载.img"

    response = http_compute.post("/projects/{project_id}/qemu/vms".format(project_id=project.id), params, example=True)
    response = http_compute.post("/projects/{project_id}/qemu/nodes".format(project_id=project.id), params, example=True)

    assert response.status == 201
    assert response.route == "/projects/{project_id}/qemu/vms"
    assert response.route == "/projects/{project_id}/qemu/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["ram"] == 1024
@ -103,17 +103,17 @@ def test_qemu_create_with_params(http_compute, project, base_params, fake_qemu_v


def test_qemu_get(http_compute, project, vm):
    response = http_compute.get("/projects/{project_id}/qemu/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    response = http_compute.get("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 200
    assert response.route == "/projects/{project_id}/qemu/vms/{vm_id}"
    assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["vm_directory"] == os.path.join(project.path, "project-files", "qemu", vm["vm_id"])
    assert response.json["vm_directory"] == os.path.join(project.path, "project-files", "qemu", vm["node_id"])


def test_qemu_start(http_compute, vm):
    with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.start", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/qemu/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/start".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 200
        assert response.json["name"] == "PC TEST 1"

@ -121,35 +121,35 @@ def test_qemu_start(http_compute, vm):

def test_qemu_stop(http_compute, vm):
    with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.stop", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/qemu/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/stop".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_qemu_reload(http_compute, vm):
    with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.reload", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/qemu/vms/{vm_id}/reload".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/reload".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_qemu_suspend(http_compute, vm):
    with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.suspend", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/qemu/vms/{vm_id}/suspend".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/suspend".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_qemu_resume(http_compute, vm):
    with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.resume", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/qemu/vms/{vm_id}/resume".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/resume".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_qemu_delete(http_compute, vm):
    with asyncio_patch("gns3server.compute.qemu.Qemu.delete_vm", return_value=True) as mock:
        response = http_compute.delete("/projects/{project_id}/qemu/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    with asyncio_patch("gns3server.compute.qemu.Qemu.delete_node", return_value=True) as mock:
        response = http_compute.delete("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204

@ -161,7 +161,7 @@ def test_qemu_update(http_compute, vm, tmpdir, free_console_port, project, fake_

              "ram": 1024,
              "hdb_disk_image": "linux.img"
              }
    response = http_compute.put("/projects/{project_id}/qemu/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), params, example=True)
    response = http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), params, example=True)
    assert response.status == 200
    assert response.json["name"] == "test"
    assert response.json["console"] == free_console_port
@ -170,35 +170,35 @@ def test_qemu_update(http_compute, vm, tmpdir, free_console_port, project, fake_


def test_qemu_nio_create_udp(http_compute, vm):
    http_compute.put("/projects/{project_id}/qemu/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"adapters": 2})
    response = http_compute.post("/projects/{project_id}/qemu/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
                                                                                                          "lport": 4242,
                                                                                                          "rport": 4343,
                                                                                                          "rhost": "127.0.0.1"},
    http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2})
    response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                              "lport": 4242,
                                                                                                              "rport": 4343,
                                                                                                              "rhost": "127.0.0.1"},
                                 example=True)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/qemu/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


def test_qemu_nio_create_ethernet(http_compute, vm):
    http_compute.put("/projects/{project_id}/qemu/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"adapters": 2})
    response = http_compute.post("/projects/{project_id}/qemu/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_generic_ethernet",
                                                                                                          "ethernet_device": "eth0",
                                                                                                          },
    http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2})
    response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_generic_ethernet",
                                                                                                              "ethernet_device": "eth0",
                                                                                                              },
                                 example=True)
    assert response.status == 409


def test_qemu_delete_nio(http_compute, vm):
    http_compute.put("/projects/{project_id}/qemu/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"adapters": 2})
    http_compute.post("/projects/{project_id}/qemu/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
                                                                                               "lport": 4242,
                                                                                               "rport": 4343,
                                                                                               "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/qemu/vms/{vm_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2})
    http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                   "lport": 4242,
                                                                                                   "rport": 4343,
                                                                                                   "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204
    assert response.route == "/projects/{project_id}/qemu/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_qemu_list_binaries(http_compute, vm):
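The NIO tests above fix the adapter wiring contract after the rename: grow the adapter count with a PUT on the node, then bind and unbind a NIO per adapter/port. A minimal client sketch with the same hypothetical placeholders:

    import requests

    BASE = "http://localhost:3080/v2/compute"            # assumed server address and prefix
    project_id = "11111111-2222-3333-4444-555555555555"  # hypothetical UUIDs
    node_id = "66666666-7777-8888-9999-000000000000"

    node_url = "{}/projects/{}/qemu/nodes/{}".format(BASE, project_id, node_id)

    # give the node a second adapter, then bind a UDP NIO to adapter 1, port 0
    requests.put(node_url, json={"adapters": 2})
    nio = {"type": "nio_udp", "lport": 4242, "rport": 4343, "rhost": "127.0.0.1"}
    r = requests.post(node_url + "/adapters/1/ports/0/nio", json=nio)
    assert r.status_code == 201

    # unbinding the same port answers 204 with no body
    r = requests.delete(node_url + "/adapters/1/ports/0/nio")
    assert r.status_code == 204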
@ -226,14 +226,14 @@ def test_qemu_list_binaries_filter(http_compute, vm):

def test_vms(http_compute, tmpdir, fake_qemu_vm):

    response = http_compute.get("/qemu/vms")
    response = http_compute.get("/qemu/nodes")
    assert response.status == 200
    assert response.json == [{"filename": "linux载.img", "path": "linux载.img"}]


def test_upload_vm(http_compute, tmpdir):
    with patch("gns3server.compute.Qemu.get_images_directory", return_value=str(tmpdir),):
        response = http_compute.post("/qemu/vms/test2", body="TEST", raw=True)
        response = http_compute.post("/qemu/nodes/test2", body="TEST", raw=True)
    assert response.status == 204

    with open(str(tmpdir / "test2")) as f:

@ -246,7 +246,7 @@ def test_upload_vm(http_compute, tmpdir):

def test_upload_vm_ova(http_compute, tmpdir):
    with patch("gns3server.compute.Qemu.get_images_directory", return_value=str(tmpdir),):
        response = http_compute.post("/qemu/vms/test2.ova/test2.vmdk", body="TEST", raw=True)
        response = http_compute.post("/qemu/nodes/test2.ova/test2.vmdk", body="TEST", raw=True)
    assert response.status == 204

    with open(str(tmpdir / "test2.ova" / "test2.vmdk")) as f:

@ -259,7 +259,7 @@ def test_upload_vm_ova(http_compute, tmpdir):

def test_upload_vm_forbiden_location(http_compute, tmpdir):
    with patch("gns3server.compute.Qemu.get_images_directory", return_value=str(tmpdir),):
        response = http_compute.post("/qemu/vms/../../test2", body="TEST", raw=True)
        response = http_compute.post("/qemu/nodes/../../test2", body="TEST", raw=True)
    assert response.status == 403

@ -269,7 +269,7 @@ def test_upload_vm_permission_denied(http_compute, tmpdir):

    os.chmod(str(tmpdir / "test2.tmp"), 0)

    with patch("gns3server.compute.Qemu.get_images_directory", return_value=str(tmpdir),):
        response = http_compute.post("/qemu/vms/test2", body="TEST", raw=True)
        response = http_compute.post("/qemu/nodes/test2", body="TEST", raw=True)
    assert response.status == 409
@ -26,7 +26,7 @@ def vm(http_compute, project, monkeypatch):

    vboxmanage_path = "/fake/VboxManage"

    with asyncio_patch("gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.create", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/virtualbox/vms".format(project_id=project.id), {"name": "VMTEST",
        response = http_compute.post("/projects/{project_id}/virtualbox/nodes".format(project_id=project.id), {"name": "VMTEST",
                                                                                                              "vmname": "VMTEST",
                                                                                                              "linked_clone": False})
    assert mock.called

@ -39,7 +39,7 @@ def vm(http_compute, project, monkeypatch):

def test_vbox_create(http_compute, project):

    with asyncio_patch("gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.create", return_value=True):
        response = http_compute.post("/projects/{project_id}/virtualbox/vms".format(project_id=project.id), {"name": "VM1",
        response = http_compute.post("/projects/{project_id}/virtualbox/nodes".format(project_id=project.id), {"name": "VM1",
                                                                                                              "vmname": "VM1",
                                                                                                              "linked_clone": False},
                                      example=True)

@ -49,9 +49,9 @@ def test_vbox_create(http_compute, project):


def test_vbox_get(http_compute, project, vm):
    response = http_compute.get("/projects/{project_id}/virtualbox/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    response = http_compute.get("/projects/{project_id}/virtualbox/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 200
    assert response.route == "/projects/{project_id}/virtualbox/vms/{vm_id}"
    assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}"
    assert response.json["name"] == "VMTEST"
    assert response.json["project_id"] == project.id

@ -59,35 +59,35 @@ def test_vbox_get(http_compute, project, vm):

def test_vbox_start(http_compute, vm):
    with asyncio_patch("gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.check_hw_virtualization", return_value=True) as mock:
        with asyncio_patch("gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.start", return_value=True) as mock:
            response = http_compute.post("/projects/{project_id}/virtualbox/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
            response = http_compute.post("/projects/{project_id}/virtualbox/nodes/{node_id}/start".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
            assert mock.called
            assert response.status == 204


def test_vbox_stop(http_compute, vm):
    with asyncio_patch("gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.stop", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/virtualbox/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/virtualbox/nodes/{node_id}/stop".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_vbox_suspend(http_compute, vm):
    with asyncio_patch("gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.suspend", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/virtualbox/vms/{vm_id}/suspend".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/virtualbox/nodes/{node_id}/suspend".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_vbox_resume(http_compute, vm):
    with asyncio_patch("gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.resume", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/virtualbox/vms/{vm_id}/resume".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/virtualbox/nodes/{node_id}/resume".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_vbox_reload(http_compute, vm):
    with asyncio_patch("gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.reload", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/virtualbox/vms/{vm_id}/reload".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/virtualbox/nodes/{node_id}/reload".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204
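Every lifecycle action above follows the same pattern against the renamed /nodes path: a bare POST that answers 204 with no body. A minimal sketch with the same hypothetical placeholders:

    import requests

    BASE = "http://localhost:3080/v2/compute"            # assumed server address and prefix
    project_id = "11111111-2222-3333-4444-555555555555"  # hypothetical UUIDs
    node_id = "66666666-7777-8888-9999-000000000000"

    # start/suspend/resume/reload/stop all share the same empty-body POST shape
    for action in ("start", "suspend", "resume", "reload", "stop"):
        r = requests.post("{}/projects/{}/virtualbox/nodes/{}/{}".format(BASE, project_id, node_id, action))
        assert r.status_code == 204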
@ -95,11 +95,11 @@ def test_vbox_reload(http_compute, vm):

def test_vbox_nio_create_udp(http_compute, vm):

    with asyncio_patch('gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.adapter_add_nio_binding') as mock:
        response = http_compute.post("/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"],
                                                                                                                   vm_id=vm["vm_id"]), {"type": "nio_udp",
                                                                                                                                        "lport": 4242,
                                                                                                                                        "rport": 4343,
                                                                                                                                        "rhost": "127.0.0.1"},
        response = http_compute.post("/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"],
                                                                                                                       node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                                                                "lport": 4242,
                                                                                                                                                "rport": 4343,
                                                                                                                                                "rhost": "127.0.0.1"},
                                      example=True)

    assert mock.called

@ -107,26 +107,26 @@ def test_vbox_nio_create_udp(http_compute, vm):

    assert args[0] == 0

    assert response.status == 201
    assert response.route == "/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


def test_vbox_delete_nio(http_compute, vm):

    with asyncio_patch('gns3server.compute.virtualbox.virtualbox_vm.VirtualBoxVM.adapter_remove_nio_binding') as mock:
        response = http_compute.delete("/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.delete("/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)

    assert mock.called
    args, kwgars = mock.call_args
    assert args[0] == 0

    assert response.status == 204
    assert response.route == "/projects/{project_id}/virtualbox/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_vbox_update(http_compute, vm, free_console_port):
    response = http_compute.put("/projects/{project_id}/virtualbox/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"name": "test",
                                                                                                                                         "console": free_console_port},
    response = http_compute.put("/projects/{project_id}/virtualbox/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"name": "test",
                                                                                                                                                 "console": free_console_port},
                                example=True)
    assert response.status == 200
    assert response.json["name"] == "test"
@ -24,23 +24,23 @@ from unittest.mock import patch

@pytest.fixture(scope="function")
def vm(http_compute, project):
    response = http_compute.post("/projects/{project_id}/vpcs/vms".format(project_id=project.id), {"name": "PC TEST 1"})
    response = http_compute.post("/projects/{project_id}/vpcs/nodes".format(project_id=project.id), {"name": "PC TEST 1"})
    assert response.status == 201
    return response.json


def test_vpcs_create(http_compute, project):
    response = http_compute.post("/projects/{project_id}/vpcs/vms".format(project_id=project.id), {"name": "PC TEST 1"}, example=True)
    response = http_compute.post("/projects/{project_id}/vpcs/nodes".format(project_id=project.id), {"name": "PC TEST 1"}, example=True)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/vpcs/vms"
    assert response.route == "/projects/{project_id}/vpcs/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id


def test_vpcs_get(http_compute, project, vm):
    response = http_compute.get("/projects/{project_id}/vpcs/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    response = http_compute.get("/projects/{project_id}/vpcs/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 200
    assert response.route == "/projects/{project_id}/vpcs/vms/{vm_id}"
    assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["startup_script_path"] is None

@ -48,9 +48,9 @@ def test_vpcs_get(http_compute, project, vm):


def test_vpcs_create_startup_script(http_compute, project):
    response = http_compute.post("/projects/{project_id}/vpcs/vms".format(project_id=project.id), {"name": "PC TEST 1", "startup_script": "ip 192.168.1.2\necho TEST"})
    response = http_compute.post("/projects/{project_id}/vpcs/nodes".format(project_id=project.id), {"name": "PC TEST 1", "startup_script": "ip 192.168.1.2\necho TEST"})
    assert response.status == 201
    assert response.route == "/projects/{project_id}/vpcs/vms"
    assert response.route == "/projects/{project_id}/vpcs/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["startup_script"] == os.linesep.join(["ip 192.168.1.2", "echo TEST"])
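The startup-script test above also fixes a normalisation rule: whatever line endings the client sends, the stored script comes back joined with os.linesep. A minimal sketch with the same hypothetical placeholders:

    import requests

    BASE = "http://localhost:3080/v2/compute"            # assumed server address and prefix
    project_id = "11111111-2222-3333-4444-555555555555"  # hypothetical project UUID

    payload = {"name": "PC TEST 1", "startup_script": "ip 192.168.1.2\necho TEST"}
    r = requests.post("{}/projects/{}/vpcs/nodes".format(BASE, project_id), json=payload)
    assert r.status_code == 201
    # the response echoes the script with the server's os.linesep line endings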
@ -58,49 +58,49 @@ def test_vpcs_create_startup_script(http_compute, project):


def test_vpcs_create_port(http_compute, project, free_console_port):
    response = http_compute.post("/projects/{project_id}/vpcs/vms".format(project_id=project.id), {"name": "PC TEST 1", "console": free_console_port})
    response = http_compute.post("/projects/{project_id}/vpcs/nodes".format(project_id=project.id), {"name": "PC TEST 1", "console": free_console_port})
    assert response.status == 201
    assert response.route == "/projects/{project_id}/vpcs/vms"
    assert response.route == "/projects/{project_id}/vpcs/nodes"
    assert response.json["name"] == "PC TEST 1"
    assert response.json["project_id"] == project.id
    assert response.json["console"] == free_console_port


def test_vpcs_nio_create_udp(http_compute, vm):
    response = http_compute.post("/projects/{project_id}/vpcs/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
    response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                              "lport": 4242,
                                                                                                              "rport": 4343,
                                                                                                              "rhost": "127.0.0.1"},
                                 example=True)
    assert response.status == 201
    assert response.route == "/projects/{project_id}/vpcs/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
def test_vpcs_nio_create_tap(http_compute, vm, ethernet_device):
    with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=True):
        response = http_compute.post("/projects/{project_id}/vpcs/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_tap",
        response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_tap",
                                                                                                                  "tap_device": ethernet_device})
        assert response.status == 201
        assert response.route == "/projects/{project_id}/vpcs/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
        assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
        assert response.json["type"] == "nio_tap"


def test_vpcs_delete_nio(http_compute, vm):
    http_compute.post("/projects/{project_id}/vpcs/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
    http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                   "lport": 4242,
                                                                                                   "rport": 4343,
                                                                                                   "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/vpcs/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    response = http_compute.delete("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204
    assert response.route == "/projects/{project_id}/vpcs/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_vpcs_start(http_compute, vm):

    with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/vpcs/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/start".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 200
        assert response.json["name"] == "PC TEST 1"

@ -108,27 +108,27 @@ def test_vpcs_start(http_compute, vm):

def test_vpcs_stop(http_compute, vm):
    with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.stop", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/vpcs/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/stop".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_vpcs_reload(http_compute, vm):
    with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.reload", return_value=True) as mock:
        response = http_compute.post("/projects/{project_id}/vpcs/vms/{vm_id}/reload".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
        response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/reload".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_vpcs_delete(http_compute, vm):
    with asyncio_patch("gns3server.compute.vpcs.VPCS.delete_vm", return_value=True) as mock:
        response = http_compute.delete("/projects/{project_id}/vpcs/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
    with asyncio_patch("gns3server.compute.vpcs.VPCS.delete_node", return_value=True) as mock:
        response = http_compute.delete("/projects/{project_id}/vpcs/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
        assert mock.called
        assert response.status == 204


def test_vpcs_update(http_compute, vm, tmpdir, free_console_port):
    response = http_compute.put("/projects/{project_id}/vpcs/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"name": "test",
    response = http_compute.put("/projects/{project_id}/vpcs/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"name": "test",
                                                                                                                                            "console": free_console_port,
                                                                                                                                            "startup_script": "ip 192.168.1.1"},
                                example=True)
@ -31,7 +31,7 @@ from tests.utils import asyncio_patch, AsyncioMagicMock

from gns3server.handlers.api.controller.project_handler import ProjectHandler
from gns3server.controller import Controller
from gns3server.controller.vm import VM
from gns3server.controller.node import Node
from gns3server.controller.link import Link


@ -53,19 +53,19 @@ def test_create_link(http_controller, tmpdir, project, compute, async_run):

    response.json = {"console": 2048}
    compute.post = AsyncioMagicMock(return_value=response)

    vm1 = async_run(project.addVM(compute, None))
    vm2 = async_run(project.addVM(compute, None))
    node1 = async_run(project.add_node(compute, None))
    node2 = async_run(project.add_node(compute, None))

    with asyncio_patch("gns3server.controller.udp_link.UDPLink.create") as mock:
        response = http_controller.post("/projects/{}/links".format(project.id), {
            "vms": [
            "nodes": [
                {
                    "vm_id": vm1.id,
                    "node_id": node1.id,
                    "adapter_number": 0,
                    "port_number": 3
                },
                {
                    "vm_id": vm2.id,
                    "node_id": node2.id,
                    "adapter_number": 2,
                    "port_number": 4
                }

@ -74,7 +74,7 @@ def test_create_link(http_controller, tmpdir, project, compute, async_run):

    assert mock.called
    assert response.status == 201
    assert response.json["link_id"] is not None
    assert len(response.json["vms"]) == 2
    assert len(response.json["nodes"]) == 2


def test_start_capture(http_controller, tmpdir, project, compute, async_run):
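On the controller side, the same rename reaches the link payload: the endpoints array is now called "nodes" and each endpoint carries a node_id instead of a vm_id. A minimal client sketch against the controller API, with hypothetical placeholders (the controller is assumed to listen on localhost:3080 under /v2):

    import requests

    CONTROLLER = "http://localhost:3080/v2"              # assumed controller address and prefix
    project_id = "11111111-2222-3333-4444-555555555555"  # hypothetical UUIDs
    node1_id = "66666666-7777-8888-9999-000000000000"
    node2_id = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"

    link = {
        "nodes": [                  # renamed from "vms"
            {"node_id": node1_id, "adapter_number": 0, "port_number": 3},
            {"node_id": node2_id, "adapter_number": 2, "port_number": 4},
        ]
    }
    r = requests.post("{}/projects/{}/links".format(CONTROLLER, project_id), json=link)
    assert r.status_code == 201
    assert len(r.json()["nodes"]) == 2  # the response echoes both endpoints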