diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 00000000..54e2f8e1
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,70 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ master ]
+ schedule:
+ - cron: '44 1 * * 3'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'python' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Learn more about CodeQL language support at https://git.io/codeql-language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v1
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v1
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/publish_api_documentation.yml b/.github/workflows/publish_api_documentation.yml
index 348ebe16..782e76fe 100644
--- a/.github/workflows/publish_api_documentation.yml
+++ b/.github/workflows/publish_api_documentation.yml
@@ -18,7 +18,7 @@ jobs:
ref: "gh-pages"
- uses: actions/setup-python@v2
with:
- python-version: 3.6
+ python-version: 3.7
- name: Merge changes from 3.0 branch
run: |
git config user.name github-actions
diff --git a/CHANGELOG b/CHANGELOG
index b453d4f8..c9b2e747 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,5 +1,17 @@
# Change Log
+## 2.2.32 27/04/2022
+
+* Docker: load custom interface files from /etc/network/interfaces (commented by default). Ref #2052
+* Release web UI 2.2.32
+* Create `/etc/network/interfaces.d` in Docker container. Fixes #2052
+* Prettify Docker '/etc/network/interfaces' file. Ref #2040
+* Use public DSNs for Sentry
+* Fix VMware Fusion VM does not start on macOS >= 11. Fixes #2027
+* Include conf file in MANIFEST.in Ref #2044
+* Use Python 3.7 to publish API documentation
+* Development on 2.2.32dev1
+
## 2.2.31 26/02/2022
* Install setuptools v59.6.0 when using Python 3.6
diff --git a/MANIFEST.in b/MANIFEST.in
index 438d9f6e..2b9ab0ff 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@ include AUTHORS
include LICENSE
include MANIFEST.in
include requirements.txt
+include conf/*.conf
recursive-include tests *
recursive-exclude docs *
recursive-include gns3server *
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000..7161ebd1
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,17 @@
+# Security Policy
+
+## Supported Versions
+
+These are the versions of the GNS3 server that are
+currently being supported with security updates.
+
+| Version | Supported |
+| ------- | ------------------ |
+| 3.x.x | :white_check_mark: |
+| 2.2.x | :white_check_mark: |
+| 2.1.x | :x: |
+| 1.x.x | :x: |
+
+## Reporting a Vulnerability
+
+Please contact us at security@gns3.net
diff --git a/gns3server/api/routes/compute/__init__.py b/gns3server/api/routes/compute/__init__.py
index 69857ec8..2453c0e8 100644
--- a/gns3server/api/routes/compute/__init__.py
+++ b/gns3server/api/routes/compute/__init__.py
@@ -51,6 +51,11 @@ from . import virtualbox_nodes
from . import vmware_nodes
from . import vpcs_nodes
+import logging
+
+log = logging.getLogger(__name__)
+
+
compute_api = FastAPI(
title="GNS3 compute API",
dependencies=[Depends(compute_authentication)],
@@ -63,6 +68,7 @@ compute_api.state.controller_host = None
@compute_api.exception_handler(ComputeError)
async def controller_error_handler(request: Request, exc: ComputeError):
+ log.error(f"Compute error: {exc}")
return JSONResponse(
status_code=409,
content={"message": str(exc)},
@@ -71,6 +77,7 @@ async def controller_error_handler(request: Request, exc: ComputeError):
@compute_api.exception_handler(ComputeTimeoutError)
async def controller_timeout_error_handler(request: Request, exc: ComputeTimeoutError):
+ log.error(f"Compute timeout error: {exc}")
return JSONResponse(
status_code=408,
content={"message": str(exc)},
@@ -79,6 +86,7 @@ async def controller_timeout_error_handler(request: Request, exc: ComputeTimeout
@compute_api.exception_handler(ComputeUnauthorizedError)
async def controller_unauthorized_error_handler(request: Request, exc: ComputeUnauthorizedError):
+ log.error(f"Compute unauthorized error: {exc}")
return JSONResponse(
status_code=401,
content={"message": str(exc)},
@@ -87,6 +95,7 @@ async def controller_unauthorized_error_handler(request: Request, exc: ComputeUn
@compute_api.exception_handler(ComputeForbiddenError)
async def controller_forbidden_error_handler(request: Request, exc: ComputeForbiddenError):
+ log.error(f"Compute forbidden error: {exc}")
return JSONResponse(
status_code=403,
content={"message": str(exc)},
@@ -95,6 +104,7 @@ async def controller_forbidden_error_handler(request: Request, exc: ComputeForbi
@compute_api.exception_handler(ComputeNotFoundError)
async def controller_not_found_error_handler(request: Request, exc: ComputeNotFoundError):
+ log.error(f"Compute not found error: {exc}")
return JSONResponse(
status_code=404,
content={"message": str(exc)},
@@ -103,6 +113,7 @@ async def controller_not_found_error_handler(request: Request, exc: ComputeNotFo
@compute_api.exception_handler(GNS3VMError)
async def controller_error_handler(request: Request, exc: GNS3VMError):
+ log.error(f"Compute GNS3 VM error: {exc}")
return JSONResponse(
status_code=409,
content={"message": str(exc)},
@@ -111,6 +122,7 @@ async def controller_error_handler(request: Request, exc: GNS3VMError):
@compute_api.exception_handler(ImageMissingError)
async def image_missing_error_handler(request: Request, exc: ImageMissingError):
+ log.error(f"Compute image missing error: {exc}")
return JSONResponse(
status_code=409,
content={"message": str(exc), "image": exc.image, "exception": exc.__class__.__name__},
@@ -119,6 +131,7 @@ async def image_missing_error_handler(request: Request, exc: ImageMissingError):
@compute_api.exception_handler(NodeError)
async def node_error_handler(request: Request, exc: NodeError):
+ log.error(f"Compute node error: {exc}")
return JSONResponse(
status_code=409,
content={"message": str(exc), "exception": exc.__class__.__name__},
@@ -127,6 +140,7 @@ async def node_error_handler(request: Request, exc: NodeError):
@compute_api.exception_handler(UbridgeError)
async def ubridge_error_handler(request: Request, exc: UbridgeError):
+ log.error(f"Compute uBridge error: {exc}")
return JSONResponse(
status_code=409,
content={"message": str(exc), "exception": exc.__class__.__name__},
diff --git a/gns3server/api/routes/compute/compute.py b/gns3server/api/routes/compute/compute.py
index 518f7441..5ff3f424 100644
--- a/gns3server/api/routes/compute/compute.py
+++ b/gns3server/api/routes/compute/compute.py
@@ -25,7 +25,7 @@ import psutil
from gns3server.config import Config
from gns3server.utils.cpu_percent import CpuPercent
from gns3server.version import __version__
-from gns3server.utils.path import get_default_project_directory
+from gns3server.utils.path import get_default_project_directory, is_safe_path
from gns3server.compute.port_manager import PortManager
from gns3server.compute.project_manager import ProjectManager
from gns3server.utils.interfaces import interfaces
@@ -81,8 +81,7 @@ def compute_version() -> dict:
Retrieve the server version number.
"""
- local_server = Config.instance().settings.Server.local
- return {"version": __version__, "local": local_server}
+ return {"version": __version__}
@router.get("/statistics")
@@ -145,47 +144,6 @@ async def get_qemu_capabilities() -> dict:
return capabilities
-@router.post(
- "/qemu/img",
- status_code=status.HTTP_204_NO_CONTENT,
- responses={403: {"model": schemas.ErrorMessage, "description": "Forbidden to create Qemu image"}},
-)
-async def create_qemu_image(image_data: schemas.QemuImageCreate) -> Response:
- """
- Create a Qemu image.
- """
-
- if os.path.isabs(image_data.path):
- if Config.instance().settings.Server.local is False:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
-
- await Qemu.instance().create_disk(
- image_data.qemu_img, image_data.path, jsonable_encoder(image_data, exclude_unset=True)
- )
-
- return Response(status_code=status.HTTP_204_NO_CONTENT)
-
-
-@router.put(
- "/qemu/img",
- status_code=status.HTTP_204_NO_CONTENT,
- responses={403: {"model": schemas.ErrorMessage, "description": "Forbidden to update Qemu image"}},
-)
-async def update_qemu_image(image_data: schemas.QemuImageUpdate) -> Response:
- """
- Update a Qemu image.
- """
-
- if os.path.isabs(image_data.path):
- if Config.instance().settings.Server.local is False:
- raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
-
- if image_data.extend:
- await Qemu.instance().resize_disk(image_data.qemu_img, image_data.path, image_data.extend)
-
- return Response(status_code=status.HTTP_204_NO_CONTENT)
-
-
@router.get("/virtualbox/vms", response_model=List[dict])
async def get_virtualbox_vms() -> List[dict]:
diff --git a/gns3server/api/routes/compute/qemu_nodes.py b/gns3server/api/routes/compute/qemu_nodes.py
index d60625d5..c5df55f7 100644
--- a/gns3server/api/routes/compute/qemu_nodes.py
+++ b/gns3server/api/routes/compute/qemu_nodes.py
@@ -26,10 +26,10 @@ from fastapi.responses import StreamingResponse
from uuid import UUID
from gns3server import schemas
-from gns3server.compute.project_manager import ProjectManager
from gns3server.compute.qemu import Qemu
from gns3server.compute.qemu.qemu_vm import QemuVM
+
responses = {404: {"model": schemas.ErrorMessage, "description": "Could not find project or Qemu node"}}
router = APIRouter(responses=responses)
@@ -126,10 +126,55 @@ async def duplicate_qemu_node(
return new_node.asdict()
-@router.post("/{node_id}/resize_disk", status_code=status.HTTP_204_NO_CONTENT)
-async def resize_qemu_node_disk(node_data: schemas.QemuDiskResize, node: QemuVM = Depends(dep_node)) -> Response:
+@router.post(
+ "/{node_id}/disk_image/{disk_name}",
+ status_code=status.HTTP_204_NO_CONTENT
+)
+async def create_qemu_disk_image(
+ disk_name: str,
+ disk_data: schemas.QemuDiskImageCreate,
+ node: QemuVM = Depends(dep_node)
+) -> Response:
+ """
+ Create a Qemu disk image.
+ """
- await node.resize_disk(node_data.drive_name, node_data.extend)
+ options = jsonable_encoder(disk_data, exclude_unset=True)
+ await node.create_disk_image(disk_name, options)
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.put(
+ "/{node_id}/disk_image/{disk_name}",
+ status_code=status.HTTP_204_NO_CONTENT
+)
+async def update_qemu_disk_image(
+ disk_name: str,
+ disk_data: schemas.QemuDiskImageUpdate,
+ node: QemuVM = Depends(dep_node)
+) -> Response:
+ """
+ Update a Qemu disk image.
+ """
+
+ if disk_data.extend:
+ await node.resize_disk_image(disk_name, disk_data.extend)
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.delete(
+ "/{node_id}/disk_image/{disk_name}",
+ status_code=status.HTTP_204_NO_CONTENT
+)
+async def delete_qemu_disk_image(
+ disk_name: str,
+ node: QemuVM = Depends(dep_node)
+) -> Response:
+ """
+ Delete a Qemu disk image.
+ """
+
+ node.delete_disk_image(disk_name)
return Response(status_code=status.HTTP_204_NO_CONTENT)
diff --git a/gns3server/api/routes/controller/images.py b/gns3server/api/routes/controller/images.py
index 0ad05921..6bd3cc13 100644
--- a/gns3server/api/routes/controller/images.py
+++ b/gns3server/api/routes/controller/images.py
@@ -52,12 +52,13 @@ router = APIRouter()
@router.get("", response_model=List[schemas.Image])
async def get_images(
images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
+ image_type: Optional[schemas.ImageType] = None
) -> List[schemas.Image]:
"""
Return all images.
"""
- return await images_repo.get_images()
+ return await images_repo.get_images(image_type)
@router.post("/upload/{image_path:path}", response_model=schemas.Image, status_code=status.HTTP_201_CREATED)
@@ -85,7 +86,6 @@ async def upload_image(
if os.path.commonprefix([base_images_directory, full_path]) != base_images_directory:
raise ControllerForbiddenError(f"Cannot write image, '{image_path}' is forbidden")
- print(image_path)
if await images_repo.get_image(image_path):
raise ControllerBadRequestError(f"Image '{image_path}' already exists")
diff --git a/gns3server/api/routes/controller/nodes.py b/gns3server/api/routes/controller/nodes.py
index 0f28eb73..b97eb70d 100644
--- a/gns3server/api/routes/controller/nodes.py
+++ b/gns3server/api/routes/controller/nodes.py
@@ -32,7 +32,7 @@ from gns3server.controller.node import Node
from gns3server.controller.project import Project
from gns3server.utils import force_unix_path
from gns3server.utils.http_client import HTTPClient
-from gns3server.controller.controller_error import ControllerForbiddenError
+from gns3server.controller.controller_error import ControllerForbiddenError, ControllerBadRequestError
from gns3server import schemas
import logging
@@ -300,6 +300,8 @@ async def auto_idlepc(node: Node = Depends(dep_node)) -> str:
Compute an Idle-PC value for a Dynamips node
"""
+ if node.node_type != "dynamips":
+ raise ControllerBadRequestError("Auto Idle-PC is only supported on a Dynamips node")
return await node.dynamips_auto_idlepc()
@@ -309,16 +311,55 @@ async def idlepc_proposals(node: Node = Depends(dep_node)) -> List[str]:
Compute a list of potential idle-pc values for a Dynamips node
"""
+ if node.node_type != "dynamips":
+ raise ControllerBadRequestError("Idle-PC proposals is only supported on a Dynamips node")
return await node.dynamips_idlepc_proposals()
-@router.post("/{node_id}/resize_disk", status_code=status.HTTP_204_NO_CONTENT)
-async def resize_disk(resize_data: dict, node: Node = Depends(dep_node)) -> Response:
+@router.post("/{node_id}/qemu/disk_image/{disk_name}", status_code=status.HTTP_204_NO_CONTENT)
+async def create_disk_image(
+ disk_name: str,
+ disk_data: schemas.QemuDiskImageCreate,
+ node: Node = Depends(dep_node)
+) -> Response:
"""
- Resize a disk image.
+ Create a Qemu disk image.
"""
- await node.post("/resize_disk", **resize_data)
+ if node.node_type != "qemu":
+ raise ControllerBadRequestError("Creating a disk image is only supported on a Qemu node")
+ await node.post(f"/disk_image/{disk_name}", data=disk_data.dict(exclude_unset=True))
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.put("/{node_id}/qemu/disk_image/{disk_name}", status_code=status.HTTP_204_NO_CONTENT)
+async def update_disk_image(
+ disk_name: str,
+ disk_data: schemas.QemuDiskImageUpdate,
+ node: Node = Depends(dep_node)
+) -> Response:
+ """
+ Update a Qemu disk image.
+ """
+
+ if node.node_type != "qemu":
+ raise ControllerBadRequestError("Updating a disk image is only supported on a Qemu node")
+ await node.put(f"/disk_image/{disk_name}", data=disk_data.dict(exclude_unset=True))
+ return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.delete("/{node_id}/qemu/disk_image/{disk_name}", status_code=status.HTTP_204_NO_CONTENT)
+async def delete_disk_image(
+ disk_name: str,
+ node: Node = Depends(dep_node)
+) -> Response:
+ """
+ Delete a Qemu disk image.
+ """
+
+ if node.node_type != "qemu":
+ raise ControllerBadRequestError("Deleting a disk image is only supported on a Qemu node")
+ await node.delete(f"/disk_image/{disk_name}")
return Response(status_code=status.HTTP_204_NO_CONTENT)
diff --git a/gns3server/api/routes/controller/projects.py b/gns3server/api/routes/controller/projects.py
index 59dd1108..8dca5985 100644
--- a/gns3server/api/routes/controller/projects.py
+++ b/gns3server/api/routes/controller/projects.py
@@ -21,10 +21,10 @@ API routes for projects.
import os
import asyncio
import tempfile
-import zipfile
import aiofiles
import time
import urllib.parse
+import gns3server.utils.zipfile_zstd as zipfile
import logging
@@ -41,7 +41,7 @@ from pathlib import Path
from gns3server import schemas
from gns3server.controller import Controller
from gns3server.controller.project import Project
-from gns3server.controller.controller_error import ControllerError, ControllerForbiddenError
+from gns3server.controller.controller_error import ControllerError, ControllerBadRequestError
from gns3server.controller.import_project import import_project as import_controller_project
from gns3server.controller.export_project import export_project as export_controller_project
from gns3server.utils.asyncio import aiozipstream
@@ -285,7 +285,8 @@ async def export_project(
include_snapshots: bool = False,
include_images: bool = False,
reset_mac_addresses: bool = False,
- compression: str = "zip",
+ compression: schemas.ProjectCompression = "zstd",
+ compression_level: int = None,
) -> StreamingResponse:
"""
Export a project as a portable archive.
@@ -294,12 +295,23 @@ async def export_project(
compression_query = compression.lower()
if compression_query == "zip":
compression = zipfile.ZIP_DEFLATED
+ if compression_level is not None and (compression_level < 0 or compression_level > 9):
+ raise ControllerBadRequestError("Compression level must be between 0 and 9 for ZIP compression")
elif compression_query == "none":
compression = zipfile.ZIP_STORED
elif compression_query == "bzip2":
compression = zipfile.ZIP_BZIP2
+ if compression_level is not None and (compression_level < 1 or compression_level > 9):
+ raise ControllerBadRequestError("Compression level must be between 1 and 9 for BZIP2 compression")
elif compression_query == "lzma":
compression = zipfile.ZIP_LZMA
+ elif compression_query == "zstd":
+ compression = zipfile.ZIP_ZSTANDARD
+ if compression_level is not None and (compression_level < 1 or compression_level > 22):
+ raise ControllerBadRequestError("Compression level must be between 1 and 22 for Zstandard compression")
+
+ if compression_level is not None and compression_query in ("none", "lzma"):
+ raise ControllerBadRequestError(f"Compression level is not supported for '{compression_query}' compression method")
try:
begin = time.time()
@@ -307,8 +319,10 @@ async def export_project(
working_dir = os.path.abspath(os.path.join(project.path, os.pardir))
async def streamer():
+ log.info(f"Exporting project '{project.name}' with '{compression_query}' compression "
+ f"(level {compression_level})")
with tempfile.TemporaryDirectory(dir=working_dir) as tmpdir:
- with aiozipstream.ZipFile(compression=compression) as zstream:
+ with aiozipstream.ZipFile(compression=compression, compresslevel=compression_level) as zstream:
await export_controller_project(
zstream,
project,
@@ -342,10 +356,10 @@ async def import_project(
Import a project from a portable archive.
"""
- controller = Controller.instance()
- if Config.instance().settings.Server.local is False:
- raise ControllerForbiddenError("The server is not local")
+ #TODO: import project remotely
+ raise NotImplementedError()
+ controller = Controller.instance()
# We write the content to a temporary location and after we extract it all.
# It could be more optimal to stream this but it is not implemented in Python.
try:
@@ -385,16 +399,9 @@ async def duplicate_project(
Duplicate a project.
"""
- if project_data.path:
- if Config.instance().settings.Server.local is False:
- raise ControllerForbiddenError("The server is not a local server")
- location = project_data.path
- else:
- location = None
-
reset_mac_addresses = project_data.reset_mac_addresses
new_project = await project.duplicate(
- name=project_data.name, location=location, reset_mac_addresses=reset_mac_addresses
+ name=project_data.name, reset_mac_addresses=reset_mac_addresses
)
await rbac_repo.add_permission_to_user_with_path(current_user.user_id, f"/projects/{new_project.id}/*")
return new_project.asdict()
@@ -423,7 +430,7 @@ async def get_file(file_path: str, project: Project = Depends(dep_project)) -> F
@router.post("/{project_id}/files/{file_path:path}", status_code=status.HTTP_204_NO_CONTENT)
async def write_file(file_path: str, request: Request, project: Project = Depends(dep_project)) -> Response:
"""
- Write a file from a project.
+ Write a file to a project.
"""
file_path = urllib.parse.unquote(file_path)
diff --git a/gns3server/api/routes/controller/users.py b/gns3server/api/routes/controller/users.py
index 76b704f7..8d1953f0 100644
--- a/gns3server/api/routes/controller/users.py
+++ b/gns3server/api/routes/controller/users.py
@@ -75,7 +75,7 @@ async def authenticate(
) -> schemas.Token:
"""
Alternative authentication method using json.
- Example: curl http://host:port/v3/users/authenticate -d '{"username": "admin", "password": "admin"} -H "Content-Type: application/json" '
+ Example: curl http://host:port/v3/users/authenticate -d '{"username": "admin", "password": "admin"}' -H "Content-Type: application/json"
"""
user = await users_repo.authenticate_user(username=user_credentials.username, password=user_credentials.password)
diff --git a/gns3server/api/server.py b/gns3server/api/server.py
index 3c9fd2b6..c3ceb816 100644
--- a/gns3server/api/server.py
+++ b/gns3server/api/server.py
@@ -34,6 +34,7 @@ from gns3server.controller.controller_error import (
ControllerTimeoutError,
ControllerForbiddenError,
ControllerUnauthorizedError,
+ ComputeConflictError
)
from gns3server.api.routes import controller, index
@@ -138,6 +139,15 @@ async def controller_bad_request_error_handler(request: Request, exc: Controller
)
+@app.exception_handler(ComputeConflictError)
+async def compute_conflict_error_handler(request: Request, exc: ComputeConflictError):
+ log.error(f"Controller received error from compute for request '{exc.url()}': {exc}")
+ return JSONResponse(
+ status_code=409,
+ content={"message": str(exc)},
+ )
+
+
# make sure the content key is "message", not "detail" per default
@app.exception_handler(HTTPException)
async def http_exception_handler(request: Request, exc: HTTPException):
@@ -156,12 +166,14 @@ async def sqlalchemry_error_handler(request: Request, exc: SQLAlchemyError):
content={"message": "Database error detected, please check logs to find details"},
)
+# FIXME: do not use this middleware since it creates issue when using StreamingResponse
+# see https://starlette-context.readthedocs.io/en/latest/middleware.html#why-are-there-two-middlewares-that-do-the-same-thing
-@app.middleware("http")
-async def add_extra_headers(request: Request, call_next):
- start_time = time.time()
- response = await call_next(request)
- process_time = time.time() - start_time
- response.headers["X-Process-Time"] = str(process_time)
- response.headers["X-GNS3-Server-Version"] = f"{__version__}"
- return response
+# @app.middleware("http")
+# async def add_extra_headers(request: Request, call_next):
+# start_time = time.time()
+# response = await call_next(request)
+# process_time = time.time() - start_time
+# response.headers["X-Process-Time"] = str(process_time)
+# response.headers["X-GNS3-Server-Version"] = f"{__version__}"
+# return response
diff --git a/gns3server/appliances/aruba-arubaoscx.gns3a b/gns3server/appliances/aruba-arubaoscx.gns3a
index 8a6ec603..39109f05 100644
--- a/gns3server/appliances/aruba-arubaoscx.gns3a
+++ b/gns3server/appliances/aruba-arubaoscx.gns3a
@@ -30,6 +30,13 @@
"process_priority": "normal"
},
"images": [
+ {
+ "filename": "arubaoscx-disk-image-genericx86-p4-20220223012712.vmdk",
+ "version": "10.09.1000",
+ "md5sum": "7b47c4442d825562e73e3f09b2f1f999",
+ "filesize": 556828672,
+ "download_url": "https://asp.arubanetworks.com/"
+ },
{
"filename": "arubaoscx-disk-image-genericx86-p4-20211206170615.vmdk",
"version": "10.09.0002",
@@ -81,6 +88,12 @@
}
],
"versions": [
+ {
+ "name": "10.09.1000",
+ "images": {
+ "hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20220223012712.vmdk"
+ }
+ },
{
"name": "10.09.0002",
"images": {
diff --git a/gns3server/appliances/cisco-asav.gns3a b/gns3server/appliances/cisco-asav.gns3a
index ccc0e57e..d74e0118 100644
--- a/gns3server/appliances/cisco-asav.gns3a
+++ b/gns3server/appliances/cisco-asav.gns3a
@@ -26,6 +26,13 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "asav9-16-2.qcow2",
+ "version": "9.16.2",
+ "md5sum": "c3aa2b73b029146ec345bf888dd54eab",
+ "filesize": 264896512,
+ "download_url": "https://software.cisco.com/download/home/286119613/type/280775065/release/9.16.2"
+ },
{
"filename": "asav9-15-1.qcow2",
"version": "9.15.1",
@@ -105,6 +112,12 @@
}
],
"versions": [
+ {
+ "name": "9.16.2",
+ "images": {
+ "hda_disk_image": "asav9-16-2.qcow2"
+ }
+ },
{
"name": "9.15.1",
"images": {
diff --git a/gns3server/appliances/cisco-iosv.gns3a b/gns3server/appliances/cisco-iosv.gns3a
index c6d6bce6..8cfd5dc9 100644
--- a/gns3server/appliances/cisco-iosv.gns3a
+++ b/gns3server/appliances/cisco-iosv.gns3a
@@ -32,6 +32,13 @@
"download_url": "https://sourceforge.net/projects/gns-3/files",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/IOSv_startup_config.img/download"
},
+ {
+ "filename": "vios-adventerprisek9-m.spa.159-3.m4.qcow2",
+ "version": "15.9(3)M4",
+ "md5sum": "355b13ab821e64e2939fd7008d6304d7",
+ "filesize": 57297920,
+ "download_url": "https://learningnetworkstore.cisco.com/myaccount"
+ },
{
"filename": "vios-adventerprisek9-m.spa.159-3.m3.qcow2",
"version": "15.9(3)M3",
@@ -83,6 +90,13 @@
}
],
"versions": [
+ {
+ "name": "15.9(3)M4",
+ "images": {
+ "hda_disk_image": "vios-adventerprisek9-m.spa.159-3.m4.qcow2",
+ "hdb_disk_image": "IOSv_startup_config.img"
+ }
+ },
{
"name": "15.9(3)M3",
"images": {
diff --git a/gns3server/appliances/clavister-netshield.gns3a b/gns3server/appliances/clavister-netshield.gns3a
new file mode 100644
index 00000000..f9ac0f11
--- /dev/null
+++ b/gns3server/appliances/clavister-netshield.gns3a
@@ -0,0 +1,46 @@
+{
+ "appliance_id": "39c6b8db-8dc3-4b04-8727-7d0b414be7c8",
+ "name": "Clavister NetShield",
+ "category": "firewall",
+ "description": "Clavister NetShield (cOS Stream) Virtual Appliance offers the same functionality as the Clavister NetShield physical appliances in a virtual environment.",
+ "vendor_name": "Clavister",
+ "vendor_url": "https://www.clavister.com/",
+ "documentation_url": "https://kb.clavister.com",
+ "product_name": "NetShield",
+ "product_url": "https://www.clavister.com/products/netshield/",
+ "registry_version": 4,
+ "status": "stable",
+ "availability": "free-to-try",
+ "maintainer": "Mattias Nordlund",
+ "maintainer_email": "mattias.nordlund@clavister.com",
+ "usage": "No configuration by default, open console to set IPs and activate configuration.",
+ "port_name_format": "if{0}",
+ "qemu": {
+ "adapter_type": "virtio-net-pci",
+ "adapters": 4,
+ "ram": 1024,
+ "hda_disk_interface": "virtio",
+ "arch": "x86_64",
+ "console_type": "telnet",
+ "boot_priority": "c",
+ "kvm": "allow",
+ "options": "-cpu Nehalem"
+ },
+ "images": [
+ {
+ "filename": "clavister-cos-stream-3.80.09.01-virtual-x64-generic.qcow2",
+ "version": "cOS Stream 3.80.09",
+ "md5sum": "b57d8e0f1a3cdd4b2c96ffbc7d7c4f05",
+ "filesize": 134217728,
+ "download_url": "https://my.clavister.com/download/c44639bf-b082-ec11-8308-005056956b6b"
+ }
+ ],
+ "versions": [
+ {
+ "images": {
+ "hda_disk_image": "clavister-cos-stream-3.80.09.01-virtual-x64-generic.qcow2"
+ },
+ "name": "cOS Stream 3.80.09"
+ }
+ ]
+}
diff --git a/gns3server/appliances/clavister-netwall.gns3a b/gns3server/appliances/clavister-netwall.gns3a
new file mode 100644
index 00000000..37d2e59e
--- /dev/null
+++ b/gns3server/appliances/clavister-netwall.gns3a
@@ -0,0 +1,58 @@
+{
+ "appliance_id": "68ddb1dc-e55b-4bcc-9c18-27a9eb5e7413",
+ "name": "Clavister NetWall",
+ "category": "firewall",
+ "description": "Clavister NetWall (cOS Core) Virtual Appliance offers the same functionality as the Clavister NetWall physical NGFWs in a virtual environment.",
+ "vendor_name": "Clavister",
+ "vendor_url": "https://www.clavister.com/",
+ "documentation_url": "https://kb.clavister.com",
+ "product_name": "NetWall",
+ "product_url": "https://www.clavister.com/products/ngfw/",
+ "registry_version": 4,
+ "status": "stable",
+ "availability": "free-to-try",
+ "maintainer": "Mattias Nordlund",
+ "maintainer_email": "mattias.nordlund@clavister.com",
+ "usage": "DHCP enabled on all interfaces by default, WebUI/SSH access enabled on the local network connected to If1.",
+ "port_name_format": "If{0}",
+ "qemu": {
+ "adapter_type": "e1000",
+ "adapters": 4,
+ "ram": 512,
+ "hda_disk_interface": "virtio",
+ "arch": "x86_64",
+ "console_type": "telnet",
+ "boot_priority": "c",
+ "kvm": "allow"
+ },
+ "images": [
+ {
+ "filename": "clavister-cos-core-14.00.01.13-kvm-en.img",
+ "version": "cOS Core 14.00.01 (x86)",
+ "md5sum": "6c72eb0bb13d191912ca930b72071d07",
+ "filesize": 134217728,
+ "download_url": "https://my.clavister.com/download/ee3ecb2f-7662-ec11-8308-005056956b6b"
+ },
+ {
+ "filename": "clavister-cos-core-14.00.00.12-kvm-en.img",
+ "version": "cOS Core 14.00.00 (x86)",
+ "md5sum": "496ddd494b226e3508563db837643910",
+ "filesize": 134217728,
+ "download_url": "https://my.clavister.com/download/b2b7bce8-4449-ec11-8308-005056956b6b"
+ }
+ ],
+ "versions": [
+ {
+ "images": {
+ "hda_disk_image": "clavister-cos-core-14.00.01.13-kvm-en.img"
+ },
+ "name": "cOS Core 14.00.01 (x86)"
+ },
+ {
+ "images": {
+ "hda_disk_image": "clavister-cos-core-14.00.00.12-kvm-en.img"
+ },
+ "name": "cOS Core 14.00.00 (x86)"
+ }
+ ]
+}
diff --git a/gns3server/appliances/debian.gns3a b/gns3server/appliances/debian.gns3a
index 5da6d9fe..13f55861 100644
--- a/gns3server/appliances/debian.gns3a
+++ b/gns3server/appliances/debian.gns3a
@@ -24,20 +24,20 @@
},
"images": [
{
- "filename": "debian-11-genericcloud-amd64-20211220-862.qcow2",
- "version": "11.2",
- "md5sum": "3bdc52b0b3622a72095efdd001780a45",
- "filesize": 253231104,
+ "filename": "debian-11-genericcloud-amd64-20220328-962.qcow2",
+ "version": "11.3",
+ "md5sum": "7cf51e23747898485971a656ac2eb96d",
+ "filesize": 253296640,
"download_url": "https://cloud.debian.org/images/cloud/bullseye/",
- "direct_download_url": "https://cloud.debian.org/images/cloud/bullseye/20211220-862/debian-11-genericcloud-amd64-20211220-862.qcow2"
+ "direct_download_url": "https://cloud.debian.org/images/cloud/bullseye/20220328-962/debian-11-genericcloud-amd64-20220328-962.qcow2"
},
{
- "filename": "debian-10-genericcloud-amd64-20211011-792.qcow2",
- "version": "10.11",
- "md5sum": "ea4de19b17d114b6db813ee64a6b8284",
+ "filename": "debian-10-genericcloud-amd64-20220328-962.qcow2",
+ "version": "10.12",
+ "md5sum": "e92dfa1fc779fff807856f6ea6876e42",
"filesize": 232980480,
"download_url": "https://cloud.debian.org/images/cloud/buster/",
- "direct_download_url": "https://cloud.debian.org/images/cloud/buster/20211011-792/debian-10-genericcloud-amd64-20211011-792.qcow2"
+ "direct_download_url": "https://cloud.debian.org/images/cloud/buster/20220328-962/debian-10-genericcloud-amd64-20220328-962.qcow2"
},
{
"filename": "debian-cloud-init-data.iso",
@@ -49,16 +49,16 @@
],
"versions": [
{
- "name": "11.2",
+ "name": "11.3",
"images": {
- "hda_disk_image": "debian-11-genericcloud-amd64-20211220-862.qcow2",
+ "hda_disk_image": "debian-11-genericcloud-amd64-20220328-962.qcow2",
"cdrom_image": "debian-cloud-init-data.iso"
}
},
{
- "name": "10.11",
+ "name": "10.12",
"images": {
- "hda_disk_image": "debian-10-genericcloud-amd64-20211011-792.qcow2",
+ "hda_disk_image": "debian-10-genericcloud-amd64-20220328-962.qcow2",
"cdrom_image": "debian-cloud-init-data.iso"
}
}
diff --git a/gns3server/appliances/frr.gns3a b/gns3server/appliances/frr.gns3a
index c05e42cb..a909c7a6 100644
--- a/gns3server/appliances/frr.gns3a
+++ b/gns3server/appliances/frr.gns3a
@@ -22,6 +22,14 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "frr-8.1.0.qcow2",
+ "version": "8.1.0",
+ "md5sum": "836d6a207f63f99a4039378f2b0c6123",
+ "filesize": 54063616,
+ "download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
+ "direct_download_url": "http://downloads.sourceforge.net/project/gns-3/Qemu%20Appliances/frr-8.1.0.qcow2"
+ },
{
"filename": "frr-7.5.1.qcow2",
"version": "7.5.1",
@@ -40,6 +48,12 @@
}
],
"versions": [
+ {
+ "name": "8.1.0",
+ "images": {
+ "hda_disk_image": "frr-8.1.0.qcow2"
+ }
+ },
{
"name": "7.5.1",
"images": {
diff --git a/gns3server/appliances/opnsense.gns3a b/gns3server/appliances/opnsense.gns3a
index 8cab51b7..45583124 100644
--- a/gns3server/appliances/opnsense.gns3a
+++ b/gns3server/appliances/opnsense.gns3a
@@ -25,6 +25,13 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "OPNsense-22.1.2-OpenSSL-nano-amd64.img",
+ "version": "22.1.2",
+ "md5sum": "3109030a214301b89a47eb9466e8b656",
+ "filesize": 3221225472,
+ "download_url": "https://opnsense.c0urier.net/releases/22.1/"
+ },
{
"filename": "OPNsense-21.7.1-OpenSSL-nano-amd64.img",
"version": "21.7.1",
@@ -48,6 +55,12 @@
}
],
"versions": [
+ {
+ "name": "22.1.2",
+ "images": {
+ "hda_disk_image": "OPNsense-22.1.2-OpenSSL-nano-amd64.img"
+ }
+ },
{
"name": "21.7.1",
"images": {
diff --git a/gns3server/appliances/ovs-snmp.gns3a b/gns3server/appliances/ovs-snmp.gns3a
new file mode 100644
index 00000000..2592022f
--- /dev/null
+++ b/gns3server/appliances/ovs-snmp.gns3a
@@ -0,0 +1,18 @@
+{
+ "appliance_id": "bfafb392-bb2b-4078-8817-29c55273fff6",
+ "name": "Open vSwitch with SNMP",
+ "category": "multilayer_switch",
+ "description": "Customized Open vSwitch with SNMP support",
+ "vendor_name": "Open vSwitch",
+ "vendor_url": "http://openvswitch.org/",
+ "product_name": "Open vSwitch",
+ "registry_version": 3,
+ "status": "stable",
+ "maintainer": "GNS3 Team",
+ "maintainer_email": "developers@gns3.net",
+ "usage": "",
+ "docker": {
+ "adapters": 8,
+ "image": "gns3/ovs-snmp:latest"
+ }
+}
\ No newline at end of file
diff --git a/gns3server/appliances/pfsense.gns3a b/gns3server/appliances/pfsense.gns3a
index ff640bfb..bf3cf1ae 100644
--- a/gns3server/appliances/pfsense.gns3a
+++ b/gns3server/appliances/pfsense.gns3a
@@ -24,6 +24,13 @@
"process_priority": "normal"
},
"images": [
+ {
+ "filename": "pfSense-CE-2.6.0-RELEASE-amd64.iso",
+ "version": "2.6.0",
+ "md5sum": "5ca6d4cb89977022d2e76c9158eeeb67",
+ "filesize": 767463424,
+ "download_url": "https://www.pfsense.org/download/mirror.php?section=downloads"
+ },
{
"filename": "pfSense-CE-2.5.2-RELEASE-amd64.iso",
"version": "2.5.2",
@@ -62,6 +69,13 @@
}
],
"versions": [
+ {
+ "name": "2.6.0",
+ "images": {
+ "hda_disk_image": "empty100G.qcow2",
+ "cdrom_image": "pfSense-CE-2.6.0-RELEASE-amd64.iso"
+ }
+ },
{
"name": "2.5.2",
"images": {
diff --git a/gns3server/appliances/sophos-xg.gns3a b/gns3server/appliances/sophos-xg.gns3a
index 24f94f88..54496c71 100644
--- a/gns3server/appliances/sophos-xg.gns3a
+++ b/gns3server/appliances/sophos-xg.gns3a
@@ -24,6 +24,20 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "VI-18.5.2_MR-2.KVM-380-PRIMARY.qcow2",
+ "version": "18.5.2 MR2",
+ "md5sum": "d3b99cd9519fae06e4ef348af34fef2b",
+ "filesize": 458555392,
+ "download_url": "https://secure2.sophos.com/en-us/products/next-gen-firewall/free-trial.aspx"
+ },
+ {
+ "filename": "VI-18.5.2_MR-2.KVM-380-AUXILIARY.qcow2",
+ "version": "18.5.2 MR2",
+ "md5sum": "9cf2ebc15c92f712e28f8e45a29ee613",
+ "filesize": 11272192,
+ "download_url": "https://secure2.sophos.com/en-us/products/next-gen-firewall/free-trial.aspx"
+ },
{
"filename": "VI-17.1.3_MR-3.KVM-250-PRIMARY.qcow2",
"version": "17.1.3 MR3",
@@ -124,6 +138,13 @@
}
],
"versions": [
+ {
+ "name": "18.5.2 MR2",
+ "images": {
+ "hda_disk_image": "VI-18.5.2_MR-2.KVM-380-PRIMARY.qcow2",
+ "hdb_disk_image": "VI-18.5.2_MR-2.KVM-380-AUXILIARY.qcow2"
+ }
+ },
{
"name": "17.1.3 MR3",
"images": {
diff --git a/gns3server/appliances/vyos.gns3a b/gns3server/appliances/vyos.gns3a
index 29517c7d..4004a1bc 100644
--- a/gns3server/appliances/vyos.gns3a
+++ b/gns3server/appliances/vyos.gns3a
@@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "vyos-1.3.1-amd64.iso",
+ "version": "1.3.1",
+ "md5sum": "b6f57bd0cf9b60cdafa337b08ba4f2bc",
+ "filesize": 350224384,
+ "download_url": "https://support.vyos.io/en/downloads/files/vyos-1-3-1-generic-iso-image"
+ },
{
"filename": "vyos-1.3.0-amd64.iso",
"version": "1.3.0",
@@ -73,6 +80,13 @@
}
],
"versions": [
+ {
+ "name": "1.3.1",
+ "images": {
+ "hda_disk_image": "empty8G.qcow2",
+ "cdrom_image": "vyos-1.3.1-amd64.iso"
+ }
+ },
{
"name": "1.3.0",
"images": {
diff --git a/gns3server/compute/base_manager.py b/gns3server/compute/base_manager.py
index 5833a811..a96fe2e7 100644
--- a/gns3server/compute/base_manager.py
+++ b/gns3server/compute/base_manager.py
@@ -442,14 +442,6 @@ class BaseManager:
return path
raise ImageMissingError(orig_path)
- # For local server we allow using absolute path outside image directory
- if Config.instance().settings.Server.local is True:
- log.debug(f"Searching for '{orig_path}'")
- path = force_unix_path(path)
- if os.path.exists(path):
- return path
- raise ImageMissingError(orig_path)
-
# Check to see if path is an absolute path to a valid directory
path = force_unix_path(path)
for directory in valid_directory_prefices:
@@ -514,7 +506,7 @@ class BaseManager:
"""
try:
- return list_images(self._NODE_TYPE)
+ return await list_images(self._NODE_TYPE)
except OSError as e:
raise ComputeError(f"Can not list images {e}")
diff --git a/gns3server/compute/docker/docker_vm.py b/gns3server/compute/docker/docker_vm.py
index e48d8563..acd0e677 100644
--- a/gns3server/compute/docker/docker_vm.py
+++ b/gns3server/compute/docker/docker_vm.py
@@ -333,16 +333,17 @@ class DockerVM(BaseNode):
os.makedirs(os.path.join(path, "if-down.d"), exist_ok=True)
os.makedirs(os.path.join(path, "if-pre-up.d"), exist_ok=True)
os.makedirs(os.path.join(path, "if-post-down.d"), exist_ok=True)
+ os.makedirs(os.path.join(path, "interfaces.d"), exist_ok=True)
if not os.path.exists(os.path.join(path, "interfaces")):
with open(os.path.join(path, "interfaces"), "w+") as f:
- f.write(
- """#
-# This is a sample network config uncomment lines to configure the network
+ f.write("""#
+# This is a sample network config, please uncomment lines to configure the network
#
-"""
- )
+# Uncomment this line to load custom interface files
+# source /etc/network/interfaces.d/*
+""")
for adapter in range(0, self.adapters):
f.write(
"""
@@ -355,11 +356,9 @@ class DockerVM(BaseNode):
#\tup echo nameserver 192.168.{adapter}.1 > /etc/resolv.conf
# DHCP config for eth{adapter}
-# auto eth{adapter}
-# iface eth{adapter} inet dhcp""".format(
- adapter=adapter
- )
- )
+#auto eth{adapter}
+#iface eth{adapter} inet dhcp
+""".format(adapter=adapter))
return path
async def create(self):
diff --git a/gns3server/compute/dynamips/nodes/router.py b/gns3server/compute/dynamips/nodes/router.py
index f75b5160..046548a6 100644
--- a/gns3server/compute/dynamips/nodes/router.py
+++ b/gns3server/compute/dynamips/nodes/router.py
@@ -163,7 +163,7 @@ class Router(BaseNode):
"dynamips_id": self._dynamips_id,
"platform": self._platform,
"image": self._image,
- "image_md5sum": md5sum(self._image),
+ "image_md5sum": md5sum(self._image, self._working_directory),
"ram": self._ram,
"nvram": self._nvram,
"mmap": self._mmap,
diff --git a/gns3server/compute/iou/iou_vm.py b/gns3server/compute/iou/iou_vm.py
index 3ca8f371..63e9f528 100644
--- a/gns3server/compute/iou/iou_vm.py
+++ b/gns3server/compute/iou/iou_vm.py
@@ -231,7 +231,7 @@ class IOUVM(BaseNode):
"status": self.status,
"project_id": self.project.id,
"path": self.path,
- "md5sum": gns3server.utils.images.md5sum(self.path),
+ "md5sum": gns3server.utils.images.md5sum(self.path, self.working_path),
"ethernet_adapters": len(self._ethernet_adapters),
"serial_adapters": len(self._serial_adapters),
"ram": self._ram,
diff --git a/gns3server/compute/project.py b/gns3server/compute/project.py
index d2538d16..38ae07e8 100644
--- a/gns3server/compute/project.py
+++ b/gns3server/compute/project.py
@@ -85,10 +85,6 @@ class Project:
"variables": self._variables
}
- def is_local(self):
-
- return Config.instance().settings.Server.local
-
@property
def id(self):
@@ -101,12 +97,12 @@ class Project:
@path.setter
def path(self, path):
- check_path_allowed(path)
if hasattr(self, "_path"):
- if path != self._path and self.is_local() is False:
+ if path != self._path:
raise ComputeForbiddenError("Changing the project directory path is not allowed")
+ check_path_allowed(path)
self._path = path
@property
diff --git a/gns3server/compute/qemu/__init__.py b/gns3server/compute/qemu/__init__.py
index 400f5222..58332b2e 100644
--- a/gns3server/compute/qemu/__init__.py
+++ b/gns3server/compute/qemu/__init__.py
@@ -234,68 +234,6 @@ class Qemu(BaseManager):
return os.path.join("qemu", f"vm-{legacy_vm_id}")
- async def create_disk(self, qemu_img, path, options):
- """
- Create a Qemu disk with qemu-img
-
- :param qemu_img: qemu-img binary path
- :param path: Image path
- :param options: Disk image creation options
- """
-
- try:
- img_format = options.pop("format")
- img_size = options.pop("size")
-
- if not os.path.isabs(path):
- directory = self.get_images_directory()
- os.makedirs(directory, exist_ok=True)
- path = os.path.join(directory, os.path.basename(path))
-
- try:
- if os.path.exists(path):
- raise QemuError(f"Could not create disk image '{path}', file already exists")
- except UnicodeEncodeError:
- raise QemuError(
- "Could not create disk image '{}', "
- "path contains characters not supported by filesystem".format(path)
- )
-
- command = [qemu_img, "create", "-f", img_format]
- for option in sorted(options.keys()):
- command.extend(["-o", f"{option}={options[option]}"])
- command.append(path)
- command.append(f"{img_size}M")
-
- process = await asyncio.create_subprocess_exec(*command)
- await process.wait()
- except (OSError, subprocess.SubprocessError) as e:
- raise QemuError(f"Could not create disk image {path}:{e}")
-
- async def resize_disk(self, qemu_img, path, extend):
- """
- Resize a Qemu disk with qemu-img
-
- :param qemu_img: qemu-img binary path
- :param path: Image path
- :param size: size
- """
-
- if not os.path.isabs(path):
- directory = self.get_images_directory()
- os.makedirs(directory, exist_ok=True)
- path = os.path.join(directory, os.path.basename(path))
-
- try:
- if not os.path.exists(path):
- raise QemuError(f"Qemu disk '{path}' does not exist")
- command = [qemu_img, "resize", path, f"+{extend}M"]
- process = await asyncio.create_subprocess_exec(*command)
- await process.wait()
- log.info(f"Qemu disk '{path}' extended by {extend} MB")
- except (OSError, subprocess.SubprocessError) as e:
- raise QemuError(f"Could not update disk image {path}:{e}")
-
def _init_config_disk(self):
"""
Initialize the default config disk
diff --git a/gns3server/compute/qemu/qemu_vm.py b/gns3server/compute/qemu/qemu_vm.py
index e25f1467..313a5c32 100644
--- a/gns3server/compute/qemu/qemu_vm.py
+++ b/gns3server/compute/qemu/qemu_vm.py
@@ -280,7 +280,7 @@ class QemuVM(BaseNode):
:param value: New disk value
"""
- value = self.manager.get_abs_image_path(value, self.project.path)
+ value = self.manager.get_abs_image_path(value, self.working_dir)
if not self.linked_clone:
for node in self.manager.nodes:
if node != self and getattr(node, variable) == value:
@@ -493,7 +493,7 @@ class QemuVM(BaseNode):
"""
if cdrom_image:
- self._cdrom_image = self.manager.get_abs_image_path(cdrom_image, self.project.path)
+ self._cdrom_image = self.manager.get_abs_image_path(cdrom_image, self.working_dir)
log.info(
'QEMU VM "{name}" [{id}] has set the QEMU cdrom image path to {cdrom_image}'.format(
@@ -551,7 +551,7 @@ class QemuVM(BaseNode):
:param bios_image: QEMU bios image path
"""
- self._bios_image = self.manager.get_abs_image_path(bios_image, self.project.path)
+ self._bios_image = self.manager.get_abs_image_path(bios_image, self.working_dir)
log.info(
'QEMU VM "{name}" [{id}] has set the QEMU bios image path to {bios_image}'.format(
name=self._name, id=self._id, bios_image=self._bios_image
@@ -923,7 +923,7 @@ class QemuVM(BaseNode):
:param initrd: QEMU initrd path
"""
- initrd = self.manager.get_abs_image_path(initrd, self.project.path)
+ initrd = self.manager.get_abs_image_path(initrd, self.working_dir)
log.info(
'QEMU VM "{name}" [{id}] has set the QEMU initrd path to {initrd}'.format(
@@ -957,7 +957,7 @@ class QemuVM(BaseNode):
:param kernel_image: QEMU kernel image path
"""
- kernel_image = self.manager.get_abs_image_path(kernel_image, self.project.path)
+ kernel_image = self.manager.get_abs_image_path(kernel_image, self.working_dir)
log.info(
'QEMU VM "{name}" [{id}] has set the QEMU kernel image path to {kernel_image}'.format(
name=self._name, id=self._id, kernel_image=kernel_image
@@ -1057,10 +1057,10 @@ class QemuVM(BaseNode):
# In case user upload image manually we don't have md5 sums.
# We need generate hashes at this point, otherwise they will be generated
# at asdict but not on separate thread.
- await cancellable_wait_run_in_executor(md5sum, self._hda_disk_image)
- await cancellable_wait_run_in_executor(md5sum, self._hdb_disk_image)
- await cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image)
- await cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image)
+ await cancellable_wait_run_in_executor(md5sum, self._hda_disk_image, self.working_dir)
+ await cancellable_wait_run_in_executor(md5sum, self._hdb_disk_image, self.working_dir)
+ await cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image, self.working_dir)
+ await cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image, self.working_dir)
super().create()
@@ -1599,6 +1599,85 @@ class QemuVM(BaseNode):
)
)
+ async def create_disk_image(self, disk_name, options):
+ """
+ Create a Qemu disk
+
+ :param disk_name: disk name
+ :param options: disk creation options
+ """
+
+ try:
+ qemu_img_path = self._get_qemu_img()
+ img_format = options.pop("format")
+ img_size = options.pop("size")
+ disk_path = os.path.join(self.working_dir, disk_name)
+
+ try:
+ if os.path.exists(disk_path):
+ raise QemuError(f"Could not create disk image '{disk_name}', file already exists")
+ except UnicodeEncodeError:
+ raise QemuError(
+ f"Could not create disk image '{disk_name}', "
+ "Disk image name contains characters not supported by the filesystem"
+ )
+
+ command = [qemu_img_path, "create", "-f", img_format]
+ for option in sorted(options.keys()):
+ command.extend(["-o", f"{option}={options[option]}"])
+ command.append(disk_path)
+ command.append(f"{img_size}M")
+ retcode = await self._qemu_img_exec(command)
+ if retcode:
+ stdout = self.read_qemu_img_stdout()
+ raise QemuError(f"Could not create '{disk_name}' disk image: qemu-img returned with {retcode}\n{stdout}")
+ else:
+ log.info(f"QEMU VM '{self.name}' [{self.id}]: Qemu disk image'{disk_name}' created")
+ except (OSError, subprocess.SubprocessError) as e:
+ stdout = self.read_qemu_img_stdout()
+ raise QemuError(f"Could not create '{disk_name}' disk image: {e}\n{stdout}")
+
+ async def resize_disk_image(self, disk_name, extend):
+ """
+ Resize a Qemu disk
+
+ :param disk_name: disk name
+ :param extend: new size
+ """
+
+ try:
+ qemu_img_path = self._get_qemu_img()
+ disk_path = os.path.join(self.working_dir, disk_name)
+ if not os.path.exists(disk_path):
+ raise QemuError(f"Qemu disk image '{disk_name}' does not exist")
+
+ command = [qemu_img_path, "resize", disk_path, f"+{extend}M"]
+ retcode = await self._qemu_img_exec(command)
+ if retcode:
+ stdout = self.read_qemu_img_stdout()
+ raise QemuError(f"Could not update '{disk_name}' disk image: qemu-img returned with {retcode}\n{stdout}")
+ else:
+ log.info(f"QEMU VM '{self.name}' [{self.id}]: Qemu disk image '{disk_name}' extended by {extend} MB")
+ except (OSError, subprocess.SubprocessError) as e:
+ stdout = self.read_qemu_img_stdout()
+ raise QemuError(f"Could not update '{disk_name}' disk image: {e}\n{stdout}")
+
+ def delete_disk_image(self, disk_name):
+ """
+ Delete a Qemu disk
+
+ :param disk_name: disk name
+ """
+
+ disk_path = os.path.join(self.working_dir, disk_name)
+ if not os.path.exists(disk_path):
+ raise QemuError(f"Qemu disk image '{disk_name}' does not exist")
+
+ try:
+ os.remove(disk_path)
+ except OSError as e:
+ raise QemuError(f"Could not delete '{disk_name}' disk image: {e}")
+
@property
def started(self):
"""
@@ -1791,7 +1870,8 @@ class QemuVM(BaseNode):
*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir
)
retcode = await process.wait()
- log.info(f"{self._get_qemu_img()} returned with {retcode}")
+ if retcode != 0:
+ log.info(f"{self._get_qemu_img()} returned with {retcode}")
return retcode
async def _find_disk_file_format(self, disk):
@@ -1978,7 +2058,7 @@ class QemuVM(BaseNode):
drives = ["a", "b", "c", "d"]
for disk_index, drive in enumerate(drives):
- # prioritize config disk over harddisk d
+ # prioritize config disk over normal disks
if drive == "d" and self._create_config_disk:
continue
@@ -1992,34 +2072,44 @@ class QemuVM(BaseNode):
interface = "ide"
setattr(self, f"hd{drive}_disk_interface", interface)
- disk_name = "hd" + drive
+ disk_name = f"hd{drive}"
if not os.path.isfile(disk_image) or not os.path.exists(disk_image):
if os.path.islink(disk_image):
raise QemuError(
- f"{disk_name} disk image '{disk_image}' linked to '{os.path.realpath(disk_image)}' is not accessible"
+ f"'{disk_name}' disk image linked to "
+ f"'{os.path.realpath(disk_image)}' is not accessible"
)
else:
- raise QemuError(f"{disk_name} disk image '{disk_image}' is not accessible")
+ raise QemuError(f"'{disk_image}' is not accessible")
else:
try:
# check for corrupt disk image
retcode = await self._qemu_img_exec([qemu_img_path, "check", disk_image])
+ # ignore retcode == 1, one reason is that the image is encrypted and
+ # there is no encrypt.key-secret available
if retcode == 3:
# image has leaked clusters, but is not corrupted, let's try to fix it
- log.warning(f"Qemu image {disk_image} has leaked clusters")
- if await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", "{}".format(disk_image)]) == 3:
- self.project.emit("log.warning", {"message": "Qemu image '{}' has leaked clusters and could not be fixed".format(disk_image)})
+ log.warning(f"Disk image '{disk_image}' has leaked clusters")
+ if await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", f"{disk_image}"]) == 3:
+ self.project.emit(
+ "log.warning",
+ {"message": f"Disk image '{disk_image}' has leaked clusters and could not be fixed"}
+ )
elif retcode == 2:
# image is corrupted, let's try to fix it
- log.warning(f"Qemu image {disk_image} is corrupted")
- if await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", "{}".format(disk_image)]) == 2:
- self.project.emit("log.warning", {"message": "Qemu image '{}' is corrupted and could not be fixed".format(disk_image)})
- # ignore retcode == 1. One reason is that the image is encrypted and there is no encrypt.key-secret available
+ log.warning(f"Disk image '{disk_image}' is corrupted")
+ if await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", f"{disk_image}"]) == 2:
+ self.project.emit(
+ "log.warning",
+ {"message": f"Disk image '{disk_image}' is corrupted and could not be fixed"}
+ )
except (OSError, subprocess.SubprocessError) as e:
stdout = self.read_qemu_img_stdout()
raise QemuError(f"Could not check '{disk_name}' disk image: {e}\n{stdout}")
- if self.linked_clone:
+ if self.linked_clone and os.path.dirname(disk_image) != self.working_dir:
+
+ #cloned_disk_image = os.path.splitext(os.path.basename(disk_image))
disk = os.path.join(self.working_dir, f"{disk_name}_disk.qcow2")
if not os.path.exists(disk):
# create the disk
@@ -2027,9 +2117,9 @@ class QemuVM(BaseNode):
else:
backing_file_format = await self._find_disk_file_format(disk_image)
if not backing_file_format:
- raise QemuError("Could not detect format for disk image: {}".format(disk_image))
+ raise QemuError(f"Could not detect format for disk image '{disk_image}'")
# Rebase the image. This is in case the base image moved to a different directory,
- # which will be the case if we imported a portable project. This uses
+ # which will be the case if we imported a portable project. This uses
# get_abs_image_path(hdX_disk_image) and ignores the old base path embedded
# in the qcow2 file itself.
try:
@@ -2406,20 +2496,30 @@ class QemuVM(BaseNode):
answer[field] = getattr(self, field)
except AttributeError:
pass
- answer["hda_disk_image"] = self.manager.get_relative_image_path(self._hda_disk_image, self.project.path)
- answer["hda_disk_image_md5sum"] = md5sum(self._hda_disk_image)
- answer["hdb_disk_image"] = self.manager.get_relative_image_path(self._hdb_disk_image, self.project.path)
- answer["hdb_disk_image_md5sum"] = md5sum(self._hdb_disk_image)
- answer["hdc_disk_image"] = self.manager.get_relative_image_path(self._hdc_disk_image, self.project.path)
- answer["hdc_disk_image_md5sum"] = md5sum(self._hdc_disk_image)
- answer["hdd_disk_image"] = self.manager.get_relative_image_path(self._hdd_disk_image, self.project.path)
- answer["hdd_disk_image_md5sum"] = md5sum(self._hdd_disk_image)
- answer["cdrom_image"] = self.manager.get_relative_image_path(self._cdrom_image, self.project.path)
- answer["cdrom_image_md5sum"] = md5sum(self._cdrom_image)
- answer["bios_image"] = self.manager.get_relative_image_path(self._bios_image, self.project.path)
- answer["bios_image_md5sum"] = md5sum(self._bios_image)
- answer["initrd"] = self.manager.get_relative_image_path(self._initrd, self.project.path)
- answer["initrd_md5sum"] = md5sum(self._initrd)
- answer["kernel_image"] = self.manager.get_relative_image_path(self._kernel_image, self.project.path)
- answer["kernel_image_md5sum"] = md5sum(self._kernel_image)
+
+ for drive in ["a", "b", "c", "d"]:
+ disk_image = getattr(self, f"_hd{drive}_disk_image")
+ if not disk_image:
+ continue
+ answer[f"hd{drive}_disk_image"] = self.manager.get_relative_image_path(disk_image, self.working_dir)
+ answer[f"hd{drive}_disk_image_md5sum"] = md5sum(disk_image, self.working_dir)
+
+ local_disk = os.path.join(self.working_dir, f"hd{drive}_disk.qcow2")
+ if os.path.exists(local_disk):
+ try:
+ qcow2 = Qcow2(local_disk)
+ if qcow2.backing_file:
+ answer[f"hd{drive}_disk_image_backed"] = os.path.basename(local_disk)
+ except (Qcow2Error, OSError) as e:
+ log.error(f"Could not read qcow2 disk image '{local_disk}': {e}")
+ continue
+
+ answer["cdrom_image"] = self.manager.get_relative_image_path(self._cdrom_image, self.working_dir)
+ answer["cdrom_image_md5sum"] = md5sum(self._cdrom_image, self.working_dir)
+ answer["bios_image"] = self.manager.get_relative_image_path(self._bios_image, self.working_dir)
+ answer["bios_image_md5sum"] = md5sum(self._bios_image, self.working_dir)
+ answer["initrd"] = self.manager.get_relative_image_path(self._initrd, self.working_dir)
+ answer["initrd_md5sum"] = md5sum(self._initrd, self.working_dir)
+ answer["kernel_image"] = self.manager.get_relative_image_path(self._kernel_image, self.working_dir)
+ answer["kernel_image_md5sum"] = md5sum(self._kernel_image, self.working_dir)
return answer
diff --git a/gns3server/compute/vmware/__init__.py b/gns3server/compute/vmware/__init__.py
index f0f12d33..606e8a03 100644
--- a/gns3server/compute/vmware/__init__.py
+++ b/gns3server/compute/vmware/__init__.py
@@ -311,8 +311,8 @@ class VMware(BaseManager):
vmnet_interfaces = self._get_vmnet_interfaces_ubridge()
else:
vmnet_interfaces = self._get_vmnet_interfaces()
- vmnet_interfaces = list(vmnet_interfaces.keys())
self._vmnets_info = vmnet_interfaces.copy()
+ vmnet_interfaces = list(vmnet_interfaces.keys())
# remove vmnets already in use
for vmware_vm in self._nodes.values():
diff --git a/gns3server/controller/appliance_manager.py b/gns3server/controller/appliance_manager.py
index b7874deb..17d8374a 100644
--- a/gns3server/controller/appliance_manager.py
+++ b/gns3server/controller/appliance_manager.py
@@ -153,8 +153,14 @@ class ApplianceManager:
version_images[appliance_key] = image_in_db.filename
else:
# check if the image is on disk
+ # FIXME: still necessary? the image should have been discovered and saved in the db already
image_path = os.path.join(image_dir, appliance_file)
- if os.path.exists(image_path) and await wait_run_in_executor(md5sum, image_path) == image_checksum:
+ if os.path.exists(image_path) and \
+ await wait_run_in_executor(
+ md5sum,
+ image_path,
+ cache_to_md5file=False
+ ) == image_checksum:
async with aiofiles.open(image_path, "rb") as f:
await write_image(appliance_file, image_path, f, images_repo)
else:
diff --git a/gns3server/controller/compute.py b/gns3server/controller/compute.py
index f23d450e..d8caa17c 100644
--- a/gns3server/controller/compute.py
+++ b/gns3server/controller/compute.py
@@ -30,10 +30,13 @@ from ..utils import parse_version
from ..utils.asyncio import locking
from ..controller.controller_error import (
ControllerError,
+ ControllerBadRequestError,
ControllerNotFoundError,
ControllerForbiddenError,
ControllerTimeoutError,
ControllerUnauthorizedError,
+ ComputeError,
+ ComputeConflictError
)
from ..version import __version__, __version_info__
@@ -43,23 +46,6 @@ import logging
log = logging.getLogger(__name__)
-class ComputeError(ControllerError):
- pass
-
-
-# FIXME: broken
-class ComputeConflict(ComputeError):
- """
- Raise when the compute send a 409 that we can handle
-
- :param response: The response of the compute
- """
-
- def __init__(self, response):
- super().__init__(response["message"])
- self.response = response
-
-
class Compute:
"""
A GNS3 compute.
@@ -574,7 +560,9 @@ class Compute:
else:
msg = ""
- if response.status == 401:
+ if response.status == 400:
+ raise ControllerBadRequestError(msg)
+ elif response.status == 401:
raise ControllerUnauthorizedError(f"Invalid authentication for compute '{self.name}' [{self.id}]")
elif response.status == 403:
raise ControllerForbiddenError(msg)
@@ -584,7 +572,7 @@ class Compute:
raise ControllerTimeoutError(f"{method} {path} request timeout")
elif response.status == 409:
try:
- raise ComputeConflict(json.loads(body))
+ raise ComputeConflictError(url, json.loads(body))
# If the 409 doesn't come from a GNS3 server
except ValueError:
raise ControllerError(msg)
@@ -593,7 +581,7 @@ class Compute:
elif response.status == 503:
raise aiohttp.web.HTTPServiceUnavailable(text=f"Service unavailable {url} {body}")
else:
- raise NotImplementedError(f"{response.status} status code is not supported for {method} '{url}'")
+ raise NotImplementedError(f"{response.status} status code is not supported for {method} '{url}'\n{body}")
if body and len(body):
if raw:
response.body = body
@@ -636,16 +624,12 @@ class Compute:
"""
Return the list of images available for this type on the compute node.
"""
- images = []
res = await self.http_query("GET", f"/{type}/images", timeout=None)
images = res.json
try:
if type in ["qemu", "dynamips", "iou"]:
- # for local_image in list_images(type):
- # if local_image['filename'] not in [i['filename'] for i in images]:
- # images.append(local_image)
images = sorted(images, key=itemgetter("filename"))
else:
images = sorted(images, key=itemgetter("image"))
diff --git a/gns3server/controller/controller_error.py b/gns3server/controller/controller_error.py
index 515c88fd..c6f53149 100644
--- a/gns3server/controller/controller_error.py
+++ b/gns3server/controller/controller_error.py
@@ -51,3 +51,27 @@ class ControllerForbiddenError(ControllerError):
class ControllerTimeoutError(ControllerError):
def __init__(self, message: str):
super().__init__(message)
+
+
+class ComputeError(ControllerError):
+ pass
+
+
+class ComputeConflictError(ComputeError):
+ """
+ Raise when the compute sends a 409 that we can handle
+
+ :param url: compute URL used for the request
+ :param response: compute JSON response
+ """
+
+ def __init__(self, url, response):
+ super().__init__(response["message"])
+ self._url = url
+ self._response = response
+
+ def url(self):
+ return self._url
+
+ def response(self):
+ return self._response
diff --git a/gns3server/controller/export_project.py b/gns3server/controller/export_project.py
index 3b308a3f..4ae976d2 100644
--- a/gns3server/controller/export_project.py
+++ b/gns3server/controller/export_project.py
@@ -16,7 +16,6 @@
# along with this program. If not, see .
import os
-import sys
import json
import asyncio
import aiofiles
diff --git a/gns3server/controller/import_project.py b/gns3server/controller/import_project.py
index f653cece..545c4ac1 100644
--- a/gns3server/controller/import_project.py
+++ b/gns3server/controller/import_project.py
@@ -20,10 +20,10 @@ import sys
import json
import uuid
import shutil
-import zipfile
import aiofiles
import itertools
import tempfile
+import gns3server.utils.zipfile_zstd as zipfile_zstd
from .controller_error import ControllerError
from .topology import load_topology
@@ -60,9 +60,9 @@ async def import_project(controller, project_id, stream, location=None, name=Non
raise ControllerError("The destination path should not contain .gns3")
try:
- with zipfile.ZipFile(stream) as zip_file:
+ with zipfile_zstd.ZipFile(stream) as zip_file:
project_file = zip_file.read("project.gns3").decode()
- except zipfile.BadZipFile:
+ except zipfile_zstd.BadZipFile:
raise ControllerError("Cannot import project, not a GNS3 project (invalid zip)")
except KeyError:
raise ControllerError("Cannot import project, project.gns3 file could not be found")
@@ -92,9 +92,9 @@ async def import_project(controller, project_id, stream, location=None, name=Non
raise ControllerError("The project name contain non supported or invalid characters")
try:
- with zipfile.ZipFile(stream) as zip_file:
+ with zipfile_zstd.ZipFile(stream) as zip_file:
await wait_run_in_executor(zip_file.extractall, path)
- except zipfile.BadZipFile:
+ except zipfile_zstd.BadZipFile:
raise ControllerError("Cannot extract files from GNS3 project (invalid zip)")
topology = load_topology(os.path.join(path, "project.gns3"))
@@ -264,11 +264,11 @@ async def _import_snapshots(snapshots_path, project_name, project_id):
# extract everything to a temporary directory
try:
with open(snapshot_path, "rb") as f:
- with zipfile.ZipFile(f) as zip_file:
+ with zipfile_zstd.ZipFile(f) as zip_file:
await wait_run_in_executor(zip_file.extractall, tmpdir)
except OSError as e:
raise ControllerError(f"Cannot open snapshot '{os.path.basename(snapshot)}': {e}")
- except zipfile.BadZipFile:
+ except zipfile_zstd.BadZipFile:
raise ControllerError(
f"Cannot extract files from snapshot '{os.path.basename(snapshot)}': not a GNS3 project (invalid zip)"
)
@@ -294,7 +294,7 @@ async def _import_snapshots(snapshots_path, project_name, project_id):
# write everything back to the original snapshot file
try:
- with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
+ with aiozipstream.ZipFile(compression=zipfile_zstd.ZIP_STORED) as zstream:
for root, dirs, files in os.walk(tmpdir, topdown=True, followlinks=False):
for file in files:
path = os.path.join(root, file)
diff --git a/gns3server/controller/node.py b/gns3server/controller/node.py
index c6a046e4..9d1673af 100644
--- a/gns3server/controller/node.py
+++ b/gns3server/controller/node.py
@@ -21,8 +21,12 @@ import copy
import uuid
import os
-from .compute import ComputeConflict, ComputeError
-from .controller_error import ControllerError, ControllerTimeoutError
+from .controller_error import (
+ ControllerError,
+ ControllerTimeoutError,
+ ComputeError,
+ ComputeConflictError
+)
from .ports.port_factory import PortFactory, StandardPortFactory, DynamipsPortFactory
from ..utils.images import images_directories
from ..config import Config
@@ -400,9 +404,10 @@ class Node:
response = await self._compute.post(
f"/projects/{self._project.id}/{self._node_type}/nodes", data=data, timeout=timeout
)
- except ComputeConflict as e:
- if e.response.get("exception") == "ImageMissingError":
- res = await self._upload_missing_image(self._node_type, e.response["image"])
+ except ComputeConflictError as e:
+ response = e.response()
+ if response.get("exception") == "ImageMissingError":
+ res = await self._upload_missing_image(self._node_type, response["image"])
if not res:
raise e
else:
diff --git a/gns3server/controller/project.py b/gns3server/controller/project.py
index 996c1a05..ff7238b2 100644
--- a/gns3server/controller/project.py
+++ b/gns3server/controller/project.py
@@ -1038,7 +1038,7 @@ class Project:
while self._loading:
await asyncio.sleep(0.5)
- async def duplicate(self, name=None, location=None, reset_mac_addresses=True):
+ async def duplicate(self, name=None, reset_mac_addresses=True):
"""
Duplicate a project
@@ -1047,7 +1047,6 @@ class Project:
It's a little slower but we have only one implementation to maintain.
:param name: Name of the new project. A new one will be generated in case of conflicts
- :param location: Parent directory of the new project
:param reset_mac_addresses: Reset MAC addresses for the new project
"""
# If the project was not open we open it temporary
@@ -1062,11 +1061,8 @@ class Project:
# use the parent directory of the project we are duplicating as a
# temporary directory to avoid no space left issues when '/tmp'
- # is location on another partition.
- if location:
- working_dir = os.path.abspath(os.path.join(location, os.pardir))
- else:
- working_dir = os.path.abspath(os.path.join(self.path, os.pardir))
+ # is located on another partition.
+ working_dir = os.path.abspath(os.path.join(self.path, os.pardir))
with tempfile.TemporaryDirectory(dir=working_dir) as tmpdir:
# Do not compress the exported project when duplicating
@@ -1090,7 +1086,11 @@ class Project:
# import the temporary project
with open(project_path, "rb") as f:
project = await import_project(
- self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True
+ self._controller,
+ str(uuid.uuid4()),
+ f,
+ name=name,
+ keep_compute_id=True
)
log.info(f"Project '{project.name}' duplicated in {time.time() - begin:.4f} seconds")
diff --git a/gns3server/controller/topology.py b/gns3server/controller/topology.py
index 3b1f4473..a6bd2807 100644
--- a/gns3server/controller/topology.py
+++ b/gns3server/controller/topology.py
@@ -224,7 +224,7 @@ def _convert_2_1_0(topo, topo_path):
if node["node_type"] in ("qemu", "vmware", "virtualbox"):
if "acpi_shutdown" in node["properties"]:
if node["properties"]["acpi_shutdown"] is True:
- node["properties"]["on_close"] = "save_vm_sate"
+ node["properties"]["on_close"] = "save_vm_state"
else:
node["properties"]["on_close"] = "power_off"
del node["properties"]["acpi_shutdown"]
diff --git a/gns3server/core/tasks.py b/gns3server/core/tasks.py
index 85df5791..6d363baa 100644
--- a/gns3server/core/tasks.py
+++ b/gns3server/core/tasks.py
@@ -15,7 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-import sys
import asyncio
from typing import Callable
@@ -25,7 +24,8 @@ from gns3server.controller import Controller
from gns3server.compute import MODULES
from gns3server.compute.port_manager import PortManager
from gns3server.utils.http_client import HTTPClient
-from gns3server.db.tasks import connect_to_db, get_computes
+from gns3server.db.tasks import connect_to_db, get_computes, disconnect_from_db, discover_images_on_filesystem
+
import logging
@@ -60,7 +60,9 @@ def create_startup_handler(app: FastAPI) -> Callable:
# computing with server start
from gns3server.compute.qemu import Qemu
- asyncio.ensure_future(Qemu.instance().list_images())
+ # Start the discovering new images on file system 5 seconds after the server has started
+ # to give it a chance to process API requests
+ loop.call_later(5, asyncio.create_task, discover_images_on_filesystem(app))
for module in MODULES:
log.debug(f"Loading module {module.__name__}")
@@ -90,4 +92,6 @@ def create_shutdown_handler(app: FastAPI) -> Callable:
if PortManager.instance().udp_ports:
log.warning(f"UDP ports are still used {PortManager.instance().udp_ports}")
+ await disconnect_from_db(app)
+
return shutdown_handler
diff --git a/gns3server/crash_report.py b/gns3server/crash_report.py
index 73cc22d0..9c534d9d 100644
--- a/gns3server/crash_report.py
+++ b/gns3server/crash_report.py
@@ -59,7 +59,7 @@ class CrashReport:
Report crash to a third party service
"""
- DSN = "https://8f474628c1e44d0799140ccf05c486b8:f952ab1783d3427188fd81cc37da323c@o19455.ingest.sentry.io/38482"
+ DSN = "https://57f6b1102b6a4985a8e93aed51e19b8b@o19455.ingest.sentry.io/38482"
_instance = None
def __init__(self):
diff --git a/gns3server/db/repositories/images.py b/gns3server/db/repositories/images.py
index 17bf4c71..54964eff 100644
--- a/gns3server/db/repositories/images.py
+++ b/gns3server/db/repositories/images.py
@@ -59,12 +59,15 @@ class ImagesRepository(BaseRepository):
result = await self._db_session.execute(query)
return result.scalars().first()
- async def get_images(self) -> List[models.Image]:
+ async def get_images(self, image_type=None) -> List[models.Image]:
"""
Get all images.
"""
- query = select(models.Image)
+ if image_type:
+ query = select(models.Image).where(models.Image.image_type == image_type)
+ else:
+ query = select(models.Image)
result = await self._db_session.execute(query)
return result.scalars().all()
diff --git a/gns3server/db/tasks.py b/gns3server/db/tasks.py
index 7c40a6fb..99853993 100644
--- a/gns3server/db/tasks.py
+++ b/gns3server/db/tasks.py
@@ -15,11 +15,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+import asyncio
+import signal
import os
from fastapi import FastAPI
-from fastapi.encoders import jsonable_encoder
from pydantic import ValidationError
+from watchfiles import awatch, Change
from typing import List
from sqlalchemy import event
@@ -27,6 +29,8 @@ from sqlalchemy.engine import Engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from gns3server.db.repositories.computes import ComputesRepository
+from gns3server.db.repositories.images import ImagesRepository
+from gns3server.utils.images import discover_images, check_valid_image_header, read_image_info, InvalidImageError
from gns3server import schemas
from .models import Base
@@ -51,6 +55,14 @@ async def connect_to_db(app: FastAPI) -> None:
log.fatal(f"Error while connecting to database '{db_url}: {e}")
+async def disconnect_from_db(app: FastAPI) -> None:
+
+ # dispose of the connection pool used by the database engine
+ if getattr(app.state, "_db_engine"):
+ await app.state._db_engine.dispose()
+ log.info(f"Disconnected from database")
+
+
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
@@ -74,3 +86,94 @@ async def get_computes(app: FastAPI) -> List[dict]:
continue
computes.append(compute)
return computes
+
+
+def image_filter(change: Change, path: str) -> bool:
+
+ if change == Change.added:
+ header_magic_len = 7
+ with open(path, "rb") as f:
+ image_header = f.read(header_magic_len) # read the first 7 bytes of the file
+ if len(image_header) >= header_magic_len:
+ try:
+ check_valid_image_header(image_header)
+ except InvalidImageError as e:
+ log.debug(f"New image '{path}' added: {e}")
+ return False
+ else:
+ log.debug(f"New image '{path}' added: size is too small to be valid")
+ return False
+ return True
+ # FIXME: should we support image deletion?
+ # elif change == Change.deleted:
+ # return True
+ return False
+
+
+async def monitor_images_on_filesystem(app: FastAPI):
+
+ server_config = Config.instance().settings.Server
+ images_dir = os.path.expanduser(server_config.images_path)
+
+ try:
+ async for changes in awatch(
+ images_dir,
+ watch_filter=image_filter,
+ raise_interrupt=True
+ ):
+ async with AsyncSession(app.state._db_engine) as db_session:
+ images_repository = ImagesRepository(db_session)
+ for change in changes:
+ change_type, image_path = change
+ if change_type == Change.added:
+ try:
+ image = await read_image_info(image_path)
+ except InvalidImageError as e:
+ log.warning(str(e))
+ continue
+ try:
+ if await images_repository.get_image(image_path):
+ continue
+ await images_repository.add_image(**image)
+ log.info(f"Discovered image '{image_path}' has been added to the database")
+ except SQLAlchemyError as e:
+ log.warning(f"Error while adding image '{image_path}' to the database: {e}")
+ # if change_type == Change.deleted:
+ # try:
+ # if await images_repository.get_image(image_path):
+ # success = await images_repository.delete_image(image_path)
+ # if not success:
+ # log.warning(f"Could not delete image '{image_path}' from the database")
+ # else:
+ # log.info(f"Image '{image_path}' has been deleted from the database")
+ # except SQLAlchemyError as e:
+ # log.warning(f"Error while deleting image '{image_path}' from the database: {e}")
+ except KeyboardInterrupt:
+ # send SIGTERM to the server PID so uvicorn can shutdown the process
+ os.kill(os.getpid(), signal.SIGTERM)
+
+
+async def discover_images_on_filesystem(app: FastAPI):
+
+ async with AsyncSession(app.state._db_engine) as db_session:
+ images_repository = ImagesRepository(db_session)
+ db_images = await images_repository.get_images()
+ existing_image_paths = []
+ for db_image in db_images:
+ try:
+ image = schemas.Image.from_orm(db_image)
+ existing_image_paths.append(image.path)
+ except ValidationError as e:
+ log.error(f"Could not load image '{db_image.filename}' from database: {e}")
+ continue
+ for image_type in ("qemu", "ios", "iou"):
+ discovered_images = await discover_images(image_type, existing_image_paths)
+ for image in discovered_images:
+ log.info(f"Adding discovered image '{image['path']}' to the database")
+ try:
+ await images_repository.add_image(**image)
+ except SQLAlchemyError as e:
+ log.warning(f"Error while adding image '{image['path']}' to the database: {e}")
+
+ # monitor if images have been manually added
+ asyncio.create_task(monitor_images_on_filesystem(app))
diff --git a/gns3server/schemas/__init__.py b/gns3server/schemas/__init__.py
index ae9b7316..77a5c9c3 100644
--- a/gns3server/schemas/__init__.py
+++ b/gns3server/schemas/__init__.py
@@ -28,7 +28,7 @@ from .controller.appliances import ApplianceVersion, Appliance
from .controller.drawings import Drawing
from .controller.gns3vm import GNS3VM
from .controller.nodes import NodeCreate, NodeUpdate, NodeDuplicate, NodeCapture, Node
-from .controller.projects import ProjectCreate, ProjectUpdate, ProjectDuplicate, Project, ProjectFile
+from .controller.projects import ProjectCreate, ProjectUpdate, ProjectDuplicate, Project, ProjectFile, ProjectCompression
from .controller.users import UserCreate, UserUpdate, LoggedInUserUpdate, User, Credentials, UserGroupCreate, UserGroupUpdate, UserGroup
from .controller.rbac import RoleCreate, RoleUpdate, Role, PermissionCreate, PermissionUpdate, Permission
from .controller.tokens import Token
@@ -73,9 +73,12 @@ from .compute.dynamips_nodes import DynamipsCreate, DynamipsUpdate, Dynamips
from .compute.ethernet_hub_nodes import EthernetHubCreate, EthernetHubUpdate, EthernetHub
from .compute.ethernet_switch_nodes import EthernetSwitchCreate, EthernetSwitchUpdate, EthernetSwitch
from .compute.frame_relay_switch_nodes import FrameRelaySwitchCreate, FrameRelaySwitchUpdate, FrameRelaySwitch
-from .compute.qemu_nodes import QemuCreate, QemuUpdate, QemuImageCreate, QemuImageUpdate, QemuDiskResize, Qemu
+from .compute.qemu_nodes import QemuCreate, QemuUpdate, Qemu
from .compute.iou_nodes import IOUCreate, IOUUpdate, IOUStart, IOU
from .compute.nat_nodes import NATCreate, NATUpdate, NAT
from .compute.vpcs_nodes import VPCSCreate, VPCSUpdate, VPCS
from .compute.vmware_nodes import VMwareCreate, VMwareUpdate, VMware
from .compute.virtualbox_nodes import VirtualBoxCreate, VirtualBoxUpdate, VirtualBox
+
+# Schemas for both controller and compute
+from .qemu_disk_image import QemuDiskImageCreate, QemuDiskImageUpdate
diff --git a/gns3server/schemas/common.py b/gns3server/schemas/common.py
index e524384f..fbbb9b9b 100644
--- a/gns3server/schemas/common.py
+++ b/gns3server/schemas/common.py
@@ -15,7 +15,7 @@
# along with this program. If not, see .
from pydantic import BaseModel, Field
-from typing import Optional, Union
+from typing import Optional
from enum import Enum
diff --git a/gns3server/schemas/compute/qemu_nodes.py b/gns3server/schemas/compute/qemu_nodes.py
index 2c9d33ed..3a10fe8b 100644
--- a/gns3server/schemas/compute/qemu_nodes.py
+++ b/gns3server/schemas/compute/qemu_nodes.py
@@ -166,15 +166,19 @@ class QemuBase(BaseModel):
aux: Optional[int] = Field(None, gt=0, le=65535, description="Auxiliary console TCP port")
aux_type: Optional[QemuConsoleType] = Field(None, description="Auxiliary console type")
hda_disk_image: Optional[str] = Field(None, description="QEMU hda disk image path")
+ hda_disk_image_backed: Optional[str] = Field(None, description="QEMU hda backed disk image path")
hda_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hda disk image checksum")
hda_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hda interface")
hdb_disk_image: Optional[str] = Field(None, description="QEMU hdb disk image path")
+ hdb_disk_image_backed: Optional[str] = Field(None, description="QEMU hdb backed disk image path")
hdb_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdb disk image checksum")
hdb_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdb interface")
hdc_disk_image: Optional[str] = Field(None, description="QEMU hdc disk image path")
+ hdc_disk_image_backed: Optional[str] = Field(None, description="QEMU hdc backed disk image path")
hdc_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdc disk image checksum")
hdc_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdc interface")
hdd_disk_image: Optional[str] = Field(None, description="QEMU hdd disk image path")
+ hdd_disk_image_backed: Optional[str] = Field(None, description="QEMU hdd backed disk image path")
hdd_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdd disk image checksum")
hdd_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdd interface")
cdrom_image: Optional[str] = Field(None, description="QEMU cdrom image path")
@@ -232,113 +236,7 @@ class Qemu(QemuBase):
status: NodeStatus = Field(..., description="Container status (read only)")
-class QemuDriveName(str, Enum):
- """
- Supported Qemu drive names.
- """
-
- hda = "hda"
- hdb = "hdb"
- hdc = "hdc"
- hdd = "hdd"
-
-
-class QemuDiskResize(BaseModel):
- """
- Properties to resize a Qemu disk.
- """
-
- drive_name: QemuDriveName = Field(..., description="Qemu drive name")
- extend: int = Field(..., description="Number of Megabytes to extend the image")
-
-
class QemuBinaryPath(BaseModel):
path: str
version: str
-
-
-class QemuImageFormat(str, Enum):
- """
- Supported Qemu image formats.
- """
-
- qcow2 = "qcow2"
- qcow = "qcow"
- vpc = "vpc"
- vdi = "vdi"
- vdmk = "vdmk"
- raw = "raw"
-
-
-class QemuImagePreallocation(str, Enum):
- """
- Supported Qemu image preallocation options.
- """
-
- off = "off"
- metadata = "metadata"
- falloc = "falloc"
- full = "full"
-
-
-class QemuImageOnOff(str, Enum):
- """
- Supported Qemu image on/off options.
- """
-
- on = "off"
- off = "off"
-
-
-class QemuImageSubformat(str, Enum):
- """
- Supported Qemu image preallocation options.
- """
-
- dynamic = "dynamic"
- fixed = "fixed"
- stream_optimized = "streamOptimized"
- two_gb_max_extent_sparse = "twoGbMaxExtentSparse"
- two_gb_max_extent_flat = "twoGbMaxExtentFlat"
- monolithic_sparse = "monolithicSparse"
- monolithic_flat = "monolithicFlat"
-
-
-class QemuImageAdapterType(str, Enum):
- """
- Supported Qemu image on/off options.
- """
-
- ide = "ide"
- lsilogic = "lsilogic"
- buslogic = "buslogic"
- legacy_esx = "legacyESX"
-
-
-class QemuImageBase(BaseModel):
-
- qemu_img: str = Field(..., description="Path to the qemu-img binary")
- path: str = Field(..., description="Absolute or relative path of the image")
- format: QemuImageFormat = Field(..., description="Image format type")
- size: int = Field(..., description="Image size in Megabytes")
- preallocation: Optional[QemuImagePreallocation]
- cluster_size: Optional[int]
- refcount_bits: Optional[int]
- lazy_refcounts: Optional[QemuImageOnOff]
- subformat: Optional[QemuImageSubformat]
- static: Optional[QemuImageOnOff]
- zeroed_grain: Optional[QemuImageOnOff]
- adapter_type: Optional[QemuImageAdapterType]
-
-
-class QemuImageCreate(QemuImageBase):
-
- pass
-
-
-class QemuImageUpdate(QemuImageBase):
-
- format: Optional[QemuImageFormat] = Field(None, description="Image format type")
- size: Optional[int] = Field(None, description="Image size in Megabytes")
- extend: Optional[int] = Field(None, description="Number of Megabytes to extend the image")
diff --git a/gns3server/schemas/controller/projects.py b/gns3server/schemas/controller/projects.py
index 2c7c846e..4d98e7c5 100644
--- a/gns3server/schemas/controller/projects.py
+++ b/gns3server/schemas/controller/projects.py
@@ -102,3 +102,15 @@ class ProjectFile(BaseModel):
path: str = Field(..., description="File path")
md5sum: str = Field(..., description="File checksum")
+
+
+class ProjectCompression(str, Enum):
+ """
+ Supported project compression.
+ """
+
+ none = "none"
+ zip = "zip"
+ bzip2 = "bzip2"
+ lzma = "lzma"
+ zstd = "zstd"
diff --git a/gns3server/schemas/qemu_disk_image.py b/gns3server/schemas/qemu_disk_image.py
new file mode 100644
index 00000000..f1a6c000
--- /dev/null
+++ b/gns3server/schemas/qemu_disk_image.py
@@ -0,0 +1,103 @@
+#
+# Copyright (C) 2022 GNS3 Technologies Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+from pydantic import BaseModel, Field
+from typing import Optional
+from enum import Enum
+
+
+class QemuDiskImageFormat(str, Enum):
+ """
+ Supported Qemu disk image formats.
+ """
+
+ qcow2 = "qcow2"
+ qcow = "qcow"
+ vpc = "vpc"
+ vdi = "vdi"
+ vdmk = "vdmk"
+ raw = "raw"
+
+
+class QemuDiskImagePreallocation(str, Enum):
+ """
+ Supported Qemu disk image pre-allocation options.
+ """
+
+ off = "off"
+ metadata = "metadata"
+ falloc = "falloc"
+ full = "full"
+
+
+class QemuDiskImageOnOff(str, Enum):
+ """
+ Supported Qemu image on/off options.
+ """
+
+ on = "on"
+ off = "off"
+
+
+class QemuDiskImageSubformat(str, Enum):
+ """
+ Supported Qemu disk image sub-format options.
+ """
+
+ dynamic = "dynamic"
+ fixed = "fixed"
+ stream_optimized = "streamOptimized"
+ two_gb_max_extent_sparse = "twoGbMaxExtentSparse"
+ two_gb_max_extent_flat = "twoGbMaxExtentFlat"
+ monolithic_sparse = "monolithicSparse"
+ monolithic_flat = "monolithicFlat"
+
+
+class QemuDiskImageAdapterType(str, Enum):
+ """
+ Supported Qemu disk image on/off options.
+ """
+
+ ide = "ide"
+ lsilogic = "lsilogic"
+ buslogic = "buslogic"
+ legacy_esx = "legacyESX"
+
+
+class QemuDiskImageBase(BaseModel):
+
+ format: QemuDiskImageFormat = Field(..., description="Image format type")
+ size: int = Field(..., description="Image size in Megabytes")
+ preallocation: Optional[QemuDiskImagePreallocation]
+ cluster_size: Optional[int]
+ refcount_bits: Optional[int]
+ lazy_refcounts: Optional[QemuDiskImageOnOff]
+ subformat: Optional[QemuDiskImageSubformat]
+ static: Optional[QemuDiskImageOnOff]
+ zeroed_grain: Optional[QemuDiskImageOnOff]
+ adapter_type: Optional[QemuDiskImageAdapterType]
+
+
+class QemuDiskImageCreate(QemuDiskImageBase):
+
+ pass
+
+
+class QemuDiskImageUpdate(QemuDiskImageBase):
+
+ format: Optional[QemuDiskImageFormat] = Field(None, description="Image format type")
+ size: Optional[int] = Field(None, description="Image size in Megabytes")
+ extend: Optional[int] = Field(None, description="Number of Megabytes to extend the image")
diff --git a/gns3server/server.py b/gns3server/server.py
index 39c4e095..553fc6f2 100644
--- a/gns3server/server.py
+++ b/gns3server/server.py
@@ -111,7 +111,7 @@ class Server:
)
parser.add_argument("-q", "--quiet", default=False, action="store_true", help="do not show logs on stdout")
parser.add_argument("-d", "--debug", default=False, action="store_true", help="show debug logs")
- parser.add_argument("--logfile", help="send output to logfile instead of console")
+ parser.add_argument("--logfile", "--log", help="send output to logfile instead of console")
parser.add_argument("--logmaxsize", default=10000000, help="maximum logfile size in bytes (default is 10MB)")
parser.add_argument(
"--logbackupcount", default=10, help="number of historical log files to keep (default is 10)"
@@ -255,9 +255,6 @@ class Server:
self._set_config_defaults_from_command_line(args)
config = Config.instance().settings
- if config.Server.local:
- log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem")
-
if not config.Server.compute_password.get_secret_value():
alphabet = string.ascii_letters + string.digits + string.punctuation
generated_password = ''.join(secrets.choice(alphabet) for _ in range(16))
diff --git a/gns3server/static/web-ui/26.288b4de0ead3b7b9276b.js b/gns3server/static/web-ui/26.52bf50eec59e1bcb0895.js
similarity index 99%
rename from gns3server/static/web-ui/26.288b4de0ead3b7b9276b.js
rename to gns3server/static/web-ui/26.52bf50eec59e1bcb0895.js
index 156815ed..161a604f 100644
--- a/gns3server/static/web-ui/26.288b4de0ead3b7b9276b.js
+++ b/gns3server/static/web-ui/26.52bf50eec59e1bcb0895.js
@@ -1 +1 @@
-"use strict";(self.webpackChunkgns3_web_ui=self.webpackChunkgns3_web_ui||[]).push([[26],{91026:function(W,g,a){a.r(g),a.d(g,{TopologySummaryComponent:function(){return U}});var t=a(65508),_=a(96852),h=a(14200),f=a(36889),v=a(3941),y=a(15132),p=a(40098),x=a(39095),c=a(88802),S=a(73044),d=a(59412),T=a(93386);function C(i,e){if(1&i){var o=t.EpF();t.TgZ(0,"div",2),t.NdJ("mousemove",function(r){return t.CHM(o),t.oxw().dragWidget(r)},!1,t.evT)("mouseup",function(){return t.CHM(o),t.oxw().toggleDragging(!1)},!1,t.evT),t.qZA()}}function b(i,e){1&i&&(t.O4$(),t.TgZ(0,"svg",28),t._UZ(1,"rect",29),t.qZA())}function E(i,e){1&i&&(t.O4$(),t.TgZ(0,"svg",28),t._UZ(1,"rect",30),t.qZA())}function Z(i,e){1&i&&(t.O4$(),t.TgZ(0,"svg",28),t._UZ(1,"rect",31),t.qZA())}function O(i,e){if(1&i&&(t.TgZ(0,"div"),t._uU(1),t.qZA()),2&i){var o=t.oxw().$implicit;t.xp6(1),t.lnq(" ",o.console_type," ",o.console_host,":",o.console," ")}}function P(i,e){1&i&&(t.TgZ(0,"div"),t._uU(1," none "),t.qZA())}function M(i,e){if(1&i&&(t.TgZ(0,"div",25),t.TgZ(1,"div"),t.YNc(2,b,2,0,"svg",26),t.YNc(3,E,2,0,"svg",26),t.YNc(4,Z,2,0,"svg",26),t._uU(5),t.qZA(),t.YNc(6,O,2,3,"div",27),t.YNc(7,P,2,0,"div",27),t.qZA()),2&i){var o=e.$implicit;t.xp6(2),t.Q6J("ngIf","started"===o.status),t.xp6(1),t.Q6J("ngIf","suspended"===o.status),t.xp6(1),t.Q6J("ngIf","stopped"===o.status),t.xp6(1),t.hij(" ",o.name," "),t.xp6(1),t.Q6J("ngIf",null!=o.console&&null!=o.console&&"none"!=o.console_type),t.xp6(1),t.Q6J("ngIf",null==o.console||"none"===o.console_type)}}function w(i,e){1&i&&(t.O4$(),t.TgZ(0,"svg",28),t._UZ(1,"rect",29),t.qZA())}function A(i,e){1&i&&(t.O4$(),t.TgZ(0,"svg",28),t._UZ(1,"rect",31),t.qZA())}function F(i,e){if(1&i&&(t.TgZ(0,"div",25),t.TgZ(1,"div"),t.YNc(2,w,2,0,"svg",26),t.YNc(3,A,2,0,"svg",26),t._uU(4),t.qZA(),t.TgZ(5,"div"),t._uU(6),t.qZA(),t.TgZ(7,"div"),t._uU(8),t.qZA(),t.qZA()),2&i){var o=e.$implicit,s=t.oxw(2);t.xp6(2),t.Q6J("ngIf",o.connected),t.xp6(1),t.Q6J("ngIf",!o.connected),t.xp6(1),t.hij(" ",o.name," 
"),t.xp6(2),t.hij(" ",o.host," "),t.xp6(2),t.hij(" ",s.server.location," ")}}var I=function(i){return{lightTheme:i}},D=function(){return{right:!0,left:!0,bottom:!0,top:!0}};function N(i,e){if(1&i){var o=t.EpF();t.TgZ(0,"div",3),t.NdJ("mousedown",function(){return t.CHM(o),t.oxw().toggleDragging(!0)})("resizeStart",function(){return t.CHM(o),t.oxw().toggleDragging(!1)})("resizeEnd",function(n){return t.CHM(o),t.oxw().onResizeEnd(n)}),t.TgZ(1,"div",4),t.TgZ(2,"mat-tab-group"),t.TgZ(3,"mat-tab",5),t.NdJ("click",function(){return t.CHM(o),t.oxw().toggleTopologyVisibility(!0)}),t.TgZ(4,"div",6),t.TgZ(5,"div",7),t.TgZ(6,"mat-select",8),t.TgZ(7,"mat-optgroup",9),t.TgZ(8,"mat-option",10),t.NdJ("onSelectionChange",function(){return t.CHM(o),t.oxw().applyStatusFilter("started")}),t._uU(9,"started"),t.qZA(),t.TgZ(10,"mat-option",11),t.NdJ("onSelectionChange",function(){return t.CHM(o),t.oxw().applyStatusFilter("suspended")}),t._uU(11,"suspended"),t.qZA(),t.TgZ(12,"mat-option",12),t.NdJ("onSelectionChange",function(){return t.CHM(o),t.oxw().applyStatusFilter("stopped")}),t._uU(13,"stopped"),t.qZA(),t.qZA(),t.TgZ(14,"mat-optgroup",13),t.TgZ(15,"mat-option",14),t.NdJ("onSelectionChange",function(){return t.CHM(o),t.oxw().applyCaptureFilter("capture")}),t._uU(16,"active capture(s)"),t.qZA(),t.TgZ(17,"mat-option",15),t.NdJ("onSelectionChange",function(){return t.CHM(o),t.oxw().applyCaptureFilter("packet")}),t._uU(18,"active packet captures"),t.qZA(),t.qZA(),t.qZA(),t.qZA(),t.TgZ(19,"div",16),t.TgZ(20,"mat-select",17),t.NdJ("selectionChange",function(){return t.CHM(o),t.oxw().setSortingOrder()})("valueChange",function(n){return t.CHM(o),t.oxw().sortingOrder=n}),t.TgZ(21,"mat-option",18),t._uU(22,"sort by name ascending"),t.qZA(),t.TgZ(23,"mat-option",19),t._uU(24,"sort by name descending"),t.qZA(),t.qZA(),t.qZA(),t._UZ(25,"mat-divider",20),t.TgZ(26,"div",21),t.YNc(27,M,8,6,"div",22),t.qZA(),t.qZA(),t.qZA(),t.TgZ(28,"mat-tab",23),t.NdJ("click",function(){return 
t.CHM(o),t.oxw().toggleTopologyVisibility(!1)}),t.TgZ(29,"div",6),t.TgZ(30,"div",24),t.YNc(31,F,9,5,"div",22),t.qZA(),t.qZA(),t.qZA(),t.qZA(),t.qZA(),t.qZA()}if(2&i){var s=t.oxw();t.Q6J("ngStyle",s.style)("ngClass",t.VKq(9,I,s.isLightThemeEnabled))("validateResize",s.validate)("resizeEdges",t.DdM(11,D))("enableGhostResize",!0),t.xp6(20),t.Q6J("value",s.sortingOrder),t.xp6(6),t.Q6J("ngStyle",s.styleInside),t.xp6(1),t.Q6J("ngForOf",s.filteredNodes),t.xp6(4),t.Q6J("ngForOf",s.computes)}}var U=function(){function i(e,o,s,r,n){this.nodesDataSource=e,this.projectService=o,this.computeService=s,this.linksDataSource=r,this.themeService=n,this.closeTopologySummary=new t.vpe,this.style={},this.styleInside={height:"280px"},this.subscriptions=[],this.nodes=[],this.filteredNodes=[],this.sortingOrder="asc",this.startedStatusFilterEnabled=!1,this.suspendedStatusFilterEnabled=!1,this.stoppedStatusFilterEnabled=!1,this.captureFilterEnabled=!1,this.packetFilterEnabled=!1,this.computes=[],this.isTopologyVisible=!0,this.isDraggingEnabled=!1,this.isLightThemeEnabled=!1}return i.prototype.ngOnInit=function(){var e=this;this.isLightThemeEnabled="light"===this.themeService.getActualTheme(),this.subscriptions.push(this.nodesDataSource.changes.subscribe(function(o){e.nodes=o,e.nodes.forEach(function(s){("0.0.0.0"===s.console_host||"0:0:0:0:0:0:0:0"===s.console_host||"::"===s.console_host)&&(s.console_host=e.server.host)}),e.filteredNodes=o.sort("asc"===e.sortingOrder?e.compareAsc:e.compareDesc)})),this.projectService.getStatistics(this.server,this.project.project_id).subscribe(function(o){e.projectsStatistics=o}),this.computeService.getComputes(this.server).subscribe(function(o){e.computes=o}),this.revertPosition()},i.prototype.revertPosition=function(){var 
e=localStorage.getItem("leftPosition"),o=localStorage.getItem("rightPosition"),s=localStorage.getItem("topPosition"),r=localStorage.getItem("widthOfWidget"),n=localStorage.getItem("heightOfWidget");this.style=s?{position:"fixed",left:+e+"px",right:+o+"px",top:+s+"px",width:+r+"px",height:+n+"px"}:{top:"60px",right:"0px",width:"320px",height:"400px"}},i.prototype.toggleDragging=function(e){this.isDraggingEnabled=e},i.prototype.dragWidget=function(e){var o=Number(e.movementX),s=Number(e.movementY),r=Number(this.style.width.split("px")[0]),n=Number(this.style.height.split("px")[0]),l=Number(this.style.top.split("px")[0])+s;if(this.style.left){var u=Number(this.style.left.split("px")[0])+o;this.style={position:"fixed",left:u+"px",top:l+"px",width:r+"px",height:n+"px"},localStorage.setItem("leftPosition",u.toString()),localStorage.setItem("topPosition",l.toString()),localStorage.setItem("widthOfWidget",r.toString()),localStorage.setItem("heightOfWidget",n.toString())}else{var m=Number(this.style.right.split("px")[0])-o;this.style={position:"fixed",right:m+"px",top:l+"px",width:r+"px",height:n+"px"},localStorage.setItem("rightPosition",m.toString()),localStorage.setItem("topPosition",l.toString()),localStorage.setItem("widthOfWidget",r.toString()),localStorage.setItem("heightOfWidget",n.toString())}},i.prototype.validate=function(e){return!(e.rectangle.width&&e.rectangle.height&&(e.rectangle.width<290||e.rectangle.height<260))},i.prototype.onResizeEnd=function(e){this.style={position:"fixed",left:e.rectangle.left+"px",top:e.rectangle.top+"px",width:e.rectangle.width+"px",height:e.rectangle.height+"px"},this.styleInside={height:e.rectangle.height-120+"px"}},i.prototype.toggleTopologyVisibility=function(e){this.isTopologyVisible=e,this.revertPosition()},i.prototype.compareAsc=function(e,o){return e.name
-
+