Merge pull request #2258 from GNS3/release-v2.2.41

Release v2.2.41
Jeremy Grossmann 2023-07-12 18:24:40 +10:00 committed by GitHub
commit b76d2c2150
22 changed files with 294 additions and 96 deletions


@ -1,5 +1,14 @@
# Change Log
## 2.2.41 12/07/2023
* Bundle web-ui v2.2.41
* Catch urllib3 exceptions when sending crash report. Ref https://github.com/GNS3/gns3-gui/issues/3483
* Only fetch the Qemu version once when starting Qemu, and only add speed/duplex options for virtio-net-pci when the Qemu version is >= 2.12
* Use recent OVMF firmware (edk2-stable202305) and configure it via flash drives on the Qemu command line
* Remove the unnecessary executable permission from the file gns3server/disks/empty8G.qcow2
* Backport UEFI boot mode support for Qemu VMs
## 2.2.40.1 10/06/2023
* Re-bundle Web-Ui v2.2.40. Fixes #2239


@ -0,0 +1,43 @@
{
"appliance_id": "8fecbf89-5cd1-4aea-b735-5f36cf0efbb7",
"name": "BIRD2",
"category": "router",
"description": "The BIRD project aims to develop a fully functional dynamic IP routing daemon primarily targeted on (but not limited to) Linux, FreeBSD and other UNIX-like systems and distributed under the GNU General Public License.",
"vendor_name": "CZ.NIC Labs",
"vendor_url": "https://bird.network.cz",
"documentation_url": "https://bird.network.cz/?get_doc&f=bird.html&v=20",
"product_name": "BIRD internet routing daemon",
"registry_version": 4,
"status": "stable",
"maintainer": "Bernhard Ehlers",
"maintainer_email": "dev-ehlers@mailbox.org",
"usage": "Username:\tgns3\nPassword:\tgns3\nTo become root, use \"sudo -s\".\n\nNetwork configuration:\nsudo nano /etc/network/interfaces\nsudo systemctl restart networking\n\nBIRD:\nRestart: sudo systemctl restart bird\nReconfigure: birdc configure",
"port_name_format": "eth{0}",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 4,
"ram": 512,
"hda_disk_interface": "scsi",
"arch": "x86_64",
"console_type": "telnet",
"kvm": "allow"
},
"images": [
{
"filename": "bird2-debian-2.0.12.qcow2",
"version": "2.0.12",
"md5sum": "435218a2e90cba921cc7fde1d64a9419",
"filesize": 287965184,
"download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
"direct_download_url": "http://downloads.sourceforge.net/project/gns-3/Qemu%20Appliances/bird2-debian-2.0.12.qcow2"
}
],
"versions": [
{
"name": "2.0.12",
"images": {
"hda_disk_image": "bird2-debian-2.0.12.qcow2"
}
}
]
}
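
Each registry image entry above pins the disk image by md5sum and filesize. A quick way to sanity-check a downloaded image before importing it (an illustrative standalone script, not part of the appliance format or of GNS3 itself):

```python
# Illustrative check, not part of GNS3: verify a downloaded appliance image
# against the md5sum and filesize pinned in the appliance definition above.
import hashlib
import os

def verify_image(path, expected_md5, expected_size):
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_md5

# values taken from the BIRD2 image entry above
print(verify_image("bird2-debian-2.0.12.qcow2",
                   "435218a2e90cba921cc7fde1d64a9419", 287965184))
```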


@ -24,10 +24,17 @@
"kvm": "require"
},
"images": [
{
"filename": "c8000v-universalk9_8G_serial.17.06.05.qcow2",
"version": "17.06.05 8G",
"md5sum": "aeb15ab8e1cbd0cd76f7260a81442f98",
"filesize": 1777795072,
"download_url": "https://software.cisco.com/download/home/286327102/type/282046477/release/Bengaluru-17.6.5"
},
{
"filename": "c8000v-universalk9_8G_serial.17.06.01a.qcow2",
"version": "17.06.01a 8G",
"md5sum": "d8b8ae633d953ec1b6d8f18a09a4f4e7",
"md5sum": "e278fa644295c703976a86f7f1c1cd65",
"filesize": 1595277312,
"download_url": "https://software.cisco.com/download/home/286327102/type/282046477/release/Bengaluru-17.6.1a"
},
@ -47,6 +54,12 @@
}
],
"versions": [
{
"name": "17.06.05 8G",
"images": {
"hda_disk_image": "c8000v-universalk9_8G_serial.17.06.05.qcow2"
}
},
{
"name": "17.06.01a 8G",
"images": {


@ -188,12 +188,12 @@
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "OVMF-20160813.fd",
"version": "16.08.13",
"md5sum": "8ff0ef1ec56345db5b6bda1a8630e3c6",
"filesize": 2097152,
"download_url": "",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/OVMF-20160813.fd.zip/download",
"filename": "OVMF-edk2-stable202305.fd",
"version": "stable202305",
"md5sum": "6c4cf1519fec4a4b95525d9ae562963a",
"filesize": 4194304,
"download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/OVMF-edk2-stable202305.fd.zip/download",
"compression": "zip"
}
],
@ -201,161 +201,161 @@
{
"name": "9500v 10.1.1",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nexus9500v64.10.1.1.qcow2"
}
},
{
"name": "9300v 10.1.1",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nexus9300v.10.1.1.qcow2"
}
},
{
"name": "9500v 9.3.9",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nexus9500v.9.3.9.qcow2"
}
},
{
"name": "9300v 9.3.9",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nexus9300v.9.3.9.qcow2"
}
},
{
"name": "9300v 9.3.8",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nexus9300v.9.3.8.qcow2"
}
},
{
"name": "9500v 9.3.7",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nexus9500v.9.3.7.qcow2"
}
},
{
"name": "9500v 9.3.3",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nexus9500v.9.3.3.qcow2"
}
},
{
"name": "9300v 9.3.3",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nexus9300v.9.3.3.qcow2"
}
},
{
"name": "9.3.1",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv.9.3.1.qcow2"
}
},
{
"name": "9.2.3",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.9.2.3.qcow2"
}
},
{
"name": "9.2.2",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.9.2.2.qcow2"
}
},
{
"name": "9.2.1",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.9.2.1.qcow2"
}
},
{
"name": "7.0.3.I7.9",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.9.qcow2"
}
},
{
"name": "7.0.3.I7.7",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.7.qcow2"
}
},
{
"name": "7.0.3.I7.6",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.6.qcow2"
}
},
{
"name": "7.0.3.I7.5",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.5.qcow2"
}
},
{
"name": "7.0.3.I7.4",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.4.qcow2"
}
},
{
"name": "7.0.3.I7.3",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.3.qcow2"
}
},
{
"name": "7.0.3.I7.2",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.2.qcow2"
}
},
{
"name": "7.0.3.I7.1",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.1.qcow2"
}
},
{
"name": "7.0.3.I6.1",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I6.1.qcow2"
}
},
{
"name": "7.0.3.I5.2",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I5.2.qcow2"
}
},
{
"name": "7.0.3.I5.1",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "nxosv-final.7.0.3.I5.1.qcow2"
}
}


@ -8,8 +8,8 @@
"product_name": "Debian",
"registry_version": 4,
"status": "experimental",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"maintainer": "Bernhard Ehlers",
"maintainer_email": "dev-ehlers@mailbox.org",
"usage": "Username:\tdebian\nPassword:\tdebian\nTo become root, use \"sudo -s\".\n\nNetwork configuration:\n- In \"/etc/network/interfaces\" comment out \"source-directory /run/network/interfaces.d\"\n- Remove \"/etc/network/interfaces.d/50-cloud-init\"\n- Create \"/etc/network/interfaces.d/10-ens4\", for example:\n\nauto ens4\n#iface ens4 inet dhcp\niface ens4 inet static\n address 10.1.1.100/24\n gateway 10.1.1.1\n dns-nameservers 10.1.1.1\n",
"symbol": "linux_guest.svg",
"port_name_format": "ens{port4}",
@ -24,20 +24,28 @@
},
"images": [
{
"filename": "debian-11-genericcloud-amd64-20221219-1234.qcow2",
"version": "11.6",
"md5sum": "bd6ddbccc89e40deb7716b812958238d",
"filesize": 258801664,
"download_url": "https://cloud.debian.org/images/cloud/bullseye/",
"direct_download_url": "https://cloud.debian.org/images/cloud/bullseye/20221219-1234/debian-11-genericcloud-amd64-20221219-1234.qcow2"
"filename": "debian-12-genericcloud-amd64-20230612-1409.qcow2",
"version": "12.0",
"md5sum": "524cf33a1284d6e3363bd4d843756386",
"filesize": 280166400,
"download_url": "https://cloud.debian.org/images/cloud/bookworm/",
"direct_download_url": "https://cloud.debian.org/images/cloud/bookworm/20230612-1409/debian-12-genericcloud-amd64-20230612-1409.qcow2"
},
{
"filename": "debian-10-genericcloud-amd64-20220911-1135.qcow2",
"filename": "debian-11-genericcloud-amd64-20230601-1398.qcow2",
"version": "11.7",
"md5sum": "1b24a841dc5ca9bcf40b94ad4b4775d4",
"filesize": 259063808,
"download_url": "https://cloud.debian.org/images/cloud/bullseye/",
"direct_download_url": "https://cloud.debian.org/images/cloud/bullseye/20230601-1398/debian-11-genericcloud-amd64-20230601-1398.qcow2"
},
{
"filename": "debian-10-genericcloud-amd64-20230601-1398.qcow2",
"version": "10.13",
"md5sum": "9d4d1175bef974caba79dd6ca33d500c",
"filesize": 234749952,
"md5sum": "ca799fb4011712f4686c422c1a9731cf",
"filesize": 228130816,
"download_url": "https://cloud.debian.org/images/cloud/buster/",
"direct_download_url": "https://cloud.debian.org/images/cloud/buster/20220911-1135/debian-10-genericcloud-amd64-20220911-1135.qcow2"
"direct_download_url": "https://cloud.debian.org/images/cloud/buster/20230601-1398/debian-10-genericcloud-amd64-20230601-1398.qcow2"
},
{
"filename": "debian-cloud-init-data.iso",
@ -50,16 +58,23 @@
],
"versions": [
{
"name": "11.6",
"name": "12.0",
"images": {
"hda_disk_image": "debian-11-genericcloud-amd64-20221219-1234.qcow2",
"hda_disk_image": "debian-12-genericcloud-amd64-20230612-1409.qcow2",
"cdrom_image": "debian-cloud-init-data.iso"
}
},
{
"name": "11.7",
"images": {
"hda_disk_image": "debian-11-genericcloud-amd64-20230601-1398.qcow2",
"cdrom_image": "debian-cloud-init-data.iso"
}
},
{
"name": "10.13",
"images": {
"hda_disk_image": "debian-10-genericcloud-amd64-20220911-1135.qcow2",
"hda_disk_image": "debian-10-genericcloud-amd64-20230601-1398.qcow2",
"cdrom_image": "debian-cloud-init-data.iso"
}
}


@ -8,8 +8,8 @@
"product_name": "ipterm",
"registry_version": 4,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"maintainer": "Bernhard Ehlers",
"maintainer_email": "dev-ehlers@mailbox.org",
"usage": "The /root directory is persistent.",
"symbol": "linux_guest.svg",
"docker": {


@ -27,6 +27,15 @@
"options": "-nographic"
},
"images": [
{
"filename": "chr-7.10.1.img",
"version": "7.10.1",
"md5sum": "917729e79b9992562f4160d461b21cac",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download.mikrotik.com/routeros/7.10.1/chr-7.10.1.img.zip",
"compression": "zip"
},
{
"filename": "chr-7.7.img",
"version": "7.7",
@ -83,6 +92,12 @@
}
],
"versions": [
{
"name": "7.10.1",
"images": {
"hda_disk_image": "chr-7.10.1.img"
}
},
{
"name": "7.7",
"images": {


@ -25,6 +25,13 @@
"kvm": "require"
},
"images": [
{
"filename": "OPNsense-23.1-OpenSSL-nano-amd64.img",
"version": "23.1",
"md5sum": "db7d3b9fd3b94894623368db1041ff11",
"filesize": 3221225472,
"download_url": "https://opnsense.c0urier.net/releases/23.1/"
},
{
"filename": "OPNsense-22.1.2-OpenSSL-nano-amd64.img",
"version": "22.1.2",
@ -55,6 +62,12 @@
}
],
"versions": [
{
"name": "23.1",
"images": {
"hda_disk_image": "OPNsense-23.1-OpenSSL-nano-amd64.img"
}
},
{
"name": "22.1.2",
"images": {


@ -24,6 +24,13 @@
"process_priority": "normal"
},
"images": [
{
"filename": "pfSense-CE-2.7.0-RELEASE-amd64.iso",
"version": "2.7.0",
"md5sum": "cb0b72ca864d06682265de5e5a72a1fb",
"filesize": 765218816,
"download_url": "https://www.pfsense.org/download/mirror.php?section=downloads"
},
{
"filename": "pfSense-CE-2.6.0-RELEASE-amd64.iso",
"version": "2.6.0",
@ -69,6 +76,13 @@
}
],
"versions": [
{
"name": "2.7.0",
"images": {
"hda_disk_image": "empty100G.qcow2",
"cdrom_image": "pfSense-CE-2.7.0-RELEASE-amd64.iso"
}
},
{
"name": "2.6.0",
"images": {


@ -8,8 +8,8 @@
"product_name": "webterm",
"registry_version": 4,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"maintainer": "Bernhard Ehlers",
"maintainer_email": "dev-ehlers@mailbox.org",
"usage": "The /root directory is persistent.",
"symbol": "firefox.svg",
"docker": {


@ -38,12 +38,12 @@
"compression": "zip"
},
{
"filename": "OVMF-20160813.fd",
"version": "16.08.13",
"md5sum": "8ff0ef1ec56345db5b6bda1a8630e3c6",
"filesize": 2097152,
"download_url": "",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/OVMF-20160813.fd.zip/download",
"filename": "OVMF-edk2-stable202305.fd",
"version": "stable202305",
"md5sum": "6c4cf1519fec4a4b95525d9ae562963a",
"filesize": 4194304,
"download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/OVMF-edk2-stable202305.fd.zip/download",
"compression": "zip"
}
],
@ -51,7 +51,7 @@
{
"name": "2212",
"images": {
"bios_image": "OVMF-20160813.fd",
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "WinDev2212Eval-disk1.vmdk"
}
}


@ -86,6 +86,7 @@ class QemuVM(BaseNode):
self._local_udp_tunnels = {}
self._guest_cid = None
self._command_line_changed = False
self._qemu_version = None
# QEMU VM settings
if qemu_path:
@ -122,6 +123,7 @@ class QemuVM(BaseNode):
self._kernel_image = ""
self._kernel_command_line = ""
self._tpm = False
self._uefi = False
self._legacy_networking = False
self._replicate_network_connection_state = True
self._create_config_disk = False
@ -833,6 +835,30 @@ class QemuVM(BaseNode):
log.info('QEMU VM "{name}" [{id}] has disabled the Trusted Platform Module (TPM)'.format(name=self._name, id=self._id))
self._tpm = tpm
@property
def uefi(self):
"""
Returns whether UEFI boot mode is activated for this QEMU VM.
:returns: boolean
"""
return self._uefi
@uefi.setter
def uefi(self, uefi):
"""
Sets whether UEFI boot mode is activated for this QEMU VM.
:param uefi: boolean
"""
if uefi:
log.info(f'QEMU VM "{self._name}" [{self._id}] has enabled the UEFI boot mode')
else:
log.info(f'QEMU VM "{self._name}" [{self._id}] has disabled the UEFI boot mode')
self._uefi = uefi
@property
def options(self):
"""
@ -1846,8 +1872,7 @@ class QemuVM(BaseNode):
# special case, sata controller doesn't exist in Qemu
options.extend(["-device", 'ahci,id=ahci{}'.format(disk_index)])
options.extend(["-drive", 'file={},if=none,id=drive{},index={},media=disk{}'.format(disk, disk_index, disk_index, extra_drive_options)])
qemu_version = await self.manager.get_qemu_version(self.qemu_path)
if qemu_version and parse_version(qemu_version) >= parse_version("4.2.0"):
if self._qemu_version and parse_version(self._qemu_version) >= parse_version("4.2.0"):
# The ide-drive device is deprecated since version 4.2.0
# https://qemu.readthedocs.io/en/latest/system/deprecated.html#ide-drive-since-4-2
options.extend(["-device", 'ide-hd,drive=drive{},bus=ahci{}.0,id=drive{}'.format(disk_index, disk_index, disk_index)])
@ -1994,12 +2019,28 @@ class QemuVM(BaseNode):
options = []
if self._bios_image:
if self._uefi:
raise QemuError("Cannot use a bios image and the UEFI boot mode at the same time")
if not os.path.isfile(self._bios_image) or not os.path.exists(self._bios_image):
if os.path.islink(self._bios_image):
raise QemuError("bios image '{}' linked to '{}' is not accessible".format(self._bios_image, os.path.realpath(self._bios_image)))
else:
raise QemuError("bios image '{}' is not accessible".format(self._bios_image))
options.extend(["-bios", self._bios_image.replace(",", ",,")])
elif self._uefi:
# get the OVMF firmware from the images directory
ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE.fd")
log.info("Configuring UEFI boot mode using OVMF file: '{}'".format(ovmf_firmware_path))
options.extend(["-drive", "if=pflash,format=raw,readonly,file={}".format(ovmf_firmware_path)])
# the node should have its own copy of OVMF_VARS.fd (the UEFI variables store)
ovmf_vars_node_path = os.path.join(self.working_dir, "OVMF_VARS.fd")
if not os.path.exists(ovmf_vars_node_path):
try:
shutil.copyfile(self.manager.get_abs_image_path("OVMF_VARS.fd"), ovmf_vars_node_path)
except OSError as e:
raise QemuError("Cannot copy OVMF_VARS.fd file to the node working directory: {}".format(e))
options.extend(["-drive", "if=pflash,format=raw,file={}".format(ovmf_vars_node_path)])
return options
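
For context, the two pflash drives built above correspond to the standard OVMF split between firmware code and the UEFI variable store. A minimal standalone sketch of the same logic (the helper name and signature are hypothetical; only the -drive options mirror the code above):

```python
# Minimal sketch of the UEFI flash-drive setup used above: a read-only
# OVMF_CODE.fd holds the firmware code, and each VM gets its own writable
# copy of OVMF_VARS.fd as the UEFI variable store.
import os
import shutil

def uefi_pflash_options(ovmf_code_path, ovmf_vars_template, working_dir):
    vars_copy = os.path.join(working_dir, "OVMF_VARS.fd")
    if not os.path.exists(vars_copy):
        # a private copy keeps UEFI settings persistent per node
        shutil.copyfile(ovmf_vars_template, vars_copy)
    return [
        "-drive", "if=pflash,format=raw,readonly,file={}".format(ovmf_code_path),
        "-drive", "if=pflash,format=raw,file={}".format(vars_copy),
    ]
```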
def _linux_boot_options(self):
@ -2090,11 +2131,10 @@ class QemuVM(BaseNode):
patched_qemu = False
if self._legacy_networking:
qemu_version = await self.manager.get_qemu_version(self.qemu_path)
if qemu_version:
if parse_version(qemu_version) >= parse_version("2.9.0"):
if self._qemu_version:
if parse_version(self._qemu_version) >= parse_version("2.9.0"):
raise QemuError("Qemu version 2.9.0 and later doesn't support legacy networking mode")
if parse_version(qemu_version) < parse_version("1.1.0"):
if parse_version(self._qemu_version) < parse_version("1.1.0"):
# this is a patched Qemu if version is below 1.1.0
patched_qemu = True
@ -2103,8 +2143,7 @@ class QemuVM(BaseNode):
pci_bridges = math.floor(pci_devices / 32)
pci_bridges_created = 0
if pci_bridges >= 1:
qemu_version = await self.manager.get_qemu_version(self.qemu_path)
if qemu_version and parse_version(qemu_version) < parse_version("2.4.0"):
if self._qemu_version and parse_version(self._qemu_version) < parse_version("2.4.0"):
raise QemuError("Qemu version 2.4 or later is required to run this VM with a large number of network adapters")
pci_device_id = 4 + pci_bridges # Bridge consume PCI ports
@ -2150,7 +2189,9 @@ class QemuVM(BaseNode):
else:
# newer QEMU networking syntax
device_string = "{},mac={}".format(adapter_type, mac)
if adapter_type == "virtio-net-pci":
if adapter_type == "virtio-net-pci" and \
self._qemu_version and parse_version(self._qemu_version) >= parse_version("2.12"):
# speed and duplex support was added in Qemu 2.12
device_string = "{},speed=10000,duplex=full".format(device_string)
bridge_id = math.floor(pci_device_id / 32)
if bridge_id > 0:
@ -2183,8 +2224,7 @@ class QemuVM(BaseNode):
if any(opt in self._options for opt in ["-display", "-nographic", "-curses", "-sdl", "-spice", "-vnc"]):
return []
version = await self.manager.get_qemu_version(self.qemu_path)
if version and parse_version(version) >= parse_version("3.0"):
if self._qemu_version and parse_version(self._qemu_version) >= parse_version("3.0"):
return ["-display", "none"]
else:
return ["-nographic"]
@ -2229,9 +2269,8 @@ class QemuVM(BaseNode):
elif sys.platform.startswith("win"):
if require_hardware_accel:
# HAXM is only available starting with Qemu version 2.9.0
version = await self.manager.get_qemu_version(self.qemu_path)
if version and parse_version(version) < parse_version("2.9.0"):
raise QemuError("HAXM acceleration can only be enable for Qemu version 2.9.0 and above (current version: {})".format(version))
if self._qemu_version and parse_version(self._qemu_version) < parse_version("2.9.0"):
raise QemuError("HAXM acceleration can only be enable for Qemu version 2.9.0 and above (current version: {})".format(self._qemu_version))
# check if HAXM is installed
version = self.manager.get_haxm_windows_version()
@ -2331,6 +2370,7 @@ class QemuVM(BaseNode):
(to be passed to subprocess.Popen())
"""
self._qemu_version = await self.manager.get_qemu_version(self.qemu_path)
vm_name = self._name.replace(",", ",,")
project_path = self.project.path.replace(",", ",,")
additional_options = self._options.strip()
@ -2348,10 +2388,9 @@ class QemuVM(BaseNode):
if await self._run_with_hardware_acceleration(self.qemu_path, self._options):
if sys.platform.startswith("linux"):
command.extend(["-enable-kvm"])
version = await self.manager.get_qemu_version(self.qemu_path)
# Issue on some combo Intel CPU + KVM + Qemu 2.4.0
# https://github.com/GNS3/gns3-server/issues/685
if version and parse_version(version) >= parse_version("2.4.0") and self.platform == "x86_64":
if self._qemu_version and parse_version(self._qemu_version) >= parse_version("2.4.0") and self.platform == "x86_64":
command.extend(["-machine", "smm=off"])
elif sys.platform.startswith("win") or sys.platform.startswith("darwin"):
command.extend(["-enable-hax"])
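
Taken together, _build_command() now resolves the QEMU version once, stores it in self._qemu_version, and every feature gate reuses that cached value. A condensed, hypothetical sketch of the virtio-net-pci gate (gns3server uses its own parse_version helper; the packaging module is used here only for the example):

```python
# Sketch of the cached-version gate for virtio-net-pci link properties;
# speed/duplex were only added to QEMU's virtio-net device in 2.12.
from packaging.version import parse as parse_version

def nic_device_string(adapter_type, mac, cached_qemu_version):
    device = "{},mac={}".format(adapter_type, mac)
    if adapter_type == "virtio-net-pci" and cached_qemu_version and \
            parse_version(cached_qemu_version) >= parse_version("2.12"):
        device += ",speed=10000,duplex=full"
    return device

print(nic_device_string("virtio-net-pci", "00:00:ab:0e:0f:00", "6.2.0"))
# virtio-net-pci,mac=00:00:ab:0e:0f:00,speed=10000,duplex=full
```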


@ -29,6 +29,7 @@ import struct
import platform
import locale
import distro
import urllib3
from .version import __version__, __version_info__
from .config import Config
@ -58,7 +59,7 @@ class CrashReport:
Report crash to a third party service
"""
DSN = "https://7c34b76e7d244ce88fba19f40d283e09@o19455.ingest.sentry.io/38482"
DSN = "https://e27fbedc2e824ceb9f8d8508c5d37507@o19455.ingest.sentry.io/38482"
_instance = None
def __init__(self):
@ -82,11 +83,15 @@ class CrashReport:
# Don't send log records as events.
sentry_logging = LoggingIntegration(level=logging.INFO, event_level=None)
sentry_sdk.init(dsn=CrashReport.DSN,
release=__version__,
ca_certs=cacert,
default_integrations=False,
integrations=[sentry_logging])
try:
sentry_sdk.init(dsn=CrashReport.DSN,
release=__version__,
ca_certs=cacert,
default_integrations=False,
integrations=[sentry_logging])
except urllib3.exceptions.HTTPError as e:
log.error("Crash report could not be sent: {}".format(e))
return
tags = {
"os:name": platform.system(),

Binary file not shown.

Binary file not shown.

gns3server/disks/empty8G.qcow2 Executable file → Normal file

@ -194,6 +194,10 @@ QEMU_CREATE_SCHEMA = {
"description": "Enable the Trusted Platform Module (TPM) in Qemu",
"type": ["boolean", "null"],
},
"uefi": {
"description": "Enable the UEFI boot mode in Qemu",
"type": ["boolean", "null"],
},
"create_config_disk": {
"description": "Automatically create a config disk on HDD disk interface (secondary slave)",
"type": ["boolean", "null"],
@ -392,6 +396,10 @@ QEMU_UPDATE_SCHEMA = {
"description": "Enable the Trusted Platform Module (TPM) in Qemu",
"type": ["boolean", "null"],
},
"uefi": {
"description": "Enable the UEFI boot mode in Qemu",
"type": ["boolean", "null"],
},
"create_config_disk": {
"description": "Automatically create a config disk on HDD disk interface (secondary slave)",
"type": ["boolean", "null"],
@ -603,6 +611,10 @@ QEMU_OBJECT_SCHEMA = {
"description": "Enable the Trusted Platform Module (TPM) in Qemu",
"type": "boolean",
},
"uefi": {
"description": "Enable the UEFI boot mode in Qemu",
"type": "boolean",
},
"create_config_disk": {
"description": "Automatically create a config disk on HDD disk interface (secondary slave)",
"type": ["boolean", "null"],
@ -678,6 +690,7 @@ QEMU_OBJECT_SCHEMA = {
"legacy_networking",
"replicate_network_connection_state",
"tpm",
"uefi",
"create_config_disk",
"on_close",
"cpu_throttling",


@ -188,6 +188,11 @@ QEMU_TEMPLATE_PROPERTIES = {
"type": "boolean",
"default": False
},
"uefi": {
"description": "Enable the UEFI boot mode in Qemu",
"type": "boolean",
"default": False
},
"create_config_disk": {
"description": "Automatically create a config disk on HDD disk interface (secondary slave)",
"type": "boolean",


@ -46,6 +46,6 @@
gtag('config', 'G-5D6FZL9923');
</script>
<script src="runtime.baa1121a4737aeb68bb7.js" defer></script><script src="polyfills-es5.865074f5cd9a121111a2.js" nomodule defer></script><script src="polyfills.2f91a039d848e57ff02e.js" defer></script><script src="main.8448c96e4facbe79a613.js" defer></script>
<script src="runtime.baa1121a4737aeb68bb7.js" defer></script><script src="polyfills-es5.865074f5cd9a121111a2.js" nomodule defer></script><script src="polyfills.2f91a039d848e57ff02e.js" defer></script><script src="main.1379a5647e8bc6d3e401.js" defer></script>
</body></html>


@ -23,8 +23,8 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
__version__ = "2.2.40.1"
__version_info__ = (2, 2, 40, 0)
__version__ = "2.2.41"
__version_info__ = (2, 2, 41, 0)
if "dev" in __version__:
try:


@ -78,6 +78,7 @@ async def vm(compute_project, manager, fake_qemu_binary, fake_qemu_img_binary):
vm._start_ubridge = AsyncioMagicMock()
vm._ubridge_hypervisor = MagicMock()
vm._ubridge_hypervisor.is_running.return_value = True
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="6.2.0")
vm.manager.config.set("Qemu", "enable_hardware_acceleration", False)
return vm
@ -141,7 +142,6 @@ async def test_is_running(vm, running_subprocess_mock):
async def test_start(vm, running_subprocess_mock):
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
with asyncio_patch("gns3server.compute.qemu.QemuVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=running_subprocess_mock) as mock:
await vm.start()
@ -156,7 +156,6 @@ async def test_stop(vm, running_subprocess_mock):
future = asyncio.Future()
future.set_result(True)
process.wait.return_value = future
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
with asyncio_patch("gns3server.compute.qemu.QemuVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
@ -241,7 +240,6 @@ async def test_port_remove_nio_binding(vm):
async def test_close(vm, port_manager):
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
with asyncio_patch("gns3server.compute.qemu.QemuVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
await vm.start()
@ -369,7 +367,6 @@ async def test_disk_options(vm, tmpdir, fake_qemu_img_binary):
async def test_cdrom_option(vm, tmpdir, fake_qemu_img_binary):
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
vm._cdrom_image = str(tmpdir / "test.iso")
open(vm._cdrom_image, "w+").close()
@ -380,13 +377,38 @@ async def test_cdrom_option(vm, tmpdir, fake_qemu_img_binary):
async def test_bios_option(vm, tmpdir, fake_qemu_img_binary):
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
vm._bios_image = str(tmpdir / "test.img")
open(vm._bios_image, "w+").close()
options = await vm._build_command()
assert ' '.join(['-bios', str(tmpdir / "test.img")]) in ' '.join(options)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Test not working on Windows")
async def test_uefi_boot_mode_option(vm, tmpdir, images_dir, fake_qemu_img_binary):
vm._uefi = True
# create fake OVMF files
ovmf_code_path = os.path.join(images_dir, "OVMF_CODE.fd")
with open(ovmf_code_path, "w+") as f:
f.write('1')
ovmf_vars_path = os.path.join(images_dir, "OVMF_VARS.fd")
with open(ovmf_vars_path, "w+") as f:
f.write('1')
options = await vm._build_command()
assert ' '.join(["-drive", "if=pflash,format=raw,readonly,file={}".format(ovmf_code_path)]) in ' '.join(options)
assert ' '.join(["-drive", "if=pflash,format=raw,file={}".format(os.path.join(vm.working_dir, "OVMF_VARS.fd"))]) in ' '.join(options)
async def test_uefi_with_bios_image_already_configured(vm, tmpdir, fake_qemu_img_binary):
vm._bios_image = str(tmpdir / "test.img")
vm._uefi = True
with pytest.raises(QemuError):
await vm._build_command()
async def test_vnc_option(vm, fake_qemu_img_binary):
vm._console_type = 'vnc'
@ -406,7 +428,6 @@ async def test_spice_option(vm, fake_qemu_img_binary):
async def test_tpm_option(vm, tmpdir, fake_qemu_img_binary):
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
vm._tpm = True
tpm_sock = os.path.join(vm.temporary_directory, "swtpm.sock")
with patch("os.path.exists", return_value=True) as os_path:
@ -500,7 +521,6 @@ async def test_control_vm_expect_text(vm, running_subprocess_mock):
async def test_build_command(vm, fake_qemu_binary):
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
os.environ["DISPLAY"] = "0:0"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
cmd = await vm._build_command()
@ -535,7 +555,6 @@ async def test_build_command_manual_uuid(vm):
If user has set a uuid we keep it
"""
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
vm.options = "-uuid e1c307a4-896f-11e6-81a5-3c07547807cc"
os.environ["DISPLAY"] = "0:0"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
@ -670,7 +689,6 @@ async def test_build_command_two_adapters_mac_address(vm):
Should support multiple base vmac address
"""
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.5.0")
vm.adapters = 2
vm.mac_address = "00:00:ab:0e:0f:09"
mac_0 = vm._mac_address
@ -695,12 +713,9 @@ async def test_build_command_two_adapters_mac_address(vm):
async def test_build_command_large_number_of_adapters(vm):
"""
When we have more than 28 interfaces we need to add a pci bridge for
additional interfaces
additional interfaces (supported only with Qemu 2.4 and later)
"""
# It's supported only with Qemu 2.4 and later
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.4.0")
vm.adapters = 100
vm.mac_address = "00:00:ab:0e:0f:09"
mac_0 = vm._mac_address
@ -742,7 +757,6 @@ async def test_build_command_with_virtio_net_pci_adapter(vm):
Test virtio-net-pci adapter which has parameters speed=10000 & duplex=full hard-coded
"""
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.4.0")
vm.adapters = 1
vm.mac_address = "00:00:ab:0e:0f:09"
vm._adapter_type = "virtio-net-pci"