def set_vmname(self, vmname):
    """
    Renames the VirtualBox VM.

    :param vmname: VirtualBox VM name
    """

    if vmname == self._vmname:
        return

    if self.linked_clone:
        if self.status == "started":
            raise VirtualBoxError("You can't change the name of running VM {}".format(self._name))
        # We can't rename a VM to a name that already exists
        vms = yield from self.manager.list_vms(allow_clone=True)
        if vmname in [vm["vmname"] for vm in vms]:
            raise VirtualBoxError("You can't change the name to {} because it is already used in VirtualBox".format(vmname))
        yield from self._modify_vm('--name "{}"'.format(vmname))

    log.info("VirtualBox VM '{name}' [{id}] has set the VM name to '{vmname}'".format(name=self.name, id=self.id, vmname=vmname))
    self._vmname = vmname

def _check_duplicate_linked_clone(self):
    """
    Without linked clones, two VMs using the same image cannot run at the same time.

    To avoid issues such as false positives when one project closes and another opens,
    the check is retried multiple times.
    """

    trial = 0

    while True:
        found = False
        for node in self.manager.nodes:
            if node != self and node.vmname == self.vmname:
                found = True
                if node.project != self.project:
                    if trial >= 30:
                        raise VirtualBoxError("Sorry, a node without the linked clone setting enabled can only be used once on your server.\n"
                                              "{} is already used by {} in project {}".format(self.vmname, node.name, self.project.name))
                else:
                    if trial >= 5:
                        raise VirtualBoxError("Sorry, a node without the linked clone setting enabled can only be used once on your server.\n"
                                              "{} is already used by {} in this project".format(self.vmname, node.name))
        if not found:
            return
        trial += 1
        yield from asyncio.sleep(1)

def create(self):

    if not self.linked_clone:
        yield from self._check_duplicate_linked_clone()

    yield from self._get_system_properties()
    if "API version" not in self._system_properties:
        raise VirtualBoxError("Can't access the VirtualBox API version:\n{}".format(self._system_properties))
    if parse_version(self._system_properties["API version"]) < parse_version("4_3"):
        raise VirtualBoxError("The VirtualBox API version is lower than 4.3")
    log.info("VirtualBox VM '{name}' [{id}] created".format(name=self.name, id=self.id))

    if self.linked_clone:
        if self.id and os.path.isdir(os.path.join(self.working_dir, self._vmname)):
            self._patch_vm_uuid()
            yield from self.manager.execute("registervm", [self._linked_vbox_file()])
            yield from self._reattach_linked_hdds()
        else:
            yield from self._create_linked_clone()

    if self._adapters:
        yield from self.set_adapters(self._adapters)

    vm_info = yield from self._get_vm_info()
    if "memory" in vm_info:
        self._ram = int(vm_info["memory"])

def stop_capture(self, adapter_number):
    """
    Stops a packet capture.

    :param adapter_number: adapter number
    """

    try:
        adapter = self._ethernet_adapters[adapter_number]
    except KeyError:
        raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                        adapter_number=adapter_number))

    nio = adapter.get_nio(0)
    if not nio:
        raise VirtualBoxError("Adapter {} is not connected".format(adapter_number))

    nio.stopPacketCapture()

    if self.ubridge:
        yield from self._ubridge_send('bridge stop_capture {name}'.format(name="VBOX-{}-{}".format(self._id, adapter_number)))

    log.info("VirtualBox VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name,
                                                                                                         id=self.id,
                                                                                                         adapter_number=adapter_number))

def _patch_vm_uuid(self):
    """
    Fix the VM UUID in the case of a linked clone.
    """

    if os.path.exists(self._linked_vbox_file()):
        try:
            tree = ET.parse(self._linked_vbox_file())
        except ET.ParseError:
            raise VirtualBoxError("Cannot modify VirtualBox linked nodes file. "
                                  "File {} is corrupted.".format(self._linked_vbox_file()))

        machine = tree.getroot().find("{http://www.virtualbox.org/}Machine")
        if machine is not None and machine.get("uuid") != "{" + self.id + "}":
            for image in tree.getroot().findall("{http://www.virtualbox.org/}Image"):
                currentSnapshot = machine.get("currentSnapshot")
                if currentSnapshot:
                    newSnapshot = re.sub(r"\{.*\}", "{" + str(uuid.uuid4()) + "}", currentSnapshot)
                    shutil.move(os.path.join(self.working_dir, self._vmname, "Snapshots", currentSnapshot) + ".vdi",
                                os.path.join(self.working_dir, self._vmname, "Snapshots", newSnapshot) + ".vdi")
                    image.set("uuid", newSnapshot)

            machine.set("uuid", "{" + self.id + "}")
            tree.write(self._linked_vbox_file())

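# A minimal sketch of the element shape that _patch_vm_uuid() expects in the linked
# clone's .vbox file; the uuid values below are made-up placeholders, not taken from
# a real file:
#
#   <VirtualBox xmlns="http://www.virtualbox.org/">
#     <Machine uuid="{old-machine-uuid}" currentSnapshot="{old-snapshot-uuid}" ...>
#       ...
#     </Machine>
#     <Image uuid="{old-snapshot-uuid}" ... />
#   </VirtualBox>
#
# The Machine uuid is rewritten to this node's GNS3 id, and each snapshot Image gets
# a fresh uuid together with a rename of the matching Snapshots/{uuid}.vdi file.
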
def start_capture(self, adapter_number, output_file):
    """
    Starts a packet capture.

    :param adapter_number: adapter number
    :param output_file: PCAP destination file for the capture
    """

    try:
        adapter = self._ethernet_adapters[adapter_number]
    except KeyError:
        raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                        adapter_number=adapter_number))

    if not self.use_ubridge:
        vm_state = yield from self._get_vm_state()
        if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
            raise VirtualBoxError("Sorry, packet capturing on a started VirtualBox VM is not supported without using uBridge")

    nio = adapter.get_nio(0)
    if not nio:
        raise VirtualBoxError("Adapter {} is not connected".format(adapter_number))

    if nio.capturing:
        raise VirtualBoxError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))

    nio.startPacketCapture(output_file)

    if self.ubridge:
        yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VBOX-{}-{}".format(self._id, adapter_number),
                                                                                           output_file=output_file))

    log.info("VirtualBox VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name,
                                                                                                         id=self.id,
                                                                                                         adapter_number=adapter_number))

def start(self):
    """
    Starts this VirtualBox VM.
    """

    if self.status == "started":
        return

    # resume the VM if it is paused
    vm_state = yield from self._get_vm_state()
    if vm_state == "paused":
        yield from self.resume()
        return

    # VM must be powered off to start it
    if vm_state != "poweroff":
        raise VirtualBoxError("VirtualBox VM not powered off")

    yield from self._set_network_options()
    yield from self._set_serial_console()

    # check if there is enough RAM to run
    self.check_available_ram(self.ram)

    args = [self._vmname]
    if self._headless:
        args.extend(["--type", "headless"])
    result = yield from self.manager.execute("startvm", args)
    self.status = "started"
    log.info("VirtualBox VM '{name}' [{id}] started".format(name=self.name, id=self.id))
    log.debug("Start result: {}".format(result))

    # add a guest property to let the VM know about the GNS3 name
    yield from self.manager.execute("guestproperty", ["set", self._vmname, "NameInGNS3", self.name])
    # add a guest property to let the VM know about the GNS3 project directory
    yield from self.manager.execute("guestproperty", ["set", self._vmname, "ProjectDirInGNS3", self.working_dir])

    if self.use_ubridge:
        yield from self._start_ubridge()
        for adapter_number in range(0, self._adapters):
            nio = self._ethernet_adapters[adapter_number].get_nio(0)
            if nio:
                yield from self._add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
                                                            self._local_udp_tunnels[adapter_number][1],
                                                            nio)

    yield from self._start_console()

    if (yield from self.check_hw_virtualization()):
        self._hw_virtualization = True

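# Assuming manager.execute(cmd, args) shells out to "VBoxManage <cmd> <args...>"
# (which is what the argument lists above suggest), the start sequence corresponds
# roughly to the following commands; the placeholders are illustrative only:
#
#   VBoxManage startvm <vmname> --type headless
#   VBoxManage guestproperty set <vmname> NameInGNS3 <node name>
#   VBoxManage guestproperty set <vmname> ProjectDirInGNS3 <project working dir>
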
def _get_vm_state(self):
    """
    Returns the VM state (e.g. running, paused etc.)

    :returns: state (string)
    """

    results = yield from self.manager.execute("showvminfo", [self._vmname, "--machinereadable"])
    for info in results:
        if '=' in info:
            name, value = info.split('=', 1)
            if name == "VMState":
                return value.strip('"')
    raise VirtualBoxError("Could not get VM state for {}".format(self._vmname))

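# For reference, "VBoxManage showvminfo <vmname> --machinereadable" prints one
# key=value pair per line; the parser above only cares about the VMState entry.
# An illustrative (made-up) excerpt of that output:
#
#   name="debian-test"
#   memory=1024
#   VMState="poweroff"
#
# The surrounding quotes are stripped, so callers receive plain values such as
# "poweroff", "running" or "paused".
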
def adapter_remove_nio_binding(self, adapter_number):
    """
    Removes an adapter NIO binding.

    :param adapter_number: adapter number

    :returns: NIO instance
    """

    try:
        adapter = self._ethernet_adapters[adapter_number]
    except KeyError:
        raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                        adapter_number=adapter_number))

    if self.ubridge:
        yield from self._ubridge_send("bridge delete {name}".format(name="VBOX-{}-{}".format(self._id, adapter_number)))
        vm_state = yield from self._get_vm_state()
        if vm_state == "running":
            yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1))
    else:
        vm_state = yield from self._get_vm_state()
        if vm_state == "running":
            # dynamically disable the VirtualBox adapter
            yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1))
            yield from self._control_vm("nic{} null".format(adapter_number + 1))

    nio = adapter.get_nio(0)
    if isinstance(nio, NIOUDP):
        self.manager.port_manager.release_udp_port(nio.lport, self._project)
    adapter.remove_nio(0)

    log.info("VirtualBox VM '{name}' [{id}]: {nio} removed from adapter {adapter_number}".format(name=self.name,
                                                                                                 id=self.id,
                                                                                                 nio=nio,
                                                                                                 adapter_number=adapter_number))
    return nio

def _get_pipe_name(self):
    """
    Returns the pipe name to create a serial connection.

    :returns: pipe path (string)
    """

    if sys.platform.startswith("win"):
        pipe_name = r"\\.\pipe\gns3_vbox\{}".format(self.id)
    else:
        pipe_name = os.path.join(tempfile.gettempdir(), "gns3_vbox", "{}".format(self.id))
        try:
            os.makedirs(os.path.dirname(pipe_name), exist_ok=True)
        except OSError as e:
            raise VirtualBoxError("Could not create the VirtualBox pipe directory: {}".format(e))
    return pipe_name

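# Example results (the node id shown is a made-up placeholder): on Windows this
# returns a named pipe such as r"\\.\pipe\gns3_vbox\5f3c...", while on Linux/macOS
# it returns a path such as "/tmp/gns3_vbox/5f3c..." and creates the parent
# directory if needed.
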
def set_adapters(self, adapters):
    """
    Sets the number of Ethernet adapters for this VirtualBox VM instance.

    :param adapters: number of adapters
    """

    # check for the maximum adapters supported by the VM
    self._maximum_adapters = yield from self._get_maximum_supported_adapters()
    if adapters > self._maximum_adapters:
        raise VirtualBoxError("Number of adapters above the maximum supported of {}".format(self._maximum_adapters))

    self._ethernet_adapters.clear()
    for adapter_number in range(0, adapters):
        self._ethernet_adapters[adapter_number] = EthernetAdapter()

    self._adapters = len(self._ethernet_adapters)
    log.info("VirtualBox VM '{name}' [{id}] has changed the number of Ethernet adapters to {adapters}".format(name=self.name,
                                                                                                              id=self.id,
                                                                                                              adapters=adapters))

def _set_network_options(self):
    """
    Configures network options.
    """

    nic_attachments = yield from self._get_nic_attachements(self._maximum_adapters)
    for adapter_number in range(0, self._adapters):
        attachment = nic_attachments[adapter_number]
        if attachment == "null":
            # disconnect the cable if no backend is attached.
            yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
        if attachment == "none":
            # set the backend to null to avoid a difference in the number of interfaces in the Guest.
            yield from self._modify_vm("--nic{} null".format(adapter_number + 1))
            yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))

        if self.use_ubridge:
            # use a local UDP tunnel to connect to uBridge instead
            if adapter_number not in self._local_udp_tunnels:
                self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
            nio = self._local_udp_tunnels[adapter_number][0]
        else:
            nio = self._ethernet_adapters[adapter_number].get_nio(0)

        if nio:
            if not self._use_any_adapter and attachment not in ("none", "null", "generic"):
                raise VirtualBoxError("Attachment ({}) already configured on adapter {}. "
                                      "Please set it to 'Not attached' to allow GNS3 to use it.".format(attachment, adapter_number + 1))

            yield from self._modify_vm("--nictrace{} off".format(adapter_number + 1))

            vbox_adapter_type = "82540EM"
            if self._adapter_type == "PCnet-PCI II (Am79C970A)":
                vbox_adapter_type = "Am79C970A"
            if self._adapter_type == "PCNet-FAST III (Am79C973)":
                vbox_adapter_type = "Am79C973"
            if self._adapter_type == "Intel PRO/1000 MT Desktop (82540EM)":
                vbox_adapter_type = "82540EM"
            if self._adapter_type == "Intel PRO/1000 T Server (82543GC)":
                vbox_adapter_type = "82543GC"
            if self._adapter_type == "Intel PRO/1000 MT Server (82545EM)":
                vbox_adapter_type = "82545EM"
            if self._adapter_type == "Paravirtualized Network (virtio-net)":
                vbox_adapter_type = "virtio"
            args = [self._vmname, "--nictype{}".format(adapter_number + 1), vbox_adapter_type]
            yield from self.manager.execute("modifyvm", args)

            if isinstance(nio, NIOUDP):
                log.debug("setting UDP params on adapter {}".format(adapter_number))
                yield from self._modify_vm("--nic{} generic".format(adapter_number + 1))
                yield from self._modify_vm("--nicgenericdrv{} UDPTunnel".format(adapter_number + 1))
                yield from self._modify_vm("--nicproperty{} sport={}".format(adapter_number + 1, nio.lport))
                yield from self._modify_vm("--nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
                yield from self._modify_vm("--nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
                yield from self._modify_vm("--cableconnected{} on".format(adapter_number + 1))

            if nio.capturing:
                yield from self._modify_vm("--nictrace{} on".format(adapter_number + 1))
                yield from self._modify_vm('--nictracefile{} "{}"'.format(adapter_number + 1, nio.pcap_output_file))

            if self.use_ubridge and not self._ethernet_adapters[adapter_number].get_nio(0):
                yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))

    for adapter_number in range(self._adapters, self._maximum_adapters):
        log.debug("disabling remaining adapter {}".format(adapter_number))
        yield from self._modify_vm("--nic{} none".format(adapter_number + 1))

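# _modify_vm() is not shown in this excerpt; assuming it wraps
# "VBoxManage modifyvm <vmname> ..." (consistent with the explicit
# manager.execute("modifyvm", args) call above), an adapter backed by a UDP NIO
# ends up configured roughly like this (the ports and host are made-up examples):
#
#   VBoxManage modifyvm <vmname> --nictype1 82540EM
#   VBoxManage modifyvm <vmname> --nic1 generic
#   VBoxManage modifyvm <vmname> --nicgenericdrv1 UDPTunnel
#   VBoxManage modifyvm <vmname> --nicproperty1 sport=20000
#   VBoxManage modifyvm <vmname> --nicproperty1 dest=127.0.0.1
#   VBoxManage modifyvm <vmname> --nicproperty1 dport=20001
#   VBoxManage modifyvm <vmname> --cableconnected1 on
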
def adapter_add_nio_binding(self, adapter_number, nio):
    """
    Adds an adapter NIO binding.

    :param adapter_number: adapter number
    :param nio: NIO instance to add to the slot/port
    """

    try:
        adapter = self._ethernet_adapters[adapter_number]
    except KeyError:
        raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                        adapter_number=adapter_number))

    if self.ubridge:
        try:
            yield from self._add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
                                                        self._local_udp_tunnels[adapter_number][1],
                                                        nio)
        except KeyError:
            raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                            adapter_number=adapter_number))
        yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
    else:
        vm_state = yield from self._get_vm_state()
        if vm_state == "running":
            if isinstance(nio, NIOUDP):
                # dynamically configure a UDP tunnel on the VirtualBox adapter
                yield from self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1))
                yield from self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, nio.lport))
                yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
                yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
                yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))

                # check if the UDP tunnel has been correctly set
                # (warn when the generic driver is missing or is not UDPTunnel)
                vm_info = yield from self._get_vm_info()
                generic_driver_number = "generic{}".format(adapter_number + 1)
                if generic_driver_number not in vm_info or vm_info[generic_driver_number] != "UDPTunnel":
                    log.warning("UDP tunnel has not been set on nic: {}".format(adapter_number + 1))
                    self.project.emit("log.warning", {"message": "UDP tunnel has not been set on nic: {}".format(adapter_number + 1)})

    adapter.add_nio(0, nio)
    log.info("VirtualBox VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name,
                                                                                             id=self.id,
                                                                                             nio=nio,
                                                                                             adapter_number=adapter_number))