Example #1
0
    def vm_create(self,
                  vm_name,
                  vm_type,
                  vm_user,
                  vm_networkassoc,
                  vm_cpuarch,
                  vm_image,
                  vm_mem,
                  vm_cores,
                  vm_storage,
                  customization=None,
                  vm_keepalive=0,
                  job_proxy_file_path=None,
                  myproxy_creds_name=None,
                  myproxy_server=None,
                  myproxy_server_port=None,
                  job_per_core=False,
                  proxy_non_boot=False,
                  vmimage_proxy_file=None,
                  vmimage_proxy_file_path=None):
        """Attempt to boot up a new VM on the cluster.

        Builds the Nimbus workspace metadata, deployment-request and
        optional XML files, executes the workspace create command, and on
        success registers the new VM in self.vms and checks out its
        resources.

        Returns:
            0 on success; -1 for proxy problems (caller should temp-ban
            the job); -2 when the cluster was out of network slots or
            memory (local resource counts are adjusted); -3 for
            unrecoverable creation errors; self.ERROR on setup failures.
        """
        def _remove_files(files):
            """Private function to clean up temporary files created during the create process."""
            for tmp_file in files:
                try:
                    if tmp_file:
                        log.verbose("Deleting %s" % tmp_file)
                        os.remove(tmp_file)
                # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
                # are not swallowed during cleanup.
                except Exception:
                    log.exception("Couldn't delete %s" % tmp_file)

        log.verbose("Nimbus cloud create command")

        if vm_networkassoc == "":
            # No network specified, so just pick the first available one
            try:
                for netpool in self.net_slots:
                    if self.net_slots[netpool] > 0:
                        vm_networkassoc = netpool
                        break
                if vm_networkassoc == "":
                    vm_networkassoc = self.network_pools[0]
            except Exception:
                log.exception(
                    "No network pool available? Aborting vm creation.")
                return self.ERROR

        # Create a workspace metadata xml file.  With temporary lease
        # storage enabled, the blank-space partition is omitted.
        if not self.temp_lease_storage:
            vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                vm_cpuarch, vm_image, vm_storage > 0, self.image_attach_device,
                self.scratch_attach_device,)
        else:
            vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                vm_cpuarch, vm_image, False, self.image_attach_device,
                self.scratch_attach_device,)

        # Create a deployment request file
        if not self.temp_lease_storage:
            vm_deploymentrequest = nimbus_xml.ws_deployment_factory(vm_duration = self.vm_lifetime, \
                vm_targetstate = self.VM_TARGETSTATE, vm_mem = vm_mem, vm_storage = vm_storage, vm_nodes = self.VM_NODES, vm_cores=vm_cores)
        else:
            vm_deploymentrequest = nimbus_xml.ws_deployment_factory(vm_duration = self.vm_lifetime, \
                vm_targetstate = self.VM_TARGETSTATE, vm_mem = vm_mem, vm_storage = None, vm_nodes = self.VM_NODES, vm_cores=vm_cores)

        job_proxy = None
        try:
            with open(job_proxy_file_path) as proxy:
                job_proxy = proxy.read()
        except Exception:
            # Only an error when a proxy path was actually supplied; a None
            # path simply means no job proxy is in use.
            if job_proxy_file_path:
                log.exception(
                    "Couldn't open '%s', Backing out of VM Creation." %
                    (job_proxy_file_path))
                return -1  # Temp Ban job

        if customization or job_proxy or vmimage_proxy_file:
            image_scheme = urlparse(vm_image).scheme
            if image_scheme == "https":
                if vmimage_proxy_file:
                    try:
                        with open(vmimage_proxy_file_path) as proxy:
                            vmimage_proxy = proxy.read()
                    except Exception:
                        if vmimage_proxy_file:
                            log.exception(
                                "Couldn't open '%s' path for %s, Backing out of VM Creation."
                                %
                                (vmimage_proxy_file_path, vmimage_proxy_file))
                            return -1  # Temp Ban job
                    _job_proxy = vmimage_proxy
                else:
                    _job_proxy = job_proxy
            else:
                _job_proxy = None
            vm_optional = nimbus_xml.ws_optional_factory(
                custom_tasks=customization, credential=_job_proxy)
        else:
            vm_optional = None

        # Create an EPR file name (unique temporary file)
        (epr_handle, vm_epr) = tempfile.mkstemp(suffix=".vm_epr")
        os.close(epr_handle)

        nimbus_files = [vm_epr, vm_metadata, vm_deploymentrequest, vm_optional]

        # Create cached copy of job proxy to be used by VM for startup and shutdown.
        vm_proxy_file_path = None
        if job_proxy_file_path and not proxy_non_boot:
            try:
                vm_proxy_file_path = self._cache_proxy(job_proxy_file_path)
                log.verbose("Cached proxy to '%s'" % vm_proxy_file_path)
            except Exception:
                log.exception("Problem caching proxy.")
                _remove_files(nimbus_files)
                return -1

        # Create the workspace command as a list (private method)
        ws_cmd = self.vmcreate_factory(vm_epr,
                                       vm_metadata,
                                       vm_deploymentrequest,
                                       optional_file=vm_optional)

        # str.join works on Python 2 and 3; the old string.join does not.
        log.debug("Command: " + " ".join(ws_cmd))

        # Execute the workspace create command: returns immediately.
        env = None
        if vm_proxy_file_path is not None and not proxy_non_boot:
            env = {'X509_USER_PROXY': vm_proxy_file_path}
            log.debug(
                "VM creation environment will contain:\n\tX509_USER_PROXY = %s"
                % (vm_proxy_file_path))

        (create_return, create_out, create_err) = self.vm_execwait(ws_cmd, env)

        if (create_return != 0):
            # Normalize empty/None command output for the log message.
            if not create_out:
                create_out = "No Output returned."
            if not create_err:
                create_err = "No Error output returned."
            log.warning("Error creating VM %s: %s %s %s" %
                        (vm_name, create_out, create_err, create_return))
            _remove_files(nimbus_files + [vm_proxy_file_path])
            err_type = self._extract_create_error(create_err)
            ## TODO Figure out some error codes to return then handle the codes in the scheduler vm creation code
            if err_type == 'NoProxy' or err_type == 'ExpiredProxy':
                create_return = -1
            elif err_type == 'NoSlotsInNetwork' and config.adjust_insufficient_resources:
                with self.res_lock:
                    if vm_networkassoc in self.net_slots:
                        self.vm_slots -= self.net_slots[vm_networkassoc]
                        self.net_slots[
                            vm_networkassoc] = 0  # no slots remaining
                create_return = -2
            elif err_type == 'NotEnoughMemory' and config.adjust_insufficient_resources:
                with self.res_lock:
                    index = self.find_mementry(vm_mem)
                    self.memory[
                        index] = vm_mem - 1  # may still be memory, but just not enough for this vm
                create_return = -2
            elif err_type == 'ExceedMaximumWorkspaces' or err_type == 'NotAuthorized':
                create_return = -3

            return create_return

        log.verbose("Nimbus create command executed.")

        log.verbose("Deleting temporary Nimbus Metadata files")
        _remove_files(nimbus_files)

        # Find the memory entry in the Cluster 'memory' list which _create will be
        # subtracted from
        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            # At this point, there should always be a valid mementry, as the ResourcePool
            # get_resource methods have selected this cluster based on having an open
            # memory entry that fits VM requirements.
            log.error("Cluster memory list has no sufficient memory " +\
              "entries (Not supposed to happen). Returning error.")
        log.verbose("Memory entry found in given cluster: %d" % vm_mementry)

        # Get the id of the VM from the output of workspace.sh
        # (raw strings so the regex escapes stay valid on newer Pythons)
        try:
            vm_id = re.search(r"Workspace created: id (\d*)",
                              create_out).group(1)
        except Exception:
            log.error("Couldn't find workspace id for new VM")
            create_return = -3
            return create_return
        try:
            vm_ip = re.search(
                r"IP address: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})",
                create_out).group(1)
        except Exception:
            log.error("Couldn't find the ip address for new VM")
            create_return = -3
            return create_return

        # Get the first part of the hostname given to the VM
        vm_hostname = self._extract_hostname(create_out)
        if vm_hostname:
            log.verbose("Hostname for vm_id %s is %s" % (vm_id, vm_hostname))
        else:
            log.warning("Unable to get the VM hostname, for vm_id %s" % vm_id)

        # Create a VM object to represent the newly created VM
        new_vm = cluster_tools.VM(name=vm_name,
                                  id=vm_id,
                                  vmtype=vm_type,
                                  user=vm_user,
                                  hostname=vm_hostname,
                                  ipaddress=vm_ip,
                                  clusteraddr=self.network_address,
                                  clusterport=self.port,
                                  cloudtype=self.cloud_type,
                                  network=vm_networkassoc,
                                  cpuarch=vm_cpuarch,
                                  image=vm_image,
                                  memory=vm_mem,
                                  mementry=vm_mementry,
                                  cpucores=vm_cores,
                                  storage=vm_storage,
                                  keep_alive=vm_keepalive,
                                  proxy_file=vm_proxy_file_path,
                                  myproxy_creds_name=myproxy_creds_name,
                                  myproxy_server=myproxy_server,
                                  myproxy_server_port=myproxy_server_port,
                                  job_per_core=job_per_core)

        # Add the new VM object to the cluster's vms list And check out required resources
        self.vms.append(new_vm)
        try:
            self.resource_checkout(new_vm)
        except Exception:
            log.exception(
                "Unexpected error checking out resources when creating a VM. Programming error?"
            )
            return self.ERROR

        log.info("Started vm %s on %s using image at %s" %
                 (new_vm.id, new_vm.clusteraddr, new_vm.image))
        return create_return
Example #2
0
    def vm_create(self, vm_name, vm_type, vm_networkassoc, vm_cpuarch,
            vm_image, vm_mem, vm_cores, vm_storage, customization=None, vm_keepalive=0,
            job_proxy_file_path=None, job_per_core=False):
        """Attempt to boot up a new VM on the cluster.

        Returns 0 on success, the workspace command's non-zero exit code
        on a failed create, or self.ERROR on setup/parse failures.
        """
        log.debug("Nimbus cloud create command")

        if vm_networkassoc == "":
            # No network specified, so just pick the first available one
            try:
                vm_networkassoc = self.network_pools[0]
            except Exception:
                log.exception("No network pool available? Aborting vm creation.")
                return self.ERROR

        # Create a workspace metadata xml file
        vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                vm_cpuarch, vm_image)

        # Create a deployment request file
        vm_deploymentrequest = nimbus_xml.ws_deployment_factory(self.VM_DURATION, \
                self.VM_TARGETSTATE, vm_mem, vm_storage, self.VM_NODES, vm_cores=vm_cores)

        if customization:
            vm_optional = nimbus_xml.ws_optional_factory(customization)
        else:
            vm_optional = None

        # Create an EPR file name (unique temporary file)
        (epr_handle, vm_epr) = tempfile.mkstemp(suffix=".vm_epr")
        os.close(epr_handle)

        # Create the workspace command as a list (private method)
        ws_cmd = self.vmcreate_factory(vm_epr, vm_metadata, vm_deploymentrequest, optional_file=vm_optional)

        # str.join works on Python 2 and 3; the old string.join does not.
        log.debug("vm_create - Command: " + " ".join(ws_cmd))

        # Execute the workspace create command: returns immediately.
        env = None
        if job_proxy_file_path is not None:
            env = {'X509_USER_PROXY': job_proxy_file_path}
            log.debug("VM creation environment will contain:\n\tX509_USER_PROXY = %s" % (job_proxy_file_path))

        (create_return, create_out, create_err) = self.vm_execwait(ws_cmd, env)
        if (create_return != 0):
            log.warning("vm_create - Error creating VM %s: %s %s" % (vm_name, create_out, create_err))
            return create_return

        log.debug("(vm_create) - workspace create command executed.")

        log.debug("vm_create - Deleting temporary Nimbus Metadata files")
        os.remove(vm_metadata)
        os.remove(vm_deploymentrequest)
        if vm_optional:
            os.remove(vm_optional)

        # Find the memory entry in the Cluster 'memory' list which _create will be
        # subtracted from
        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            # At this point, there should always be a valid mementry, as the ResourcePool
            # get_resource methods have selected this cluster based on having an open
            # memory entry that fits VM requirements.
            log.error("(vm_create) - Cluster memory list has no sufficient memory " +\
              "entries (Not supposed to happen). Returning error.")
        log.debug("(vm_create) - vm_create - Memory entry found in given cluster: %d" % vm_mementry)

        # Get the id of the VM from the output of workspace.sh
        try:
            vm_id = re.search(r"Workspace created: id (\d*)", create_out).group(1)
            # Renaming VM epr file for user-friendly reference.
            os.rename(vm_epr, "%s.%s" % (vm_epr, vm_id))
        except Exception:
            # Bug fix: execution previously fell through and the later
            # references to vm_id raised a NameError. Bail out instead.
            log.error("vm_create - couldn't find workspace id for new VM")
            return self.ERROR

        # Get the first part of the hostname given to the VM
        vm_hostname = self._extract_hostname(create_out)
        if vm_hostname:
            log.debug("Hostname for vm_id %s is %s" % (vm_id, vm_hostname))
        else:
            log.warning("Unable to get the VM hostname, for vm_id %s" % vm_id)

        # Create a VM object to represent the newly created VM
        new_vm = VM(name = vm_name, id = vm_id, vmtype = vm_type,
            hostname = vm_hostname, clusteraddr = self.network_address,
            cloudtype = self.cloud_type,network = vm_networkassoc,
            cpuarch = vm_cpuarch, image = vm_image,
            memory = vm_mem, mementry = vm_mementry, cpucores = vm_cores,
            storage = vm_storage, keep_alive = vm_keepalive, 
            proxy_file = job_proxy_file_path, job_per_core = job_per_core)

        # Add the new VM object to the cluster's vms list And check out required resources
        self.vms.append(new_vm)
        try:
            self.resource_checkout(new_vm)
        except Exception:
            log.exception("Unexpected error checking out resources when creating a VM. Programming error?")
            return self.ERROR

        log.info("Started vm %s on %s using image at %s" % (new_vm.id, new_vm.clusteraddr, new_vm.image))
        return create_return
Example #3
0
    def vm_create(self, vm_name, vm_type, vm_user, vm_networkassoc, vm_cpuarch,
            vm_image, vm_mem, vm_cores, vm_storage, customization=None, vm_keepalive=0,
            job_proxy_file_path=None, myproxy_creds_name=None, myproxy_server=None, 
            myproxy_server_port=None, job_per_core=False, proxy_non_boot=False,
            vmimage_proxy_file=None, vmimage_proxy_file_path=None):
        """Attempt to boot up a new VM on the cluster.

        Returns:
            0 on success; -1 for proxy problems (caller should temp-ban
            the job); -2 when the cluster was out of network slots or
            memory (local resource counts are adjusted); -3 for
            unrecoverable creation errors; self.ERROR on setup failures.
        """
        def _remove_files(files):
            """Private function to clean up temporary files created during the create process."""
            for tmp_file in files:
                try:
                    if tmp_file:
                        log.verbose("Deleting %s" % tmp_file)
                        os.remove(tmp_file)
                # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
                # are not swallowed during cleanup.
                except Exception:
                    log.exception("Couldn't delete %s" % tmp_file)

        log.verbose("Nimbus cloud create command")

        if vm_networkassoc == "":
            # No network specified, so just pick the first available one
            try:
                for netpool in self.net_slots:
                    if self.net_slots[netpool] > 0:
                        vm_networkassoc = netpool
                        break
                if vm_networkassoc == "":
                    vm_networkassoc = self.network_pools[0]
            except Exception:
                log.exception("No network pool available? Aborting vm creation.")
                return self.ERROR

        # Create a workspace metadata xml file.  With temporary lease
        # storage enabled, the blank-space partition is omitted.
        if not self.temp_lease_storage:
            vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                vm_cpuarch, vm_image, vm_storage > 0, self.image_attach_device,
                self.scratch_attach_device,)
        else:
            vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                vm_cpuarch, vm_image, False, self.image_attach_device,
                self.scratch_attach_device,)

        # Create a deployment request file
        if not self.temp_lease_storage:
            vm_deploymentrequest = nimbus_xml.ws_deployment_factory(vm_duration = self.vm_lifetime, \
                vm_targetstate = self.VM_TARGETSTATE, vm_mem = vm_mem, vm_storage = vm_storage, vm_nodes = self.VM_NODES, vm_cores=vm_cores)
        else:
            vm_deploymentrequest = nimbus_xml.ws_deployment_factory(vm_duration = self.vm_lifetime, \
                vm_targetstate = self.VM_TARGETSTATE, vm_mem = vm_mem, vm_storage = None, vm_nodes = self.VM_NODES, vm_cores=vm_cores)

        job_proxy = None
        try:
            with open(job_proxy_file_path) as proxy:
                job_proxy = proxy.read()
        except Exception:
            # Only an error when a proxy path was actually supplied; a None
            # path simply means no job proxy is in use.
            if job_proxy_file_path:
                log.exception("Couldn't open '%s', Backing out of VM Creation." % (job_proxy_file_path))
                return -1 # Temp Ban job

        if customization or job_proxy or vmimage_proxy_file:
            image_scheme = urlparse(vm_image).scheme
            if image_scheme == "https":
                if vmimage_proxy_file:
                    try:
                        with open(vmimage_proxy_file_path) as proxy:
                            vmimage_proxy = proxy.read()
                    except Exception:
                        if vmimage_proxy_file:
                            log.exception("Couldn't open '%s' path for %s, Backing out of VM Creation." % (vmimage_proxy_file_path, vmimage_proxy_file))
                            return -1 # Temp Ban job
                    _job_proxy = vmimage_proxy
                else:
                    _job_proxy = job_proxy
            else:
                _job_proxy = None
            vm_optional = nimbus_xml.ws_optional_factory(custom_tasks=customization, credential=_job_proxy)
        else:
            vm_optional = None

        # Create an EPR file name (unique temporary file)
        (epr_handle, vm_epr) = tempfile.mkstemp(suffix=".vm_epr")
        os.close(epr_handle)

        nimbus_files = [vm_epr, vm_metadata, vm_deploymentrequest, vm_optional]

        # Create cached copy of job proxy to be used by VM for startup and shutdown.
        vm_proxy_file_path = None
        if job_proxy_file_path and not proxy_non_boot:
            try:
                vm_proxy_file_path = self._cache_proxy(job_proxy_file_path)
                log.verbose("Cached proxy to '%s'" % vm_proxy_file_path)
            except Exception:
                log.exception("Problem caching proxy.")
                _remove_files(nimbus_files)
                return -1

        # Create the workspace command as a list (private method)
        ws_cmd = self.vmcreate_factory(vm_epr, vm_metadata, vm_deploymentrequest, optional_file=vm_optional)

        # str.join works on Python 2 and 3; the old string.join does not.
        log.debug("Command: " + " ".join(ws_cmd))

        # Execute the workspace create command: returns immediately.
        env = None
        if vm_proxy_file_path is not None and not proxy_non_boot:
            env = {'X509_USER_PROXY': vm_proxy_file_path}
            log.debug("VM creation environment will contain:\n\tX509_USER_PROXY = %s" % (vm_proxy_file_path))

        (create_return, create_out, create_err) = self.vm_execwait(ws_cmd, env)

        if (create_return != 0):
            # Normalize empty/None command output for the log message.
            if not create_out:
                create_out = "No Output returned."
            if not create_err:
                create_err = "No Error output returned."
            log.warning("Error creating VM %s: %s %s %s" % (vm_name, create_out, create_err, create_return))
            _remove_files(nimbus_files + [vm_proxy_file_path])
            err_type = self._extract_create_error(create_err)
            ## TODO Figure out some error codes to return then handle the codes in the scheduler vm creation code
            if err_type == 'NoProxy' or err_type == 'ExpiredProxy':
                create_return = -1
            elif err_type == 'NoSlotsInNetwork' and config.adjust_insufficient_resources:
                with self.res_lock:
                    if vm_networkassoc in self.net_slots:
                        self.vm_slots -= self.net_slots[vm_networkassoc]
                        self.net_slots[vm_networkassoc] = 0 # no slots remaining
                create_return = -2
            elif err_type == 'NotEnoughMemory' and config.adjust_insufficient_resources:
                with self.res_lock:
                    index = self.find_mementry(vm_mem)
                    self.memory[index] = vm_mem - 1 # may still be memory, but just not enough for this vm
                create_return = -2
            elif err_type == 'ExceedMaximumWorkspaces' or err_type == 'NotAuthorized':
                create_return = -3

            return create_return

        log.verbose("Nimbus create command executed.")

        log.verbose("Deleting temporary Nimbus Metadata files")
        _remove_files(nimbus_files)

        # Find the memory entry in the Cluster 'memory' list which _create will be
        # subtracted from
        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            # At this point, there should always be a valid mementry, as the ResourcePool
            # get_resource methods have selected this cluster based on having an open
            # memory entry that fits VM requirements.
            log.error("Cluster memory list has no sufficient memory " +\
              "entries (Not supposed to happen). Returning error.")
        log.verbose("Memory entry found in given cluster: %d" % vm_mementry)

        # Get the id of the VM from the output of workspace.sh
        # (raw strings so the regex escapes stay valid on newer Pythons)
        try:
            vm_id = re.search(r"Workspace created: id (\d*)", create_out).group(1)
        except Exception:
            log.error("Couldn't find workspace id for new VM")
            create_return = -3
            return create_return
        try:
            vm_ip = re.search(r"IP address: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", create_out).group(1)
        except Exception:
            log.error("Couldn't find the ip address for new VM")
            create_return = -3
            return create_return

        # Get the first part of the hostname given to the VM
        vm_hostname = self._extract_hostname(create_out)
        if vm_hostname:
            log.verbose("Hostname for vm_id %s is %s" % (vm_id, vm_hostname))
        else:
            log.warning("Unable to get the VM hostname, for vm_id %s" % vm_id)

        # Create a VM object to represent the newly created VM
        new_vm = cluster_tools.VM(name = vm_name, id = vm_id, vmtype = vm_type, user = vm_user,
            hostname = vm_hostname, ipaddress = vm_ip, 
            clusteraddr = self.network_address, clusterport = self.port,
            cloudtype = self.cloud_type,network = vm_networkassoc,
            cpuarch = vm_cpuarch, image = vm_image,
            memory = vm_mem, mementry = vm_mementry, cpucores = vm_cores,
            storage = vm_storage, keep_alive = vm_keepalive, 
            proxy_file = vm_proxy_file_path, 
            myproxy_creds_name = myproxy_creds_name, myproxy_server = myproxy_server, 
            myproxy_server_port = myproxy_server_port, job_per_core = job_per_core)

        # Add the new VM object to the cluster's vms list And check out required resources
        self.vms.append(new_vm)
        try:
            self.resource_checkout(new_vm)
        except Exception:
            log.exception("Unexpected error checking out resources when creating a VM. Programming error?")
            return self.ERROR

        log.info("Started vm %s on %s using image at %s" % (new_vm.id, new_vm.clusteraddr, new_vm.image))
        return create_return
Example #4
0
    def vm_create(self, vm_name, vm_type, vm_networkassoc, vm_cpuarch,
            vm_imagelocation, vm_mem, vm_cores, vm_storage):
        """Attempt to boot up a new VM on the cluster.

        Builds the Nimbus metadata and deployment-request files, runs the
        workspace create command, and on success registers the new VM in
        self.vms and checks out its resources.

        Returns 0 on success, or the workspace command's non-zero exit
        code when creation fails.
        """
        log.debug("Nimbus cloud create command")

        # Create a workspace metadata xml file from passed parameters
        vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                vm_cpuarch, vm_imagelocation)

        # Create a deployment request file from given parameters
        vm_deploymentrequest = nimbus_xml.ws_deployment_factory(self.VM_DURATION, \
                self.VM_TARGETSTATE, vm_mem, vm_storage, self.VM_NODES)

        # Create an EPR file name (unique temporary file)
        (epr_handle, vm_epr) = tempfile.mkstemp()
        os.close(epr_handle)

        # Create the workspace command as a list (private method)
        ws_cmd = self.vmcreate_factory(vm_epr, vm_metadata, vm_deploymentrequest)
        log.debug("vm_create - workspace create command prepared.")
        # str.join works on Python 2 and 3; the old string.join does not.
        log.debug("vm_create - Command: " + " ".join(ws_cmd))

        # Execute the workspace create command: returns immediately.
        create_return = self.vm_execute(ws_cmd)
        if (create_return != 0):
            log.warning("vm_create - Error in executing workspace create command.")
            log.warning("vm_create - VM %s (ID: %s) not created. Returning error code." \
              % (vm_name, vm_epr))
            return create_return
        log.debug("(vm_create) - workspace create command executed.")

        log.debug("vm_create - Deleting temporary Nimbus Metadata file")
        os.remove(vm_metadata)

        # Find the memory entry in the Cluster 'memory' list which _create will be
        # subtracted from
        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            # At this point, there should always be a valid mementry, as the ResourcePool
            # get_resource methods have selected this cluster based on having an open
            # memory entry that fits VM requirements.
            log.error("(vm_create) - Cluster memory list has no sufficient memory " +\
              "entries (Not supposed to happen). Returning error.")
        log.debug("(vm_create) - vm_create - Memory entry found in given cluster: %d" % vm_mementry)

        # Create a VM object to represent the newly created VM.  The EPR
        # file path doubles as the VM's id in this older variant.
        new_vm = VM(name = vm_name, id = vm_epr, vmtype = vm_type,
            clusteraddr = self.network_address, cloudtype = self.cloud_type,
            network = vm_networkassoc, cpuarch = vm_cpuarch,
            imagelocation = vm_imagelocation, memory = vm_mem,
            mementry = vm_mementry, cpucores = vm_cores, storage = vm_storage)

        # Add the new VM object to the cluster's vms list And check out required resources
        self.vms.append(new_vm)
        self.resource_checkout(new_vm)

        log.debug("(vm_create) - VM created and stored, cluster updated.")
        return create_return