Example #1
0
    def get_vms_local(self):
        """Build the list of cluster_tools.VM objects for the VMs currently
        on OpenStack.

        Queries nova for the current server list and converts each entry
        into a cluster_tools.VM.  Also resets self.newVmUUID as a side
        effect.

        Returns:
            list of cluster_tools.VM on success, or 0 if the server list
            could not be retrieved or converted (callers test for 0).
        """
        nova = self.get_creds_nova()
        try:
            instances = nova.servers.list()
        except Exception:
            self.logger.error(
                "Unable to get instance list by method nova.servers.list().")
            return 0

        vms = []
        self.newVmUUID = {}
        try:
            for instance in instances:
                _uuid = getattr(instance, 'id')
                _name = getattr(instance, 'name')
                try:
                    # 'addresses' maps network name -> list of address dicts;
                    # take the first address on the first network.  list() is
                    # required for dict views (keys() is not subscriptable
                    # on Python 3).
                    _addresses = getattr(instance, 'addresses')
                    _network = list(_addresses.keys())[0]
                    _first_addr = _addresses[_network][0]
                    _ipaddress = _first_addr['addr']
                    _macaddress = _first_addr['OS-EXT-IPS-MAC:mac_addr']
                except Exception:
                    _network = ""
                    _ipaddress = ""
                    _macaddress = ""
                    self.logger.error(
                        "Unable to get ipaddress or macaddress by novaclient for vm: %s %s."
                        % (_name, _uuid))
                try:
                    _image_id = getattr(instance, 'image')['id']
                    _flavor = getattr(instance, 'flavor')['id']
                except Exception:
                    _image_id = ""
                    _flavor = ""
                    self.logger.error(
                        "Unable to get image_id or flavor by novaclient for vm: %s %s."
                        % (_name, _uuid))

                try:
                    # Build the VM object once.  (The previous version built a
                    # throwaway default VM() first and appended that blank
                    # default whenever this construction failed.)
                    new_vm = cluster_tools.VM(name=_name,
                                              uuid=_uuid,
                                              resource_group_type='',
                                              group='',
                                              owner='',
                                              activity='',
                                              jobid='',
                                              hostname='',
                                              ipaddress=_ipaddress,
                                              macaddress=_macaddress,
                                              network=_network,
                                              clusteraddr='',
                                              clusterport='',
                                              cloudtype='openstack',
                                              image_name=_image_id,
                                              flavor=_flavor,
                                              cpucores=0,
                                              memory=0,
                                              storage=0,
                                              keep_alive=0)
                except Exception as e:
                    # Skip instances we cannot convert instead of appending a
                    # blank placeholder VM.
                    self.logger.error(
                        "Unable to create VM object for vm: %s %s: %s"
                        % (_name, _uuid, e))
                    continue
                vms.append(new_vm)
        except Exception:
            self.logger.error(
                "Unable to transfer nova.servers.list() into cluster_tools vm objects."
            )
            return 0
        return vms
Example #2
0
    def vm_create(self,
                  vm_name,
                  vm_type,
                  vm_user,
                  vm_networkassoc,
                  vm_cpuarch,
                  vm_image,
                  vm_mem,
                  vm_cores,
                  vm_storage,
                  customization=None,
                  vm_keepalive=0,
                  job_proxy_file_path=None,
                  myproxy_creds_name=None,
                  myproxy_server=None,
                  myproxy_server_port=None,
                  job_per_core=False,
                  proxy_non_boot=False,
                  vmimage_proxy_file=None,
                  vmimage_proxy_file_path=None):
        """Attempt to boot up a new VM on the cluster (Nimbus workspace).

        Flow: pick a network pool if none was given, build the workspace
        metadata / deployment-request XML files, optionally embed a job or
        image proxy credential, run the workspace create command, then parse
        its output for the workspace id / IP / hostname and register a new
        cluster_tools.VM with resource checkout.

        Returns:
            0 (the command's return code) on success;
            -1 for proxy problems (caller treats as a temp job ban);
            -2 when resources were adjusted after an insufficient-resource
               error (network slots / memory);
            -3 for unrecoverable create errors or unparsable output;
            self.ERROR when no network pool is available or resource
            checkout fails.
        """
        def _remove_files(files):
            """Private function to clean up temporary files created during the create process."""
            for file in files:
                try:
                    if file:
                        log.verbose("Deleting %s" % file)
                        os.remove(file)
                except:
                    log.exception("Couldn't delete %s" % file)

        log.verbose("Nimbus cloud create command")

        if vm_networkassoc == "":
            # No network specified, so just pick the first available one
            try:
                for netpool in self.net_slots.keys():
                    if self.net_slots[netpool] > 0:
                        vm_networkassoc = netpool
                        break
                if vm_networkassoc == "":
                    vm_networkassoc = self.network_pools[0]
            except:
                log.exception(
                    "No network pool available? Aborting vm creation.")
                return self.ERROR

        # Create a workspace metadata xml file
        # (temp_lease_storage disables the blank-space request here and the
        # storage field in the deployment request below)
        if not self.temp_lease_storage:
            vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                vm_cpuarch, vm_image, vm_storage > 0, self.image_attach_device,
                self.scratch_attach_device,)
        else:
            vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                vm_cpuarch, vm_image, False, self.image_attach_device,
                self.scratch_attach_device,)

        # Create a deployment request file
        if not self.temp_lease_storage:
            vm_deploymentrequest = nimbus_xml.ws_deployment_factory(vm_duration = self.vm_lifetime, \
                vm_targetstate = self.VM_TARGETSTATE, vm_mem = vm_mem, vm_storage = vm_storage, vm_nodes = self.VM_NODES, vm_cores=vm_cores)
        else:
            vm_deploymentrequest = nimbus_xml.ws_deployment_factory(vm_duration = self.vm_lifetime, \
                vm_targetstate = self.VM_TARGETSTATE, vm_mem = vm_mem, vm_storage = None, vm_nodes = self.VM_NODES, vm_cores=vm_cores)

        # Read the job proxy, if one was given; a missing/unreadable proxy
        # file is fatal only when a path was actually supplied.
        job_proxy = None
        try:
            with open(job_proxy_file_path) as proxy:
                job_proxy = proxy.read()
        except:
            if job_proxy_file_path:
                log.exception(
                    "Couldn't open '%s', Backing out of VM Creation." %
                    (job_proxy_file_path))
                return -1  # Temp Ban job

        if customization or job_proxy or vmimage_proxy_file:
            # Only https image URLs need a credential attached; prefer the
            # dedicated image proxy over the job proxy when both exist.
            image_scheme = urlparse(vm_image).scheme
            if image_scheme == "https":
                if vmimage_proxy_file:
                    try:
                        with open(vmimage_proxy_file_path) as proxy:
                            vmimage_proxy = proxy.read()
                    except:
                        if vmimage_proxy_file:
                            log.exception(
                                "Couldn't open '%s' path for %s, Backing out of VM Creation."
                                %
                                (vmimage_proxy_file_path, vmimage_proxy_file))
                            return -1  # Temp Ban job
                    _job_proxy = vmimage_proxy
                else:
                    _job_proxy = job_proxy
            else:
                _job_proxy = None
            vm_optional = nimbus_xml.ws_optional_factory(
                custom_tasks=customization, credential=_job_proxy)
        else:
            vm_optional = None

        # Set a timestamp for VM creation
        now = datetime.datetime.now()

        # Create an EPR file name (unique with timestamp)
        (epr_handle, vm_epr) = tempfile.mkstemp(suffix=".vm_epr")
        os.close(epr_handle)

        # All temp files that must be cleaned up on any exit path below.
        nimbus_files = [vm_epr, vm_metadata, vm_deploymentrequest, vm_optional]

        # Create cached copy of job proxy to be used by VM for startup and shutdown.
        vm_proxy_file_path = None
        if job_proxy_file_path and not proxy_non_boot:
            try:
                vm_proxy_file_path = self._cache_proxy(job_proxy_file_path)
                log.verbose("Cached proxy to '%s'" % vm_proxy_file_path)
            except:
                log.exception("Problem caching proxy.")
                _remove_files(nimbus_files)
                return -1

        # Create the workspace command as a list (private method)
        ws_cmd = self.vmcreate_factory(vm_epr,
                                       vm_metadata,
                                       vm_deploymentrequest,
                                       optional_file=vm_optional)

        log.debug("Command: " + string.join(ws_cmd, " "))

        # Execute the workspace create command: returns immediately.
        env = None
        if vm_proxy_file_path != None and not proxy_non_boot:
            env = {'X509_USER_PROXY': vm_proxy_file_path}
            log.debug(
                "VM creation environment will contain:\n\tX509_USER_PROXY = %s"
                % (vm_proxy_file_path))

        (create_return, create_out, create_err) = self.vm_execwait(ws_cmd, env)

        if (create_return != 0):
            if create_out == "" or create_out == None:
                create_out = "No Output returned."
            if create_err == "" or create_err == None:
                create_err = "No Error output returned."
            log.warning("Error creating VM %s: %s %s %s" %
                        (vm_name, create_out, create_err, create_return))
            _remove_files(nimbus_files + [vm_proxy_file_path])
            # Map the raw error output to a coarse error category and adjust
            # our view of the cluster's resources where that helps future
            # scheduling decisions.
            err_type = self._extract_create_error(create_err)
            ## TODO Figure out some error codes to return then handle the codes in the scheduler vm creation code
            if err_type == 'NoProxy' or err_type == 'ExpiredProxy':
                create_return = -1
            elif err_type == 'NoSlotsInNetwork' and config.adjust_insufficient_resources:
                with self.res_lock:
                    if vm_networkassoc in self.net_slots.keys():
                        self.vm_slots -= self.net_slots[vm_networkassoc]
                        self.net_slots[
                            vm_networkassoc] = 0  # no slots remaining
                create_return = -2
            elif err_type == 'NotEnoughMemory' and config.adjust_insufficient_resources:
                with self.res_lock:
                    index = self.find_mementry(vm_mem)
                    self.memory[
                        index] = vm_mem - 1  # may still be memory, but just not enough for this vm
                create_return = -2
            elif err_type == 'ExceedMaximumWorkspaces' or err_type == 'NotAuthorized':
                create_return = -3

            return create_return

        log.verbose("Nimbus create command executed.")

        log.verbose("Deleting temporary Nimbus Metadata files")
        _remove_files(nimbus_files)

        # Find the memory entry in the Cluster 'memory' list which _create will be
        # subtracted from
        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            # At this point, there should always be a valid mementry, as the ResourcePool
            # get_resource methods have selected this cluster based on having an open
            # memory entry that fits VM requirements.
            log.error("Cluster memory list has no sufficient memory " +\
              "entries (Not supposed to happen). Returning error.")
        log.verbose("Memory entry found in given cluster: %d" % vm_mementry)

        # Get the id of the VM from the output of workspace.sh
        try:
            vm_id = re.search("Workspace created: id (\d*)",
                              create_out).group(1)
        except:
            log.error("Couldn't find workspace id for new VM")
            create_return = -3
            return create_return
        try:
            vm_ip = re.search(
                "IP address: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})",
                create_out).group(1)
        except:
            log.error("Couldn't find the ip address for new VM")
            create_return = -3
            return create_return

        # Get the first part of the hostname given to the VM
        vm_hostname = self._extract_hostname(create_out)
        if vm_hostname:
            log.verbose("Hostname for vm_id %s is %s" % (vm_id, vm_hostname))
        else:
            log.warning("Unable to get the VM hostname, for vm_id %s" % vm_id)

        # Create a VM object to represent the newly created VM
        new_vm = cluster_tools.VM(name=vm_name,
                                  id=vm_id,
                                  vmtype=vm_type,
                                  user=vm_user,
                                  hostname=vm_hostname,
                                  ipaddress=vm_ip,
                                  clusteraddr=self.network_address,
                                  clusterport=self.port,
                                  cloudtype=self.cloud_type,
                                  network=vm_networkassoc,
                                  cpuarch=vm_cpuarch,
                                  image=vm_image,
                                  memory=vm_mem,
                                  mementry=vm_mementry,
                                  cpucores=vm_cores,
                                  storage=vm_storage,
                                  keep_alive=vm_keepalive,
                                  proxy_file=vm_proxy_file_path,
                                  myproxy_creds_name=myproxy_creds_name,
                                  myproxy_server=myproxy_server,
                                  myproxy_server_port=myproxy_server_port,
                                  job_per_core=job_per_core)

        # Add the new VM object to the cluster's vms list And check out required resources
        self.vms.append(new_vm)
        try:
            self.resource_checkout(new_vm)
        except:
            log.exception(
                "Unexpected error checking out resources when creating a VM. Programming error?"
            )
            return self.ERROR

        log.info("Started vm %s on %s using image at %s" %
                 (new_vm.id, new_vm.clusteraddr, new_vm.image))
        return create_return
    def vm_create(self, vm_name, vm_type, vm_user, vm_networkassoc, vm_cpuarch,
                  vm_image, vm_mem, vm_cores, vm_storage, customization=None,
                  vm_keepalive=0, instance_type="", maximum_price=0,
                  job_per_core=False, securitygroup=None):
        """Attempt to boot a new VM on the cluster (EC2/Eucalyptus via boto).

        vm_image and instance_type may be dicts keyed by the cluster's
        network address (with a "default" fallback) or plain strings.  When
        maximum_price (in cents) is non-zero a spot instance is requested
        instead of an on-demand one.

        Returns 0 on success, self.ERROR on failure, or None when no
        suitable AMI could be found.
        """
        # Avoid a shared mutable default argument.
        if securitygroup is None:
            securitygroup = []

        log.verbose("Trying to boot %s on %s" % (vm_type, self.network_address))
        if len(securitygroup) != 0:
            # Keep only the requested groups that this cluster knows about.
            sec_group = [group for group in securitygroup
                         if group in self.security_groups]
            if len(sec_group) == 0:
                log.warning("No matching security groups - trying default")
                sec_group.append("default")
        else:
            sec_group = self.security_groups

        # AMI lookup: per-cluster entry, then "default".  TypeError covers
        # vm_image being a plain string instead of a dict.
        try:
            vm_ami = vm_image[self.network_address]
        except (KeyError, TypeError):
            log.debug("No AMI for %s, trying default" % self.network_address)
            try:
                vm_ami = vm_image["default"]
            except (KeyError, TypeError):
                log.exception("Can't find a suitable AMI")
                return

        # Instance type lookup: per-cluster entry, "default" entry, plain
        # string, then the class default.
        try:
            i_type = instance_type[self.network_address]
        except (KeyError, TypeError):
            log.debug("No instance type for %s, trying default" % self.network_address)
            try:
                i_type = instance_type["default"]
            except (KeyError, TypeError):
                if isinstance(instance_type, str):
                    i_type = instance_type
                else:
                    i_type = self.DEFAULT_INSTANCE_TYPE
        instance_type = i_type

        if customization:
            user_data = nimbus_xml.ws_optional(customization)
        else:
            user_data = ""

        if "AmazonEC2" == self.cloud_type and vm_networkassoc != "public":
            log.debug("You requested '%s' networking, but EC2 only supports 'public'" % vm_networkassoc)
            addressing_type = "public"
        else:
            addressing_type = vm_networkassoc

        spot_id = None  # set only on the spot-instance path
        try:
            connection = self._get_connection()
            image = None
            if not "Eucalyptus" == self.cloud_type:
                image = connection.get_image(vm_ami)

            else: #HACK: for some reason Eucalyptus won't respond properly to
                  #      get_image("whateverimg"). Use a linear search until
                  #      this is fixed
                  # This is Eucalyptus bug #495670
                  # https://bugs.launchpad.net/eucalyptus/+bug/495670
                images = connection.get_all_images()
                for potential_match in images:
                    if potential_match.id == vm_ami:
                        image = potential_match
                        break

            if image:
                # NOTE: fixed from "maximum_price is 0" — identity comparison
                # on an int is CPython-implementation-dependent.
                if maximum_price == 0: # don't request a spot instance
                    try:
                        reservation = image.run(1,1, key_name=self.key_name,
                                                addressing_type=addressing_type,
                                                user_data=user_data,
                                                placement=self.placement_zone,
                                                security_groups=sec_group,
                                                instance_type=instance_type)
                        instance_id = reservation.instances[0].id
                        log.debug("Booted VM %s" % instance_id)
                    except Exception:
                        log.exception("There was a problem creating an EC2 instance...")
                        return self.ERROR

                else: # get a spot instance of no more than maximum_price
                    try:
                        price_in_dollars = str(float(maximum_price) / 100)
                        # NOTE: fixed from security_groups=self.sec_group —
                        # that attribute does not exist; the AttributeError it
                        # raised was misattributed to an old boto version by
                        # the handler below.
                        reservation = connection.request_spot_instances(
                                                  price_in_dollars,
                                                  image.id,
                                                  key_name=self.key_name,
                                                  user_data=user_data,
                                                  placement=self.placement_zone,
                                                  addressing_type=addressing_type,
                                                  security_groups=sec_group,
                                                  instance_type=instance_type)
                        spot_id = str(reservation[0].id)
                        instance_id = ""
                        log.debug("Reserved instance %s at no more than %s" % (spot_id, price_in_dollars))
                    except AttributeError:
                        log.exception("Your version of boto doesn't seem to support "\
                                  "spot instances. You need at least 1.9")
                        return self.ERROR
                    except Exception:
                        log.exception("Problem creating an EC2 spot instance...")
                        return self.ERROR

            else:
                log.error("Couldn't find image %s on %s" % (vm_image, self.name))
                return self.ERROR

        except Exception:
            log.exception("Problem creating EC2 instance on on %s" % self.name)
            return self.ERROR

        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            #TODO: this is kind of pointless with EC2...
            log.debug("Cluster memory list has no sufficient memory " +\
                      "entries (Not supposed to happen). Returning error.")
            return self.ERROR
        log.verbose("vm_create - Memory entry found in given cluster: %d" %
                                                                    vm_mementry)
        new_vm = cluster_tools.VM(name = vm_name, id = instance_id, vmtype = vm_type, user = vm_user,
                    clusteraddr = self.network_address,
                    cloudtype = self.cloud_type, network = vm_networkassoc,
                    cpuarch = vm_cpuarch, image= vm_image,
                    memory = vm_mem, mementry = vm_mementry,
                    cpucores = vm_cores, storage = vm_storage,
                    keep_alive = vm_keepalive, job_per_core = job_per_core)

        if spot_id is not None:
            new_vm.spot_id = spot_id
        else:
            log.verbose("No spot ID to add to VM %s" % instance_id)

        try:
            self.resource_checkout(new_vm)
        except Exception:
            log.exception("Unexpected Error checking out resources when creating a VM. Programming error?")
            self.vm_destroy(new_vm, reason="Failed Resource checkout")
            return self.ERROR

        self.vms.append(new_vm)

        return 0
Example #4
0
    def vm_create(self,
                  vm_name,
                  vm_type="CernVM",
                  vm_user="******",
                  vm_networkassoc="",
                  vm_cpuarch="",
                  vm_image="",
                  vm_mem=1,
                  vm_cores=1,
                  vm_storage=30,
                  customization=None,
                  vm_keepalive=0,
                  job_proxy_file_path=None,
                  myproxy_creds_name=None,
                  myproxy_server=None,
                  myproxy_server_port=None,
                  job_per_core=False,
                  proxy_non_boot=False,
                  vmimage_proxy_file=None,
                  vmimage_proxy_file_path=None,
                  vm_loc=''):
        """Attempt to boot a new VM on StratusLab.

        Reuses (or creates and caches) a Runner for vm_loc, starts an
        instance, registers it in the class-level id maps, and checks out
        resources for the resulting cluster_tools.VM.

        Returns 0 on success, -1 if the instance could not be started, or
        self.ERROR if resource checkout fails.
        """
        log.debug("Running new instance with Marketplace id %s in StratusLab" %
                  str(vm_loc))

        # One Runner per marketplace id, cached at class level.
        if vm_loc not in StratusLabCluster.__idMap:
            runner = Runner(
                vm_loc, StratusLabCluster._v_configHolder
            )  #vm_loc: URL of VM or key? Does not seem to matter in Runner (l. 506)
            StratusLabCluster.__idMap[vm_loc] = runner
        else:
            runner = StratusLabCluster.__idMap[vm_loc]

        try:
            ids = runner.runInstance()
            log.debug("Created instances: %s" % str(ids))
            # Only the most recently started instance is registered here.
            new_id = ids[-1]
            new_vm = cluster_tools.VM(name=vm_name,
                                      id=str(new_id),
                                      vmtype=vm_type,
                                      user=vm_user,
                                      network=vm_networkassoc,
                                      cpuarch=vm_cpuarch,
                                      image=vm_image,
                                      memory=vm_mem,
                                      cpucores=vm_cores,
                                      storage=vm_storage,
                                      keep_alive=vm_keepalive,
                                      myproxy_creds_name=myproxy_creds_name,
                                      myproxy_server=myproxy_server,
                                      myproxy_server_port=myproxy_server_port,
                                      job_per_core=job_per_core)

            StratusLabCluster.__vmMap[str(new_id)] = vm_loc

            if vm_loc not in self.__runnerIds:
                self.__runnerIds[vm_loc] = [
                    str(new_id),
                ]
            else:
                self.__runnerIds[vm_loc].append(str(new_id))
            self.vms.append(new_vm)

            try:
                self.resource_checkout(new_vm)
            except Exception:
                log.exception(
                    "Unexpected error checking out resources when creating a VM. Programming error?"
                )
                return self.ERROR
            return 0

        except Exception as e:
            log.debug("Exception running new instance in StratusLab: %s" %
                      str(e))
            return -1
Example #5
0
class GoogleComputeEngineCluster(cluster_tools.ICluster):
    """ICluster implementation backed by the Google Compute Engine REST API."""

    GCE_SCOPE = 'https://www.googleapis.com/auth/compute'

    API_VERSION = 'v1beta15'
    GCE_URL = 'https://www.googleapis.com/compute/%s/projects/' % (API_VERSION)

    DEFAULT_ZONE = 'us-central1-a'  # will need to be option in job
    DEFAULT_MACHINE_TYPE = 'n1-standard-1-d'  # option specified in job config
    DEFAULT_INSTANCE_TYPE_LIST = _attr_list_to_dict(
        config.default_VMInstanceTypeList)
    DEFAULT_IMAGE = 'condorimagebase'

    DEFAULT_NETWORK = 'default'  # job option setup
    DEFAULT_SERVICE_EMAIL = 'default'
    DEFAULT_SCOPES = [
        'https://www.googleapis.com/auth/devstorage.full_control',
        'https://www.googleapis.com/auth/compute'
    ]

    def __init__(self,
                 name="Dummy Cluster",
                 host="localhost",
                 cloud_type="Dummy",
                 memory=None,
                 max_vm_mem=-1,
                 cpu_archs=None,
                 networks=None,
                 vm_slots=0,
                 cpu_cores=0,
                 storage=0,
                 hypervisor='xen',
                 boot_timeout=None,
                 auth_dat_file=None,
                 secret_file=None,
                 security_group=None,
                 project_id=None):
        """Authorize against GCE via OAuth 2.0 and initialize the cluster.

        auth_dat_file / secret_file are the OAuth credential-storage and
        client-secrets file paths; project_id is required — without it the
        constructor bails out early (leaving a half-initialized object, as
        before).
        """
        # Replace shared mutable default arguments ([]) with fresh lists.
        if memory is None:
            memory = []
        if cpu_archs is None:
            cpu_archs = []
        if networks is None:
            networks = []

        self.gce_hostname_prefix = 'gce-cs-vm'
        self.gce_hostname_counter = 0
        self.security_group = security_group
        self.auth_dat_file_path = auth_dat_file
        self.secret_file_path = secret_file
        self.project_id = project_id
        if not project_id:
            # NOTE(review): returning here skips the superclass __init__, so
            # the instance is unusable; callers appear to rely on this early
            # bail-out, so the behavior is preserved.
            return None

        # Perform OAuth 2.0 authorization.
        flow = flow_from_clientsecrets(self.secret_file_path,
                                       scope=self.GCE_SCOPE)
        auth_storage = Storage(self.auth_dat_file_path)
        credentials = auth_storage.get()

        if credentials is None or credentials.invalid:
            credentials = run(flow, auth_storage)
        http = httplib2.Http()
        self.auth_http = credentials.authorize(http)

        # Build service object
        self.gce_service = build('compute', self.API_VERSION)
        self.project_url = self.GCE_URL + self.project_id
        # Call super class's init
        cluster_tools.ICluster.__init__(self,
                                        name=name,
                                        host=host,
                                        cloud_type=cloud_type,
                                        memory=memory,
                                        max_vm_mem=max_vm_mem,
                                        cpu_archs=cpu_archs,
                                        networks=networks,
                                        vm_slots=vm_slots,
                                        cpu_cores=cpu_cores,
                                        storage=storage,
                                        hypervisor=hypervisor,
                                        boot_timeout=boot_timeout)

    def vm_create(self,
                  vm_name,
                  vm_type,
                  vm_user,
                  vm_networkassoc,
                  vm_cpuarch,
                  vm_image,
                  vm_mem,
                  vm_cores,
                  vm_storage,
                  customization=None,
                  vm_keepalive=0,
                  instance_type="",
                  maximum_price=0,
                  job_per_core=False,
                  securitygroup=None):
        """Attempt to boot a new VM on GCE.

        vm_image and instance_type may be dicts keyed by cluster name or
        network address (with fallbacks to the class defaults) or plain
        strings.  Issues an instances().insert request, waits for it to
        complete, and registers the resulting cluster_tools.VM.

        Returns 0 on success, self.ERROR on resource problems, or None when
        no AMI is found or the API gives no usable response.
        """
        try:
            vm_ami = vm_image[self.network_address]
        except (KeyError, TypeError):
            log.debug("No AMI for %s, trying default" % self.network_address)
            try:
                vm_ami = vm_image["default"]
            except (KeyError, TypeError):
                log.exception("Can't find a suitable AMI")
                return
        # Resolve the instance type: job-supplied mapping first, then the
        # configured default list, then the single class default.
        try:
            if self.name in instance_type.keys():
                i_type = instance_type[self.name]
            else:
                i_type = instance_type[self.network_address]
        except (KeyError, TypeError, AttributeError):
            log.debug("No instance type for %s, trying default" %
                      self.network_address)
            try:
                if self.name in self.DEFAULT_INSTANCE_TYPE_LIST.keys():
                    i_type = self.DEFAULT_INSTANCE_TYPE_LIST[self.name]
                else:
                    i_type = self.DEFAULT_INSTANCE_TYPE_LIST[
                        self.network_address]
            except (KeyError, TypeError):
                log.debug(
                    "No default instance type found for %s, trying single default"
                    % self.network_address)
                i_type = self.DEFAULT_MACHINE_TYPE
        instance_type = i_type
        if vm_image:
            vm_image_name = vm_ami
        else:
            vm_image_name = self.DEFAULT_IMAGE

        # Construct resource URLs for the request body.
        image_url = '%s%s/global/images/%s' % (self.GCE_URL, self.project_id,
                                               vm_image_name)

        # NOTE: fixed NameError — the original referenced the undefined name
        # 'vm_instance_type' here; the resolved type is in 'instance_type'.
        machine_type_url = '%s/zones/%s/machineTypes/%s' % (
            self.project_url, self.DEFAULT_ZONE, instance_type)
        network_url = '%s/global/networks/%s' % (self.project_url,
                                                 self.DEFAULT_NETWORK)

        if customization:
            user_data = nimbus_xml.ws_optional(customization)
        else:
            user_data = ""

        next_instance_name = self.generate_next_instance_name()
        # Construct the request body
        instance = {
            'name': next_instance_name,
            'machineType': machine_type_url,
            'image': image_url,
            'networkInterfaces': [{
                'accessConfigs': [{
                    'type': 'ONE_TO_ONE_NAT',
                    'name': 'External NAT'
                }],
                'network': network_url
            }],
            'serviceAccounts': [{
                'email': self.DEFAULT_SERVICE_EMAIL,
                'scopes': self.DEFAULT_SCOPES
            }],
            'metadata': {
                'items': [
                    {
                        'key': 'user-data',
                        'value': user_data,
                    },
                ]
            }
        }

        # Create the instance and block until the operation finishes.
        response = None
        request = self.gce_service.instances().insert(project=self.project_id,
                                                      body=instance,
                                                      zone=self.DEFAULT_ZONE)
        try:
            response = request.execute(self.auth_http)
            response = self._blocking_call(self.gce_service, self.auth_http,
                                           response)
        except Exception as e:
            log.error("Error creating VM on gce: %s" % e)

        if response and 'targetId' in response:
            target_id = response['targetId']
        elif response:
            # Response without a targetId — nothing to track.
            return
        else:
            # No response at all from the API.
            return
        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            #TODO: this is kind of pointless with EC2..., but the resource code depends on it
            log.debug("Cluster memory list has no sufficient memory " +\
                      "entries (Not supposed to happen). Returning error.")
            return self.ERROR
        new_vm = cluster_tools.VM(
            name=next_instance_name,
            vmtype=vm_type,
            user=vm_user,
            clusteraddr=self.network_address,
            id=target_id,
            cloudtype=self.cloud_type,
            network=vm_networkassoc,
            hostname=self.construct_hostname(next_instance_name),
            cpuarch=vm_cpuarch,
            image=vm_image,
            mementry=vm_mementry,
            memory=vm_mem,
            cpucores=vm_cores,
            storage=vm_storage,
            keep_alive=vm_keepalive,
            job_per_core=job_per_core)

        try:
            self.resource_checkout(new_vm)
        except Exception:
            log.exception(
                "Unexpected Error checking out resources when creating a VM. Programming error?"
            )
            self.vm_destroy(new_vm, reason="Failed Resource checkout")
            return self.ERROR

        self.vms.append(new_vm)
        return 0
Example #6
0
    def vm_create(self,
                  vm_name,
                  vm_type,
                  vm_user,
                  vm_networkassoc,
                  vm_cpuarch,
                  vm_image,
                  vm_mem,
                  vm_cores,
                  vm_storage,
                  customization=None,
                  vm_keepalive=0,
                  instance_type="",
                  job_per_core=False,
                  securitygroup=None,
                  key_name=""):
        """ Create a VM on OpenStack.

        Resolves an image and flavor for this cluster (falling back through
        per-cluster, per-address, and configured defaults), boots the
        instance via novaclient, and records it in self.vms after a
        successful resource checkout.

        securitygroup defaults to None (was a shared mutable [] default)
        and is normalized to an empty list, so callers passing a list are
        unaffected.

        Returns 0 on success, self.ERROR on failure, or None when no
        usable image can be found.
        """
        # Normalize here instead of using a mutable [] default, which is
        # shared across every call to the method.
        if securitygroup is None:
            securitygroup = []
        nova = self._get_creds_nova()
        # Only pass a keypair name that actually exists on this cloud.
        if len(key_name) > 0:
            if not nova.keypairs.findall(name=key_name):
                key_name = ""
        # Resolve the image: per-cluster entry, then per-address entry,
        # then the configured default AMI map, then a single "default".
        # (except Exception, not bare except, so Ctrl-C still propagates.)
        try:
            image = vm_image[self.name]
        except Exception:
            try:
                image = vm_image[self.network_address]
            except Exception:
                try:
                    vm_default_ami = _attr_list_to_dict(config.default_VMAMI)
                    if self.name in vm_default_ami.keys():
                        image = vm_default_ami[self.name]
                    else:
                        image = vm_default_ami[self.network_address]
                except Exception:
                    try:
                        image = vm_default_ami["default"]
                    except Exception:
                        log.exception("Can't find a suitable AMI")
                        return
        # Resolve the flavor (instance type) with the same fallback chain.
        # When instance_type is the "" default, .keys() raises and we fall
        # through to the cluster defaults.
        try:
            if self.name in instance_type.keys():
                i_type = instance_type[self.name]
            else:
                i_type = instance_type[self.network_address]
        except Exception:
            log.debug("No instance type for %s, trying default" %
                      self.network_address)
            try:
                if self.name in self.DEFAULT_INSTANCE_TYPE_LIST.keys():
                    i_type = self.DEFAULT_INSTANCE_TYPE_LIST[self.name]
                else:
                    i_type = self.DEFAULT_INSTANCE_TYPE_LIST[
                        self.network_address]
            except Exception:
                log.debug(
                    "No default instance type found for %s, trying single default"
                    % self.network_address)
                i_type = self.DEFAULT_INSTANCE_TYPE

        # Find a memory entry *before* booting: failing this check after
        # nova.servers.create() would leak a running, untracked instance.
        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            #TODO: this is kind of pointless with EC2...
            log.debug("Cluster memory list has no sufficient memory " +\
                      "entries (Not supposed to happen). Returning error.")
            return self.ERROR
        log.verbose("vm_create - Memory entry found in given cluster: %d" %
                    vm_mementry)

        # novaclient's servers.create() requires a display name for the
        # instance; the original call omitted it. Use the same vm_name we
        # record in the VM object below.
        instance = nova.servers.create(name=vm_name,
                                       image=image,
                                       flavor=i_type,
                                       key_name=key_name)
        instance_id = instance.id

        new_vm = cluster_tools.VM(name=vm_name,
                                  id=instance_id,
                                  vmtype=vm_type,
                                  user=vm_user,
                                  clusteraddr=self.network_address,
                                  cloudtype=self.cloud_type,
                                  network=vm_networkassoc,
                                  cpuarch=vm_cpuarch,
                                  image=vm_image,
                                  memory=vm_mem,
                                  mementry=vm_mementry,
                                  cpucores=vm_cores,
                                  storage=vm_storage,
                                  keep_alive=vm_keepalive,
                                  job_per_core=job_per_core)

        # Check the new VM out of this cluster's resource pool; on any
        # failure, tear the instance down so cloud and tracker stay in sync.
        try:
            self.resource_checkout(new_vm)
        except Exception:
            log.exception(
                "Unexpected Error checking out resources when creating a VM. Programming error?"
            )
            self.vm_destroy(new_vm, reason="Failed Resource checkout")
            return self.ERROR

        self.vms.append(new_vm)

        return 0