def CreateScratchDisk(self, disk_spec):
  """Create a VM's scratch disk.

  Args:
    disk_spec: virtual_machine.BaseDiskSpec object of the disk.

  Raises:
    CreationError: If an NFS disk is listed but the NFS service not created.
  """
  # Build the set of disks requested before handing them off for creation.
  disk_group = []
  for _ in range(disk_spec.num_striped_disks):
    if disk_spec.disk_type == disk.NFS:
      # NFS disks come from the NFS service and need no device/number
      # bookkeeping below.
      new_disk = self._GetNfsService().CreateNfsDisk()
    else:
      new_disk = aws_disk.AwsDisk(disk_spec, self.zone, self.machine_type)
      if disk_spec.disk_type == disk.LOCAL:
        new_disk.device_letter = chr(
            ord(DRIVE_START_LETTER) + self.local_disk_counter)
        # Local disk numbers start at 1 (0 is the system disk).
        new_disk.disk_number = self.local_disk_counter + 1
        self.local_disk_counter += 1
        if self.local_disk_counter > self.max_local_disks:
          raise errors.Error('Not enough local disks.')
      else:
        # Remote disk numbers start at 1 + max_local disks (0 is the system
        # disk and local disks occupy [1, max_local_disks]).
        new_disk.disk_number = (
            self.remote_disk_counter + 1 + self.max_local_disks)
        self.remote_disk_counter += 1
    disk_group.append(new_disk)
  self._CreateScratchDiskFromDisks(disk_spec, disk_group)
def _GetDeviceFromVDisk(self, vm):
  """Polls the attached VM until the volume's block device path is found.

  Runs 'fdisk -l' on the attached VM and looks for a disk whose reported
  size in bytes is at least this volume's size, skipping device paths that
  were already claimed by other volumes on the VM. Retries every 10 seconds
  until _MAX_FIND_DEVICE_SECONDS elapses.

  Args:
    vm: virtual machine whose device_paths_detected set tracks device paths
      already assigned to other volumes.

  Raises:
    errors.Error: If no matching device path is found before the deadline.
  """
  cmd = 'fdisk -l'
  endtime = time.time() + _MAX_FIND_DEVICE_SECONDS
  self.device_path = None
  while time.time() < endtime:
    stdout, _ = self.attached_vm.RemoteCommand(cmd, should_log=True)
    # Parse lines that report a disk size in bytes, e.g.
    # 'Disk /dev/vdd: 1000 GiB, 1073741824000 bytes, 2097152000 sectors'.
    # BUGFIX: the pattern previously began with r'\Disk'; '\D' matches any
    # single non-digit character, so it also matched strings like 'xisk'.
    # Anchor on the literal word 'Disk' instead.
    disks = re.findall(r'Disk (\S+): .* (\d+) bytes,', stdout)
    for device_path, disk_size in disks:
      logging.info('disk_path: %s, disk_size: %s', device_path, disk_size)
      # fdisk reports bytes; self.disk_size is in GB (hence the 1e9 factor).
      if int(disk_size) >= self.disk_size * 1e9:
        if device_path not in vm.device_paths_detected:
          self.device_path = device_path
          vm.device_paths_detected.add(device_path)
          logging.info('device path found: %s', self.device_path)
          break
    if self.device_path:
      break
    time.sleep(10)
  if not self.device_path:
    raise errors.Error('IBMCLOUD ERROR: failed to find device path.')
def _AllocateBootDisk(self, disk_spec):
  """Allocates the VM's boot (system) disk as the scratch disk.

  The boot disk can only be allocated once; requesting multiple data disks
  this way is an error.

  Args:
    disk_spec: virtual_machine.BaseDiskSpec object of the disk.

  Raises:
    errors.Error: when the boot disk has already been allocated as a data
      disk.
  """
  # Guard: a second allocation attempt is a caller error.
  if self.boot_disk_allocated:
    raise errors.Error('Only one boot disk can be created per VM')
  boot_device_path = '/dev/%s' % self.boot_device['name']
  scratch_disk = rackspace_disk.RackspaceBootDisk(
      disk_spec, self.zone, self.project, boot_device_path, self.image)
  self.boot_disk_allocated = True
  self.scratch_disks.append(scratch_disk)
  scratch_disk.Create()
  # Create the mount point and hand ownership to the login user.
  mount_path = disk_spec.mount_point
  mk_cmd = 'sudo mkdir -p {0}; sudo chown -R $USER:$USER {0};'.format(
      mount_path)
  self.RemoteCommand(mk_cmd)
def _Create(self):
  """Create a ProfitBricks VM instance."""
  # Read the ssh public key that will be injected into the new VM.
  with open(self.ssh_public_key) as key_file:
    public_key = key_file.read().rstrip('\n')

  if self.image_alias is None:
    # Find an Ubuntu image that matches our location.
    self.image = util.ReturnImage(self.header, self.location)

  # Assemble the server POST body from its named pieces.
  boot_volume = {
      'properties': {
          'size': self.boot_volume_size,
          'name': 'boot volume',
          'image': self.image,
          'imageAlias': self.image_alias,
          'type': self.boot_volume_type,
          'sshKeys': [public_key],
          'availabilityZone': self.availability_zone,
      }
  }
  nic = {
      'properties': {
          'name': 'nic1',
          'lan': self.lan_id,
      }
  }
  new_server = {
      'properties': {
          'name': self.name,
          'ram': self.ram,
          'cores': self.cores,
          'availabilityZone': self.zone,
      },
      'entities': {
          'volumes': {'items': [boot_volume]},
          'nics': {'items': [nic]},
      },
  }

  # Provision the server against the datacenter's servers endpoint.
  url = '%s/datacenters/%s/servers' % (PROFITBRICKS_API, self.dc_id)
  response = util.PerformRequest('post', url, self.header, json=new_server)
  logging.info('Creating VM: %s' % self.name)

  # Pull the status URL and the new server's id out of the response.
  self.server_status = response.headers['Location']
  body = response.json()
  self.server_id = body['id']

  # The freshly created server will be in a locked and unusable state for a
  # while, and it cannot be deleted or modified in this state. Wait for the
  # action to finish and check the reported result.
  if not self._WaitUntilReady(self.server_status):
    raise errors.Error('VM creation failed, see log.')
def __init__(self, benchmark_info):
  """Initializes the benchmark spec from a benchmark_info dictionary.

  Args:
    benchmark_info: dict describing the benchmark; 'name', 'num_machines'
      and 'scratch_disk' keys are read here.
  """
  # A per-benchmark config file (if supplied via --benchmark_config_pair)
  # takes precedence over the individual FLAGS below.
  if (FLAGS.benchmark_config_pair and
      benchmark_info['name'] in FLAGS.benchmark_config_pair.keys()):
    # TODO(user): Unify naming between config_reader and
    # perfkitbenchmarker.
    self.config = config_reader.ConfigLoader(
        FLAGS.benchmark_config_pair[benchmark_info['name']])
  self.networks = {}
  self.firewalls = {}
  self.vms = []
  self.vm_dict = {'default': []}
  self.benchmark_name = benchmark_info['name']
  # self.config only exists when the branch above ran, so hasattr acts as
  # the "was a config file supplied?" test.
  if hasattr(self, 'config'):
    # Flatten the parsed config into {section: {option: value}}.
    config_dict = {}
    for section in self.config._config.sections():
      config_dict[section] = self.config.GetSectionOptionsAsDictionary(
          section)
    self.cloud = config_dict['cluster']['type']
    self.project = config_dict['cluster']['project']
    self.zones = [config_dict['cluster']['zone']]
    self.image = []
    self.machine_type = []
    # Register one vm_dict bucket per node section ('node:<name>').
    for node in self.config.node_sections:
      self.vm_dict[node.split(':')[1]] = []
    # Create the VMs for each node section in parallel; the worker appends
    # to self.image / self.machine_type / self.vms as it goes.
    args = [((config_dict[node], node.split(':')[1]), {})
            for node in self.config.node_sections]
    vm_util.RunThreaded(
        self.CreateVirtualMachineFromNodeSection, args)
    self.num_vms = len(self.vms)
    # Collapse the per-VM lists into comma-separated strings for reporting.
    self.image = ','.join(self.image)
    self.zones = ','.join(self.zones)
    self.machine_type = ','.join(self.machine_type)
  else:
    # No config file: drive everything from command-line FLAGS.
    self.cloud = FLAGS.cloud
    self.project = FLAGS.project
    self.zones = FLAGS.zones
    self.image = FLAGS.image
    self.machine_type = FLAGS.machine_type
    if benchmark_info['num_machines'] is None:
      self.num_vms = FLAGS.num_vms
    else:
      self.num_vms = benchmark_info['num_machines']
    self.scratch_disk = benchmark_info['scratch_disk']
    self.scratch_disk_size = FLAGS.scratch_disk_size
    self.scratch_disk_type = FLAGS.scratch_disk_type
    self.scratch_disk_iops = FLAGS.scratch_disk_iops
    # If fewer zones than VMs were given, the extra VMs reuse the last zone.
    self.vms = [
        self.CreateVirtualMachine(
            self.zones[min(index, len(self.zones) - 1)])
        for index in range(self.num_vms)]
    self.vm_dict['default'] = self.vms
    for vm in self.vms:
      # If we are using local disks and num_striped_disks has not been
      # set, then we want to set it to stripe all local disks together.
      if (FLAGS.scratch_disk_type == disk.LOCAL and
          benchmark_info['scratch_disk'] and
          not FLAGS['num_striped_disks'].present):
        num_striped_disks = (vm.max_local_disks //
                             benchmark_info['scratch_disk'])
        if num_striped_disks == 0:
          raise errors.Error(
              'Not enough local disks to run benchmark "%s". It requires at '
              'least %d local disk(s). The specified machine type has %d '
              'local disk(s).' % (benchmark_info['name'],
                                  int(benchmark_info['scratch_disk']),
                                  vm.max_local_disks))
      else:
        num_striped_disks = FLAGS.num_striped_disks
      # One disk spec (and mount point) per requested scratch disk.
      for i in range(benchmark_info['scratch_disk']):
        mount_point = '%s%d' % (FLAGS.scratch_dir, i)
        disk_spec = disk.BaseDiskSpec(
            self.scratch_disk_size, self.scratch_disk_type,
            mount_point, self.scratch_disk_iops, num_striped_disks)
        vm.disk_specs.append(disk_spec)
  self.file_name = '%s/%s' % (vm_util.GetTempDir(), benchmark_info['name'])
  self.deleted = False
  self.always_call_cleanup = False
def GetDevicePath(self):
  """Unsupported for Kubernetes local disks.

  A Kubernetes local disk is an empty directory mounted from the host into
  the docker instance, so there is no raw device to format — device-path
  access is intentionally disallowed.

  Raises:
    errors.Error: always.
  """
  raise errors.Error('GetDevicePath not supported for Kubernetes local disk')
def _GetKeyFromNetworkSpec(cls, spec): """Returns a key used to register Network instances.""" if cls.CLOUD is None: raise errors.Error('Networks should have CLOUD attributes.') return (cls.CLOUD, spec.zone)
def Provision(self):
  """Prepares the VMs and networks necessary for the benchmark to run."""
  # Create capacity reservations if the cloud supports it. Note that the
  # capacity reservation class may update the VMs themselves. This is true
  # on AWS, because the VM needs to be aware of the capacity reservation id
  # before its Create() method is called. Furthermore, if the user does not
  # specify an AWS zone, but a region instead, the AwsCapacityReservation
  # class will make a reservation in a zone that has sufficient capacity.
  # In this case the VM's zone attribute, and the VMs network instance
  # need to be updated as well.
  if self.capacity_reservations:
    vm_util.RunThreaded(lambda res: res.Create(), self.capacity_reservations)

  # Sort networks into a guaranteed order of creation based on dict key.
  # There is a finite limit on the number of threads that are created to
  # provision networks. Until support is added to provision resources in an
  # order based on dependencies, this key ordering can be used to avoid
  # deadlock by placing dependent networks later and their dependencies
  # earlier.
  networks = [
      self.networks[key] for key in sorted(six.iterkeys(self.networks))
  ]
  vm_util.RunThreaded(lambda net: net.Create(), networks)

  # VPC peering is currently only supported for connecting 2 VPC networks.
  if self.vpc_peering:
    if len(networks) > 2:
      raise errors.Error(
          'Networks of size %d are not currently supported.'
          % (len(networks)))
    # Ignore peering for a single network (nothing to connect).
    elif len(networks) == 2:
      networks[0].Peer(networks[1])

  if self.container_registry:
    self.container_registry.Create()
    # Resolve each non-static container image through the registry.
    for container_spec in six.itervalues(self.container_specs):
      if container_spec.static_image:
        continue
      container_spec.image = self.container_registry.GetOrBuild(
          container_spec.image)

  if self.container_cluster:
    self.container_cluster.Create()

  # Managed NFS/SMB: create after network setup but before VMs are created.
  if self.nfs_service and self.nfs_service.CLOUD != nfs_service.UNMANAGED:
    self.nfs_service.Create()
  if self.smb_service:
    self.smb_service.Create()

  for placement_group_object in self.placement_groups.values():
    placement_group_object.Create()

  if self.vms:
    # We separate out creating/booting the VMs from preparing them as two
    # phases so that we don't slow down the creation of all the VMs by
    # running commands on the VMs that booted.
    vm_util.RunThreaded(
        self.CreateAndBootVm,
        self.vms,
        post_task_delay=FLAGS.create_and_boot_post_task_delay)
    # Unmanaged NFS runs on a VM, so it can only be created once VMs exist.
    if self.nfs_service and self.nfs_service.CLOUD == nfs_service.UNMANAGED:
      self.nfs_service.Create()
    vm_util.RunThreaded(self.PrepareVmAfterBoot, self.vms)

    # SSH config generation covers only non-Windows VMs.
    sshable_vms = [
        vm for vm in self.vms if vm.OS_TYPE not in os_types.WINDOWS_OS_TYPES
    ]
    sshable_vm_groups = {}
    for group_name, group_vms in six.iteritems(self.vm_groups):
      sshable_vm_groups[group_name] = [
          vm for vm in group_vms
          if vm.OS_TYPE not in os_types.WINDOWS_OS_TYPES
      ]
    vm_util.GenerateSSHConfig(sshable_vms, sshable_vm_groups)
  if self.spark_service:
    self.spark_service.Create()
  if self.dpb_service:
    self.dpb_service.Create()
  if hasattr(self, 'relational_db') and self.relational_db:
    self.relational_db.SetVms(self.vm_groups)
    self.relational_db.Create()
  if self.spanner:
    self.spanner.Create()
  if self.tpus:
    vm_util.RunThreaded(lambda tpu: tpu.Create(), self.tpus)
  if self.edw_service:
    if (not self.edw_service.user_managed and
        self.edw_service.SERVICE_TYPE == 'redshift'):
      # The benchmark creates the Redshift cluster's subnet group in the
      # already provisioned virtual private cloud (vpc).
      for network in networks:
        if network.__class__.__name__ == 'AwsNetwork':
          self.edw_service.cluster_subnet_group.subnet_id = network.subnet.id
    self.edw_service.Create()
  if self.vpn_service:
    self.vpn_service.Create()
def GetDevicePath(self):
  """Unsupported for Docker disks.

  Raises:
    errors.Error: always.
  """
  raise errors.Error('GetDevicePath not supported for Docker.')
def GetDevicePath(self):
  """Unsupported for DigitalOcean local disks.

  The local disk is always the boot disk, and it cannot be partitioned or
  reformatted, so no device path is exposed.

  Raises:
    errors.Error: always.
  """
  raise errors.Error(
      'GetDevicePath not supported for DigitalOcean local disks.')
def GetDevicePath(self):
  """Unsupported for DigitalOcean disks.

  DigitalOcean VMs only have a single disk block device, which is in use
  for the live filesystem, so it cannot serve as a scratch disk device.

  Raises:
    errors.Error: always.
  """
  raise errors.Error('GetDevicePath not supported for DigitalOcean.')