def test_run_attach_detach_volume(self):
    """Make sure volume can be attached and detached from instance."""
    inst = {}
    inst['image_id'] = 1
    inst['reservation_id'] = 'r-fakeres'
    inst['launch_time'] = '10'
    inst['user_id'] = 'fake'
    inst['project_id'] = 'fake'
    inst['instance_type_id'] = '2'  # m1.tiny
    inst['ami_launch_index'] = 0
    instance_id = db.instance_create(self.context, inst)['id']
    mountpoint = "/dev/sdf"
    volume_id = self._create_volume()
    self.volume.create_volume(self.context, volume_id)
    if FLAGS.fake_tests:
        db.volume_attached(self.context, volume_id, instance_id,
                           mountpoint)
    else:
        self.compute.attach_volume(self.context,
                                   instance_id,
                                   volume_id,
                                   mountpoint)
    vol = db.volume_get(context.get_admin_context(), volume_id)
    self.assertEqual(vol['status'], "in-use")
    self.assertEqual(vol['attach_status'], "attached")
    self.assertEqual(vol['mountpoint'], mountpoint)

    instance_ref = db.volume_get_instance(self.context, volume_id)
    self.assertEqual(instance_ref['id'], instance_id)

    self.assertRaises(exception.Error,
                      self.volume.delete_volume,
                      self.context,
                      volume_id)

    if FLAGS.fake_tests:
        db.volume_detached(self.context, volume_id)
    else:
        self.compute.detach_volume(self.context,
                                   instance_id,
                                   volume_id)
    vol = db.volume_get(self.context, volume_id)
    self.assertEqual(vol['status'], "available")

    self.volume.delete_volume(self.context, volume_id)
    self.assertRaises(exception.VolumeNotFound,
                      db.volume_get,
                      self.context,
                      volume_id)
    db.instance_destroy(self.context, instance_id)
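# NOTE: the tests in this listing call a _create_volume() helper that is not
# shown here. The sketch below is an assumption about what such a helper
# might look like, built only from the db.volume_create()/db.volume_get()
# calls visible above; field names other than size/snapshot_id are guesses.
def _create_volume(self, size='0', snapshot_id=None):
    """Create a volume row directly in the database and return its id."""
    vol = {}
    vol['size'] = size
    vol['snapshot_id'] = snapshot_id
    vol['user_id'] = 'fake'
    vol['project_id'] = 'fake'
    vol['status'] = "creating"
    vol['attach_status'] = "detached"
    return db.volume_create(context.get_admin_context(), vol)['id']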
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
    """Picks a host that is up and has the fewest volumes."""
    elevated = context.elevated()

    volume_ref = db.volume_get(context, volume_id)
    availability_zone = volume_ref.get('availability_zone')

    zone, host = None, None
    if availability_zone:
        zone, _x, host = availability_zone.partition(':')
    if host and context.is_admin:
        service = db.service_get_by_args(elevated, host, 'engine-volume')
        if not self.service_is_up(service):
            raise exception.WillNotSchedule(host=host)
        driver.cast_to_volume_host(context, host, 'create_volume',
                                   volume_id=volume_id, **_kwargs)
        return None

    results = db.service_get_all_volume_sorted(elevated)
    if zone:
        results = [(service, gigs) for (service, gigs) in results
                   if service['availability_zone'] == zone]
    for result in results:
        (service, volume_gigabytes) = result
        if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
            msg = _("Not enough allocatable volume gigabytes remaining")
            raise exception.NoValidHost(reason=msg)
        if self.service_is_up(service):
            driver.cast_to_volume_host(context, service['host'],
                                       'create_volume',
                                       volume_id=volume_id, **_kwargs)
            return None
    msg = _("Is the appropriate service running?")
    raise exception.NoValidHost(reason=msg)
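# NOTE: illustration only, not part of the scheduler above; the host name is
# invented. An admin can pin a volume to one host by passing an availability
# zone of the form "<zone>:<host>"; str.partition(':') splits it as shown,
# and a bare zone name leaves host empty so the least-loaded selection runs.
zone, _sep, host = "engine:volume-node-01".partition(':')
assert (zone, host) == ("engine", "volume-node-01")
zone, _sep, host = "us-east-1".partition(':')
assert (zone, host) == ("us-east-1", "")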
def test_create_volume_from_snapshot(self):
    """Test volume can be created from a snapshot."""
    volume_src_id = self._create_volume()
    self.volume.create_volume(self.context, volume_src_id)
    snapshot_id = self._create_snapshot(volume_src_id)
    self.volume.create_snapshot(self.context, volume_src_id, snapshot_id)
    volume_dst_id = self._create_volume(0, snapshot_id)
    self.volume.create_volume(self.context, volume_dst_id, snapshot_id)
    self.assertEqual(
        volume_dst_id,
        db.volume_get(context.get_admin_context(), volume_dst_id).id)
    self.assertEqual(
        snapshot_id,
        db.volume_get(context.get_admin_context(),
                      volume_dst_id).snapshot_id)

    self.volume.delete_volume(self.context, volume_dst_id)
    self.volume.delete_snapshot(self.context, snapshot_id)
    self.volume.delete_volume(self.context, volume_src_id)
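# NOTE: _create_snapshot() is referenced above but not shown in this listing.
# The helper below is a guess at its shape, mirroring the _create_volume()
# sketch earlier; the exact fields depend on what db.snapshot_create()
# expects in this codebase.
def _create_snapshot(self, volume_id, size='0'):
    """Create a snapshot row directly in the database and return its id."""
    snap = {}
    snap['volume_id'] = volume_id
    snap['user_id'] = 'fake'
    snap['project_id'] = 'fake'
    snap['status'] = "creating"
    snap['volume_size'] = size
    return db.snapshot_create(context.get_admin_context(), snap)['id']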
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
    """Picks the best host based on requested drive type capability."""
    volume_ref = db.volume_get(context, volume_id)

    host = self._check_host_enforcement(context,
                                        volume_ref['availability_zone'])
    if host:
        driver.cast_to_volume_host(context, host, 'create_volume',
                                   volume_id=volume_id, **_kwargs)
        return None

    volume_type_id = volume_ref['volume_type_id']
    if volume_type_id:
        volume_type = volume_types.get_volume_type(context, volume_type_id)

    if volume_type_id is None or \
            volume_types.is_vsa_volume(volume_type_id, volume_type):
        LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
        return super(VsaScheduler, self).schedule_create_volume(
            context, volume_id, *_args, **_kwargs)

    self._print_capabilities_info()

    drive_type = {
        'name': volume_type['extra_specs'].get('drive_name'),
        'type': volume_type['extra_specs'].get('drive_type'),
        'size': int(volume_type['extra_specs'].get('drive_size')),
        'rpm': volume_type['extra_specs'].get('drive_rpm'),
        }

    LOG.debug(_("Spawning volume %(volume_id)s with drive type "
                "%(drive_type)s"), locals())

    request_spec = {'size': volume_ref['size'],
                    'drive_type': drive_type}
    hosts = self._filter_hosts("volume", request_spec)

    try:
        (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
    except Exception:
        LOG.exception(_("Error creating volume"))
        if volume_ref['to_vsa_id']:
            db.vsa_update(context, volume_ref['to_vsa_id'],
                          dict(status=VsaState.FAILED))
        raise

    if host:
        driver.cast_to_volume_host(context, host, 'create_volume',
                                   volume_id=volume_id, **_kwargs)
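# NOTE: illustration only; the names and values below are invented. With a
# volume type whose extra_specs look like this, the drive_type dict built by
# the scheduler above comes out as shown in the assert.
extra_specs = {'drive_name': 'SATA_500',
               'drive_type': 'SATA',
               'drive_size': '500',
               'drive_rpm': '7200'}
drive_type = {'name': extra_specs.get('drive_name'),
              'type': extra_specs.get('drive_type'),
              'size': int(extra_specs.get('drive_size')),
              'rpm': extra_specs.get('drive_rpm')}
assert drive_type == {'name': 'SATA_500', 'type': 'SATA',
                      'size': 500, 'rpm': '7200'}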
def test_create_delete_volume(self):
    """Test volume can be created and deleted."""
    volume_id = self._create_volume()
    self.volume.create_volume(self.context, volume_id)
    self.assertEqual(
        volume_id,
        db.volume_get(context.get_admin_context(), volume_id).id)

    self.volume.delete_volume(self.context, volume_id)
    self.assertRaises(exception.NotFound,
                      db.volume_get,
                      self.context,
                      volume_id)
def _get_target(volume_id):
    """Get iscsi name and portal from volume name and host.

    For this method to work the following is needed:
    1) volume_ref['host'] must resolve to something other than loopback
    """
    volume_ref = db.volume_get(context.get_admin_context(), volume_id)
    result = (None, None)
    try:
        (r, _e) = utils.execute('iscsiadm', '-m', 'discovery',
                                '-t', 'sendtargets',
                                '-p', volume_ref['host'],
                                run_as_root=True)
    except exception.ProcessExecutionError as exc:
        LOG.exception(exc)
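# NOTE: _get_target() above is truncated after the discovery call; the parsing
# step that would normally follow is sketched here as an assumption. iscsiadm
# sendtargets output has lines of the form "<ip>:<port>,<tpgt> <iqn>", and the
# "volume-%08x" naming convention and the helper name are not taken from this
# listing.
def _parse_sendtargets(output, volume_id):
    """Return (iscsi_name, portal) for the given volume, or (None, None)."""
    for line in output.splitlines():
        portal_part, _sep, iscsi_name = line.partition(' ')
        if 'volume-%08x' % volume_id in iscsi_name:
            portal = portal_part.split(',')[0]
            return (iscsi_name, portal)
    return (None, None)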
def _attach_volume(self):
    """Attach volumes to an instance. This function also sets
       a fake log message."""
    volume_id_list = []
    for index in xrange(3):
        vol = {}
        vol['size'] = 0
        vol_ref = db.volume_create(self.context, vol)
        self.volume.create_volume(self.context, vol_ref['id'])
        vol_ref = db.volume_get(self.context, vol_ref['id'])

        # each volume has a different mountpoint
        mountpoint = "/dev/sd" + chr((ord('b') + index))
        db.volume_attached(self.context, vol_ref['id'], self.instance_id,
                           mountpoint)

        volume_id_list.append(vol_ref['id'])

    return volume_id_list
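# NOTE: a matching cleanup helper is not shown in this listing; the sketch
# below is an assumption about what it might look like, undoing exactly what
# _attach_volume() sets up using only calls already seen above.
def _detach_volume(self, volume_id_list):
    """Detach and delete the volumes created by _attach_volume()."""
    for volume_id in volume_id_list:
        db.volume_detached(self.context, volume_id)
        self.volume.delete_volume(self.context, volume_id)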