def test_view_builder_list(self):
    view_builder = views_types.ViewBuilder()

    now = timeutils.isotime()
    raw_volume_types = []
    for i in range(0, 10):
        raw_volume_types.append(
            dict(
                name='new_type',
                deleted=False,
                created_at=now,
                updated_at=now,
                extra_specs={},
                deleted_at=None,
                id=42 + i
            )
        )

    request = fakes.HTTPRequest.blank("/v2")
    output = view_builder.index(request, raw_volume_types)

    self.assertIn('volume_types', output)
    for i in range(0, 10):
        expected_volume_type = dict(
            name='new_type',
            extra_specs={},
            id=42 + i
        )
        self.assertDictMatch(output['volume_types'][i],
                             expected_volume_type)
def _build_rate_limit(self, rate_limit):
    _get_utc = datetime.datetime.utcfromtimestamp
    next_avail = _get_utc(rate_limit["resetTime"])
    return {
        "verb": rate_limit["verb"],
        "value": rate_limit["value"],
        "remaining": int(rate_limit["remaining"]),
        "unit": rate_limit["unit"],
        "next-available": timeutils.isotime(at=next_avail),
    }
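# A minimal usage sketch, not from the original module: feeds
# _build_rate_limit a hypothetical raw limit dict (the field values
# below are illustrative only) and shows the shape of the result.
def _example_build_rate_limit(view_builder):
    raw = {
        "verb": "POST",
        "value": 10,
        "remaining": "2",         # note: may arrive as a string
        "unit": "MINUTE",
        "resetTime": 1400000000,  # epoch seconds
    }
    # The helper coerces "remaining" to int and renders resetTime as
    # an ISO 8601 "next-available" timestamp.
    return view_builder._build_rate_limit(raw)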
def _schedule(self, context, request_spec, filter_properties=None):
    """Return a list of hosts that meet the required specs,
    ordered by their fitness.
    """
    s = super(PowerVCSchedulerDriver, self)
    hosts = s._schedule(context, request_spec,
                        filter_properties=filter_properties)
    if not hosts:
        # No hosts fit; at minimum we cannot find hosts that match
        # the capacity requirement. Log an error to the volume's
        # metadata.

        # Collect request-related information.
        volume_id = request_spec['volume_id']
        vol_properties = request_spec['volume_properties']
        req_size = vol_properties['size']

        # Collect host_state information.
        elevated = context.elevated()
        all_hosts = self.host_manager.get_all_host_states(elevated)

        # For now we are only focusing on capacity.
        req_info = (_('volume request: '
                      'requested size: %(size)s. ') %
                    {'size': req_size})
        info = ''
        for hstate_info in all_hosts:
            ts = timeutils.isotime(at=hstate_info.updated)
            info += (_("{host: %(hostname)s, free_capacity: %(free_cap)s, "
                       "total_capacity: %(total)s, reserved_percentage:"
                       " %(reserved)s, last update: %(time_updated)s}") %
                     {'hostname': hstate_info.host,
                      'free_cap': hstate_info.free_capacity_gb,
                      'total': hstate_info.total_capacity_gb,
                      'reserved': hstate_info.reserved_percentage,
                      'time_updated': ts})
        if len(info) > 0:
            msg = (_('request exceeds capacity: ') + req_info +
                   (_('available capacity: %(info)s') % {'info': info}))
        else:
            msg = _("No storage has been registered. ") + req_info
        LOG.error(("Schedule Failure: volume_id: %s, " % volume_id) + msg)
        meta_data = {'schedule Failure description': msg[:255]}
        db.volume_update(context, volume_id, {'metadata': meta_data})
        return None
    else:
        return hosts
def test_view_builder_show(self):
    view_builder = views_types.ViewBuilder()

    now = timeutils.isotime()
    raw_volume_type = dict(
        name="new_type",
        deleted=False,
        created_at=now,
        updated_at=now,
        extra_specs={},
        deleted_at=None,
        id=42
    )

    request = fakes.HTTPRequest.blank("/v2")
    output = view_builder.show(request, raw_volume_type)

    self.assertIn("volume_type", output)
    expected_volume_type = dict(name="new_type", extra_specs={}, id=42)
    self.assertDictMatch(output["volume_type"], expected_volume_type)
def test_view_builder_show(self):
    view_builder = views_types.ViewBuilder()

    now = timeutils.isotime()
    raw_volume_type = dict(name='new_type',
                           deleted=False,
                           created_at=now,
                           updated_at=now,
                           extra_specs={},
                           deleted_at=None,
                           id=42)

    request = fakes.HTTPRequest.blank("/v1")
    output = view_builder.show(request, raw_volume_type)

    self.assertIn('volume_type', output)
    expected_volume_type = dict(name='new_type', extra_specs={}, id=42)
    self.assertDictMatch(output['volume_type'], expected_volume_type)
def _heal_volume_status(self, context):
    TIME_SHIFT_TOLERANCE = 3

    heal_interval = CONF.volume_sync_interval
    if not heal_interval:
        return

    curr_time = time.time()
    LOG.info(_('Cascade info: last volume update time: %s'),
             self._last_info_volume_state_heal)
    LOG.info(_('Cascade info: heal interval: %s'), heal_interval)
    LOG.info(_('Cascade info: curr_time: %s'), curr_time)

    if self._last_info_volume_state_heal + heal_interval > curr_time:
        return
    self._last_info_volume_state_heal = curr_time

    cinderClient = self._get_cinder_cascaded_admin_client()

    try:
        if self._change_since_time is None:
            search_opt = {'all_tenants': True}
            volumes = cinderClient.volumes.list(search_opts=search_opt)
            volumetypes = cinderClient.volume_types.list()
            LOG.info(_('Cascade info: change since time is none, '
                       'volumes: %s'), volumes)
        else:
            # Shift the changes-since marker back a few seconds to
            # tolerate clock skew between the cascading and cascaded
            # cinder services.
            change_since_isotime = \
                timeutils.parse_isotime(self._change_since_time)
            changes_since_timestamp = change_since_isotime - \
                datetime.timedelta(seconds=TIME_SHIFT_TOLERANCE)
            timestr = time.mktime(changes_since_timestamp.timetuple())
            new_change_since_isotime = \
                timeutils.iso8601_from_timestamp(timestr)

            search_op = {'all_tenants': True,
                         'changes-since': new_change_since_isotime}
            volumes = cinderClient.volumes.list(search_opts=search_op)
            volumetypes = cinderClient.volume_types.list()
            LOG.info(_('Cascade info: search time is not none, '
                       'volumes: %s'), volumes)

        self._change_since_time = timeutils.isotime()

        if len(volumes) > 0:
            LOG.debug(_('Updated the volumes %s'), volumes)

        for volume in volumes:
            volume_id = volume._info['metadata']['logicalVolumeId']
            volume_status = volume._info['status']
            if volume_status == "in-use":
                self.db.volume_update(
                    context, volume_id,
                    {'status': volume._info['status'],
                     'attach_status': 'attached',
                     'attach_time': timeutils.strtime()})
            elif volume_status == "available":
                self.db.volume_update(
                    context, volume_id,
                    {'status': volume._info['status'],
                     'attach_status': 'detached',
                     'instance_uuid': None,
                     'attached_host': None,
                     'mountpoint': None,
                     'attach_time': None})
            else:
                self.db.volume_update(context, volume_id,
                                      {'status': volume._info['status']})
            LOG.info(_('Cascade info: updated the volume %s status from '
                       'cinder-proxy'), volume_id)

        vol_types = self.db.volume_type_get_all(context, inactive=False)
        for volumetype in volumetypes:
            volume_type_name = volumetype._info['name']
            if volume_type_name not in vol_types.keys():
                extra_specs = volumetype._info['extra_specs']
                self.db.volume_type_create(
                    context,
                    dict(name=volume_type_name, extra_specs=extra_specs))

    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_('Failed to sync volume status to db.'))
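# A standalone sketch, not from the original module: isolates the
# changes-since windowing used in _heal_volume_status above. The last
# sync marker is parsed, shifted back `tolerance` seconds to absorb
# clock skew between services, and re-serialized for the
# 'changes-since' query. Assumes the same timeutils module this file
# already imports; the function name is hypothetical.
def _example_changes_since_marker(last_marker, tolerance=3):
    parsed = timeutils.parse_isotime(last_marker)
    shifted = parsed - datetime.timedelta(seconds=tolerance)
    # mktime interprets the struct_time in local time, matching the
    # original code's behavior.
    epoch = time.mktime(shifted.timetuple())
    # Returns an ISO 8601 string `tolerance` seconds earlier than
    # last_marker, e.g. for use as the 'changes-since' search option.
    return timeutils.iso8601_from_timestamp(epoch)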