def test_create_snapshot(self):
    self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request',
                   self.fake_issue_api_request)
    self.stubs.Set(solidfire.SolidFireDriver, '_get_model_info',
                   self.fake_get_model_info)
    testvol = {'project_id': 'testprjid',
               'name': 'testvol',
               'size': 1,
               'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
               'volume_type_id': None,
               'created_at': timeutils.utcnow()}
    testsnap = {'project_id': 'testprjid',
                'name': 'testvol',
                'volume_size': 1,
                'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
                'volume_id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                'volume_type_id': None,
                'created_at': timeutils.utcnow()}
    sfv = solidfire.SolidFireDriver(configuration=self.configuration)
    sfv.create_volume(testvol)
    sfv.create_snapshot(testsnap)
def _do_update_node(self, node_id, values):
    with _session_for_write():
        query = model_query(models.Node)
        query = add_identity_filter(query, node_id)
        try:
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.NodeNotFound(node=node_id)

        # Prevent instance_uuid overwriting
        if values.get("instance_uuid") and ref.instance_uuid:
            raise exception.NodeAssociated(node=ref.uuid,
                                           instance=ref.instance_uuid)

        if 'provision_state' in values:
            values['provision_updated_at'] = timeutils.utcnow()
            if values['provision_state'] == states.INSPECTING:
                values['inspection_started_at'] = timeutils.utcnow()
                values['inspection_finished_at'] = None
            elif (ref.provision_state == states.INSPECTING and
                  values['provision_state'] == states.MANAGEABLE):
                values['inspection_finished_at'] = timeutils.utcnow()
                values['inspection_started_at'] = None
            elif (ref.provision_state == states.INSPECTING and
                  values['provision_state'] == states.INSPECTFAIL):
                values['inspection_started_at'] = None

        ref.update(values)
    return ref
def _copy_volume_with_file(src, dest, size_in_m):
    src_handle = src
    if isinstance(src, six.string_types):
        src_handle = _open_volume_with_path(src, 'rb')

    dest_handle = dest
    if isinstance(dest, six.string_types):
        dest_handle = _open_volume_with_path(dest, 'wb')

    if not src_handle:
        raise exception.DeviceUnavailable(
            _("Failed to copy volume, source device unavailable."))

    if not dest_handle:
        raise exception.DeviceUnavailable(
            _("Failed to copy volume, destination device unavailable."))

    start_time = timeutils.utcnow()

    _transfer_data(src_handle, dest_handle, size_in_m * units.Mi,
                   units.Mi * 4)

    duration = max(1, timeutils.delta_seconds(start_time,
                                              timeutils.utcnow()))

    if isinstance(src, six.string_types):
        src_handle.close()
    if isinstance(dest, six.string_types):
        dest_handle.close()

    mbps = (size_in_m / duration)
    LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at "
                 "%(mbps).2f MB/s)."),
             {'size_in_m': size_in_m, 'mbps': mbps})
def setUp(self):
    super(TestInstanceNotification, self).setUp()
    self.test_keys = ['memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
                      'swap']
    self.flavor_values = {k: 123 for k in self.test_keys}
    instance_values = {k: 456 for k in self.test_keys}
    flavor = objects.Flavor(flavorid='test-flavor', name='test-flavor',
                            disabled=False, projects=[], is_public=True,
                            extra_specs={}, **self.flavor_values)
    info_cache = objects.InstanceInfoCache(
        network_info=network_model.NetworkInfo())
    self.instance = objects.Instance(
        flavor=flavor,
        info_cache=info_cache,
        metadata={},
        uuid=uuids.instance1,
        locked=False,
        auto_disk_config=False,
        system_metadata={},
        **instance_values)
    self.payload = {
        'bandwidth': {},
        'audit_period_ending': timeutils.utcnow(),
        'audit_period_beginning': timeutils.utcnow(),
    }
def format_watch(watch): updated_at = watch.updated_at or timeutils.utcnow() result = { rpc_api.WATCH_ACTIONS_ENABLED: watch.rule.get( rpc_api.RULE_ACTIONS_ENABLED), rpc_api.WATCH_ALARM_ACTIONS: watch.rule.get( rpc_api.RULE_ALARM_ACTIONS), rpc_api.WATCH_TOPIC: watch.rule.get(rpc_api.RULE_TOPIC), rpc_api.WATCH_UPDATED_TIME: updated_at.isoformat(), rpc_api.WATCH_DESCRIPTION: watch.rule.get(rpc_api.RULE_DESCRIPTION), rpc_api.WATCH_NAME: watch.name, rpc_api.WATCH_COMPARISON: watch.rule.get(rpc_api.RULE_COMPARISON), rpc_api.WATCH_DIMENSIONS: watch.rule.get( rpc_api.RULE_DIMENSIONS) or [], rpc_api.WATCH_PERIODS: watch.rule.get(rpc_api.RULE_PERIODS), rpc_api.WATCH_INSUFFICIENT_ACTIONS: watch.rule.get(rpc_api.RULE_INSUFFICIENT_ACTIONS), rpc_api.WATCH_METRIC_NAME: watch.rule.get(rpc_api.RULE_METRIC_NAME), rpc_api.WATCH_NAMESPACE: watch.rule.get(rpc_api.RULE_NAMESPACE), rpc_api.WATCH_OK_ACTIONS: watch.rule.get(rpc_api.RULE_OK_ACTIONS), rpc_api.WATCH_PERIOD: watch.rule.get(rpc_api.RULE_PERIOD), rpc_api.WATCH_STATE_REASON: watch.rule.get(rpc_api.RULE_STATE_REASON), rpc_api.WATCH_STATE_REASON_DATA: watch.rule.get(rpc_api.RULE_STATE_REASON_DATA), rpc_api.WATCH_STATE_UPDATED_TIME: watch.rule.get( rpc_api.RULE_STATE_UPDATED_TIME, timeutils.utcnow()).isoformat(), rpc_api.WATCH_STATE_VALUE: watch.state, rpc_api.WATCH_STATISTIC: watch.rule.get(rpc_api.RULE_STATISTIC), rpc_api.WATCH_THRESHOLD: watch.rule.get(rpc_api.RULE_THRESHOLD), rpc_api.WATCH_UNIT: watch.rule.get(rpc_api.RULE_UNIT), rpc_api.WATCH_STACK_ID: watch.stack_id } return result
def sample(cls, expand=True):
    sample = cls(
        uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
        name='example',
        image_id='Fedora-k8s',
        flavor_id='m1.small',
        master_flavor_id='m1.small',
        dns_nameserver='8.8.1.1',
        keypair_id='keypair1',
        external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba',
        fixed_network='private',
        network_driver='libnetwork',
        volume_driver='cinder',
        apiserver_port=8080,
        docker_volume_size=25,
        cluster_distro='fedora-atomic',
        coe=fields.BayType.KUBERNETES,
        http_proxy='http://proxy.com:123',
        https_proxy='https://proxy.com:123',
        no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
        labels={'key1': 'val1', 'key2': 'val2'},
        server_type='vm',
        created_at=timeutils.utcnow(),
        updated_at=timeutils.utcnow(),
        public=False)  # no trailing comma here: it would turn `sample` into a tuple
    return cls._convert_with_links(sample, 'http://localhost:9511', expand)
def sample(cls, expand=True): sample = cls(uuid='fe78db47-9a37-4e9f-8572-804a10abc0aa', name='MyService', bay_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae', labels={'label1': 'foo'}, selector={'label1': 'foo'}, ip='172.17.2.2', ports=[{"port": 88, "targetPort": 6379, "protocol": "TCP"}], manifest_url='file:///tmp/rc.yaml', manifest='''{ "metadata": { "name": "test", "labels": { "key": "value" } }, "spec": { "ports": [ { "port": 88, "targetPort": 6379, "protocol": "TCP" } ], "selector": { "bar": "foo" } } }''', created_at=timeutils.utcnow(), updated_at=timeutils.utcnow()) return cls._convert_with_links(sample, 'http://localhost:9511', expand)
def _wait_on_task_execution(self):
    """Wait until all the tasks have finished execution and are in
    state of success or failure.
    """
    start = timeutils.utcnow()

    # wait for maximum of 5 seconds
    while timeutils.delta_seconds(start, timeutils.utcnow()) < 5:
        wait = False
        # Verify that no task is in status of pending or processing
        path = "/v2/tasks"
        res, content = self.http.request(path, 'GET',
                                         headers=minimal_task_headers())
        content_dict = json.loads(content)

        self.assertEqual(200, res.status)
        res_tasks = content_dict['tasks']
        if len(res_tasks) != 0:
            for task in res_tasks:
                if task['status'] in ('pending', 'processing'):
                    wait = True
                    break

        if wait:
            time.sleep(0.05)
            continue
        else:
            break
def format_service(service):
    if service is None:
        return

    status = 'down'
    if service.updated_at is not None:
        if ((timeutils.utcnow() - service.updated_at).total_seconds()
                <= service.report_interval):
            status = 'up'
    else:
        if ((timeutils.utcnow() - service.created_at).total_seconds()
                <= service.report_interval):
            status = 'up'

    result = {
        SERVICE_ID: service.id,
        SERVICE_BINARY: service.binary,
        SERVICE_ENGINE_ID: service.engine_id,
        SERVICE_HOST: service.host,
        SERVICE_HOSTNAME: service.hostname,
        SERVICE_TOPIC: service.topic,
        SERVICE_REPORT_INTERVAL: service.report_interval,
        SERVICE_CREATED_AT: service.created_at,
        SERVICE_UPDATED_AT: service.updated_at,
        SERVICE_DELETED_AT: service.deleted_at,
        SERVICE_STATUS: status
    }
    return result
def _wait_for_stack_status(self, stack_identifier, status,
                           failure_pattern='^.*_FAILED$',
                           success_on_not_found=False):
    """
    Waits for a Stack to reach a given status.

    Note this compares the full $action_$status, e.g. CREATE_COMPLETE,
    not just COMPLETE which is exposed via the status property of
    Stack in heatclient.
    """
    fail_regexp = re.compile(failure_pattern)
    build_timeout = self.conf.build_timeout
    build_interval = self.conf.build_interval

    start = timeutils.utcnow()
    while timeutils.delta_seconds(start,
                                  timeutils.utcnow()) < build_timeout:
        try:
            stack = self.client.stacks.get(stack_identifier)
        except heat_exceptions.HTTPNotFound:
            if success_on_not_found:
                return
            # ignore this, as the resource may not have
            # been created yet
        else:
            if self._verify_status(stack, stack_identifier, status,
                                   fail_regexp):
                return
        time.sleep(build_interval)

    message = ('Stack %s failed to reach %s status within '
               'the required time (%s s).' %
               (stack_identifier, status, build_timeout))
    raise exceptions.TimeoutException(message)
def test_snapshot_index_detail_serializer(self): serializer = snapshots.SnapshotsTemplate() raw_snapshots = [ dict( id="snap1_id", status="snap1_status", size=1024, created_at=timeutils.utcnow(), name="snap1_name", description="snap1_desc", volume_id="vol1_id", ), dict( id="snap2_id", status="snap2_status", size=1024, created_at=timeutils.utcnow(), name="snap2_name", description="snap2_desc", volume_id="vol2_id", ), ] text = serializer.serialize(dict(snapshots=raw_snapshots)) tree = etree.fromstring(text) self.assertEqual("snapshots", tree.tag) self.assertEqual(len(raw_snapshots), len(tree)) for idx, child in enumerate(tree): self._verify_snapshot(raw_snapshots[idx], child)
def sample(cls, expand=True): sample = cls(uuid='f978db47-9a37-4e9f-8572-804a10abc0aa', name='MyPod', desc='Pod - Description', bay_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae', images=['MyImage'], labels={'name': 'foo'}, status='Running', host='10.0.0.3', manifest_url='file:///tmp/rc.yaml', manifest='''{ "metadata": { "name": "name_of_pod" }, "spec": { "containers": [ { "name": "test", "image": "test" } ] } }''', created_at=timeutils.utcnow(), updated_at=timeutils.utcnow()) return cls._convert_with_links(sample, 'http://localhost:9511', expand)
def test_has_all_capabilities(self, _mock_service_get_all_by_topic, _mock_service_is_up): _mock_service_is_up.return_value = True services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), ] _mock_service_get_all_by_topic.return_value = services # Create host_manager again to let db.service_get_all_by_topic mock run self.host_manager = host_manager.HostManager() self.assertFalse(self.host_manager.has_all_capabilities()) host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1) host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1) host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1) service_name = 'volume' self.host_manager.update_service_capabilities(service_name, 'host1', host1_volume_capabs) self.assertFalse(self.host_manager.has_all_capabilities()) self.host_manager.update_service_capabilities(service_name, 'host2', host2_volume_capabs) self.assertFalse(self.host_manager.has_all_capabilities()) self.host_manager.update_service_capabilities(service_name, 'host3', host3_volume_capabs) self.assertTrue(self.host_manager.has_all_capabilities())
def discover(self, manager, param=None): """Discover resources to monitor.""" secs_from_last_update = 0 if self.last_run: secs_from_last_update = timeutils.delta_seconds( self.last_run, timeutils.utcnow(True)) instances = [] # NOTE(ityaptin) we update make a nova request only if # it's a first discovery or resources expired if not self.last_run or secs_from_last_update >= self.expiration_time: try: utc_now = timeutils.utcnow(True) since = self.last_run.isoformat() if self.last_run else None instances = self.nova_cli.instance_get_all_by_host( self.conf.host, since) self.last_run = utc_now except Exception: # NOTE(zqfan): instance_get_all_by_host is wrapped and will log # exception when there is any error. It is no need to raise it # again and print one more time. return [] for instance in instances: if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', 'error']: self.instances.pop(instance.id, None) else: self.instances[instance.id] = instance return self.instances.values()
def _get_doc_date(self):
    if self.ttl_seconds > 0:
        expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
        doc_date = timeutils.utcnow() + expire_delta
    else:
        doc_date = timeutils.utcnow()
    return doc_date
def inspect_container(self, container_id):
    if container_id not in self._containers:
        return
    container = self._containers[container_id]
    info = {
        'Args': [],
        'Config': container['Config'],
        'Created': str(timeutils.utcnow()),
        'Id': container_id,
        'Image': self._fake_id(),
        'NetworkSettings': {
            'Bridge': '',
            'Gateway': '',
            'IPAddress': '',
            'IPPrefixLen': 0,
            'PortMapping': None
        },
        'Path': 'bash',
        'ResolvConfPath': '/etc/resolv.conf',
        'State': {
            'ExitCode': 0,
            'Ghost': False,
            'Pid': 0,
            'Running': container['running'],
            'StartedAt': str(timeutils.utcnow())
        },
        'SysInitPath': '/tmp/docker',
        'Volumes': {},
    }
    return info
def test_create_clone(self, _mock_create_template_account, _mock_issue_api_request): _mock_issue_api_request.return_value = self.mock_stats_data _mock_create_template_account.return_value = 1 _fake_get_snaps = [{'snapshotID': 5, 'name': 'testvol'}] testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} testvol_b = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sf_snapshots', return_value=_fake_get_snaps), \ mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=[]), \ mock.patch.object(sfv, '_get_model_info', return_value={}): sfv.create_cloned_volume(testvol_b, testvol)
def test_event_get_all_with_sorting(self): cluster1 = shared.create_cluster(self.ctx, self.profile) event1 = self.create_event(self.ctx, entity=cluster1, timestamp=tu.utcnow(True), action='action2') event2 = self.create_event(self.ctx, entity=cluster1, timestamp=tu.utcnow(True), action='action3') event3 = self.create_event(self.ctx, entity=cluster1, timestamp=tu.utcnow(True), action='action1') events = db_api.event_get_all(self.ctx, sort='timestamp') self.assertEqual(event1.id, events[0].id) self.assertEqual(event2.id, events[1].id) self.assertEqual(event3.id, events[2].id) events = db_api.event_get_all(self.ctx, sort='timestamp:desc') self.assertEqual(event1.id, events[2].id) self.assertEqual(event2.id, events[1].id) self.assertEqual(event3.id, events[0].id) events = db_api.event_get_all(self.ctx, sort='action') self.assertEqual(event1.id, events[1].id) self.assertEqual(event2.id, events[2].id) self.assertEqual(event3.id, events[0].id) events = db_api.event_get_all(self.ctx, sort='action:desc') self.assertEqual(event1.id, events[1].id) self.assertEqual(event2.id, events[0].id) self.assertEqual(event3.id, events[2].id)
def upgrade(): op.add_column( 'order_retry_tasks', sa.Column( 'created_at', sa.DateTime(), nullable=False, server_default=str(timeutils.utcnow()))) op.add_column( 'order_retry_tasks', sa.Column( 'deleted', sa.Boolean(), nullable=False, server_default='0')) op.add_column( 'order_retry_tasks', sa.Column('deleted_at', sa.DateTime(), nullable=True)) op.add_column( 'order_retry_tasks', sa.Column( 'status', sa.String(length=20), nullable=False, server_default=m.States.PENDING)) op.add_column( 'order_retry_tasks', sa.Column( 'updated_at', sa.DateTime(), nullable=False, server_default=str(timeutils.utcnow())))
def treat_metric(self, metric_name, metric_type, value, sampling):
    metric_name += "|" + metric_type
    if metric_type == "ms":
        if sampling is not None:
            raise ValueError(
                "Invalid sampling for ms: `%d`, should be none" % sampling)
        self.times[metric_name] = storage.Measure(
            timeutils.utcnow(), value)
    elif metric_type == "g":
        if sampling is not None:
            raise ValueError(
                "Invalid sampling for g: `%d`, should be none" % sampling)
        self.gauges[metric_name] = storage.Measure(
            timeutils.utcnow(), value)
    elif metric_type == "c":
        sampling = 1 if sampling is None else sampling
        if metric_name in self.counters:
            current_value = self.counters[metric_name].value
        else:
            current_value = 0
        self.counters[metric_name] = storage.Measure(
            timeutils.utcnow(),
            current_value + (value * (1 / sampling)))
    # TODO(jd) Support "set" type
    # elif metric_type == "s":
    #     pass
    else:
        raise ValueError("Unknown metric type `%s'" % metric_type)
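# Illustrative sketch (not part of the original source): the "c" branch above
# scales each reported counter value by 1 / sampling, so a counter sampled at
# 25% stands in for four times as many real events. The numbers below are
# made up purely to show the arithmetic.
value, sampling = 3, 0.25      # client reported 3 events while sampling 25%
current_value = 10             # previously accumulated counter value
new_value = current_value + (value * (1 / sampling))
assert new_value == 22.0       # 3 observed events represent 12 real ones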
def test_archive_then_purge_by_date(self):
    server = self._create_server()
    server_id = server['id']
    self._delete_server(server_id)
    results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
    self.assertEqual([server_id], deleted_ids)

    pre_purge_results = self._get_table_counts()

    past = timeutils.utcnow() - datetime.timedelta(hours=1)
    admin_context = context.get_admin_context()
    deleted = sqlalchemy_api.purge_shadow_tables(admin_context, past)
    # Make sure we didn't delete anything if the marker is before
    # we started
    self.assertEqual(0, deleted)
    results = self._get_table_counts()
    # Nothing should be changed if we didn't purge anything
    self.assertEqual(pre_purge_results, results)

    future = timeutils.utcnow() + datetime.timedelta(hours=1)
    deleted = sqlalchemy_api.purge_shadow_tables(admin_context, future)
    # Make sure we deleted things when the marker is after
    # we started
    self.assertNotEqual(0, deleted)

    results = self._get_table_counts()
    # There should be no rows in any table if we purged everything
    self.assertFalse(any(results.values()))
def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self, req_mock): oat_data = {"hosts": [{"host_name": "host1", "trust_lvl": "trusted", "vtime": timeutils.utcnow().strftime( "%c")}, {"host_name": "host2", "trust_lvl": "trusted", "vtime": timeutils.utcnow().strftime( "%D")}, # This is just a broken date to ensure that # we're not just arbitrarily accepting any # date format. ]} req_mock.return_value = requests.codes.OK, oat_data extra_specs = {'trust:trusted_host': 'trusted'} spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, flavor=objects.Flavor(memory_mb=1024, extra_specs=extra_specs)) host = fakes.FakeHostState('host1', 'host1', {}) bad_host = fakes.FakeHostState('host2', 'host2', {}) self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) self.assertFalse(self.filt_cls.host_passes(bad_host, spec_obj))
def test_create_clone(self):
    self.stubs.Set(solidfire.SolidFireDriver, '_issue_api_request',
                   self.fake_issue_api_request)
    self.stubs.Set(solidfire.SolidFireDriver, '_get_model_info',
                   self.fake_get_model_info)
    testvol = {'project_id': 'testprjid',
               'name': 'testvol',
               'size': 1,
               'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
               'volume_type_id': None,
               'created_at': timeutils.utcnow()}
    testvol_b = {'project_id': 'testprjid',
                 'name': 'testvol',
                 'size': 1,
                 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
                 'volume_type_id': None,
                 'created_at': timeutils.utcnow()}
    with mock.patch.object(solidfire.SolidFireDriver,
                           '_get_sf_snapshots',
                           return_value=[]):
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        sfv.create_cloned_volume(testvol_b, testvol)
def _pre_upgrade_056(self, engine): raw_template = utils.get_table(engine, 'raw_template') templ = [] for i in range(900, 903, 1): t = dict(id=i, template='{}', files='{}') engine.execute(raw_template.insert(), [t]) templ.append(t) user_creds = utils.get_table(engine, 'user_creds') user = [dict(id=uid, username='******', password='******', tenant='test_project', auth_url='bla', tenant_id=str(uuid.uuid4()), trust_id='', trustor_user_id='') for uid in range(900, 903)] engine.execute(user_creds.insert(), user) stack = utils.get_table(engine, 'stack') stack_ids = [('967aaefa-152e-405d-b13a-35d4c816390c', 0), ('9e9debab-a303-4f29-84d3-c8165647c47e', 1), ('9a4bd1e9-8b21-46cd-964a-f66cb1cfa2f9', 2)] data = [dict(id=ll_id, name=ll_id, raw_template_id=templ[templ_id]['id'], user_creds_id=user[templ_id]['id'], username='******', disable_rollback=True, parameters='test_params', created_at=timeutils.utcnow(), deleted_at=None) for ll_id, templ_id in stack_ids] data[-1]['deleted_at'] = timeutils.utcnow() engine.execute(stack.insert(), data) return data
def _get_or_create_lock(name, session, nested, retry=0): if nested: session.begin_nested() else: session.begin() existing = session.query(Lock).get(name) if existing is None: try: # no lock found, creating a new one lock = Lock(id=name, ts=timeutils.utcnow()) lock.save(session) return session.transaction # lock created and acquired except exception.DBDuplicateEntry: session.rollback() if retry >= MAX_LOCK_RETRIES: raise else: # other transaction has created a lock, repeat to acquire # via update return _get_or_create_lock(name, session, nested, retry + 1) else: # lock found, acquiring by doing update existing.ts = timeutils.utcnow() existing.save(session) return session.transaction
def test_send(self): created_time = timeutils.utcnow() st = mock.Mock() st.state = ('x', 'f') st.status = st.state[0] st.action = st.state[1] st.name = 'fred' st.status_reason = 'this is why' st.created_time = created_time st.context = self.ctx st.id = 'hay-are-en' updated_time = timeutils.utcnow() st.updated_time = updated_time st.tags = ['tag1', 'tag2'] st.t = mock.MagicMock() st.t.__getitem__.return_value = 'for test' st.t.DESCRIPTION = 'description' notify = self.patchobject(notification, 'notify') notification.stack.send(st) notify.assert_called_once_with( self.ctx, 'stack.f.error', 'ERROR', {'state_reason': 'this is why', 'user_id': 'test_username', 'username': '******', 'user_identity': 'test_user_id', 'stack_identity': 'hay-are-en', 'stack_name': 'fred', 'tenant_id': 'test_tenant_id', 'create_at': created_time.isoformat(), 'state': 'x_f', 'description': 'for test', 'tags': ['tag1', 'tag2'], 'updated_at': updated_time.isoformat()})
def _pre_upgrade_035(self, engine): # The stacks id are for the 33 version migration event_table = utils.get_table(engine, 'event') data = [{ 'id': '22222222-152e-405d-b13a-35d4c816390c', 'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c', 'resource_action': 'Test', 'resource_status': 'TEST IN PROGRESS', 'resource_name': 'Testing Resource', 'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9', 'resource_status_reason': '', 'resource_type': '', 'resource_properties': None, 'created_at': timeutils.utcnow()}, {'id': '11111111-152e-405d-b13a-35d4c816390c', 'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c', 'resource_action': 'Test', 'resource_status': 'TEST COMPLETE', 'resource_name': 'Testing Resource', 'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9', 'resource_status_reason': '', 'resource_type': '', 'resource_properties': None, 'created_at': timeutils.utcnow() + datetime.timedelta(days=5)}] engine.execute(event_table.insert(), data) return data
def setUp(self): super(PoolWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = base_host.HostWeightHandler( 'manila.scheduler.weighers') share_servers = [ {'id': 'fake_server_id0'}, {'id': 'fake_server_id1'}, {'id': 'fake_server_id2'}, {'id': 'fake_server_id3'}, {'id': 'fake_server_id4'}, ] services = [ dict(id=1, host='host1@AAA', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2@BBB', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3@CCC', topic='share', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow()), dict(id=4, host='host@DDD', topic='share', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), dict(id=5, host='host5@EEE', topic='share', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), ] self.host_manager.service_states = ( fakes.SHARE_SERVICE_STATES_WITH_POOLS) self.mock_object(db_api, 'share_server_get_all_by_host', mock.Mock(return_value=share_servers)) self.mock_object(db_api.IMPL, 'service_get_all_by_topic', mock.Mock(return_value=services))
def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self, req_mock): oat_data = {"hosts": [{"host_name": "host1", "trust_lvl": "trusted", "vtime": timeutils.utcnow().strftime( "%c")}, {"host_name": "host2", "trust_lvl": "trusted", "vtime": timeutils.utcnow().strftime( "%D")}, # This is just a broken date to ensure that # we're not just arbitrarily accepting any # date format. ]} req_mock.return_value = requests.codes.OK, oat_data extra_specs = {'trust:trusted_host': 'trusted'} filter_properties = {'context': mock.sentinel.ctx, 'instance_type': {'memory_mb': 1024, 'extra_specs': extra_specs}} host = fakes.FakeHostState('host1', 'host1', {}) bad_host = fakes.FakeHostState('host2', 'host2', {}) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) self.assertFalse(self.filt_cls.host_passes(bad_host, filter_properties))
def _add_nodes_filters(self, query, filters): if filters is None: filters = [] if 'chassis_uuid' in filters: # get_chassis_by_uuid() to raise an exception if the chassis # is not found chassis_obj = self.get_chassis_by_uuid(filters['chassis_uuid']) query = query.filter_by(chassis_id=chassis_obj.id) if 'associated' in filters: if filters['associated']: query = query.filter(models.Node.instance_uuid != None) else: query = query.filter(models.Node.instance_uuid == None) if 'reserved' in filters: if filters['reserved']: query = query.filter(models.Node.reservation != None) else: query = query.filter(models.Node.reservation == None) if 'maintenance' in filters: query = query.filter_by(maintenance=filters['maintenance']) if 'driver' in filters: query = query.filter_by(driver=filters['driver']) if 'provision_state' in filters: query = query.filter_by(provision_state=filters['provision_state']) if 'provisioned_before' in filters: limit = timeutils.utcnow() - datetime.timedelta( seconds=filters['provisioned_before']) query = query.filter(models.Node.provision_updated_at < limit) if 'inspection_started_before' in filters: limit = ((timeutils.utcnow()) - (datetime.timedelta( seconds=filters['inspection_started_before']))) query = query.filter(models.Node.inspection_started_at < limit) return query
def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() # Import cinder objects for test cases # objects.register_all() # Unit tests do not need to use lazy gettext i18n.enable_lazy(False) test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) try: test_timeout = int(test_timeout) except ValueError: # If timeout value is invalid do not set a timeout. test_timeout = 0 if test_timeout > 0: self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) environ_enabled = (lambda var_name: strutils.bool_from_string( os.environ.get(var_name))) if environ_enabled('OS_STDOUT_CAPTURE'): stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) if environ_enabled('OS_STDERR_CAPTURE'): stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) if environ_enabled('OS_LOG_CAPTURE'): log_format = '%(levelname)s [%(name)s] %(message)s' if environ_enabled('OS_DEBUG'): level = logging.DEBUG else: level = logging.INFO self.useFixture( fixtures.LoggerFixture(nuke_handlers=False, format=log_format, level=level)) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' self.messaging_conf.response_timeout = 15 self.useFixture(self.messaging_conf) conf_fixture.set_defaults(CONF) CONF([], default_config_files=[]) # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. self.start = timeutils.utcnow() CONF.set_default('connection', 'sqlite://', 'database') CONF.set_default('sqlite_synchronous', False, 'database') global _DB_CACHE if not _DB_CACHE: _DB_CACHE = Database(sqla_api, migration, sql_connection=CONF.database.connection, sqlite_db=CONF.sqlite_db, sqlite_clean_db=CONF.sqlite_clean_db) self.useFixture(_DB_CACHE) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators self.mox = mox.Mox() self.addCleanup(CONF.reset) self.addCleanup(self.mox.UnsetStubs) self.addCleanup(self.mox.VerifyAll) self.addCleanup(self._common_cleanup) self.injected = [] lock_path = self.useFixture(fixtures.TempDir()).path lockutils.set_defaults(lock_path)
def _get_timestamp_filename(self):
    return '%s%s' % (TIMESTAMP_PREFIX,
                     timeutils.utcnow().strftime(TIMESTAMP_FORMAT))
def _model_query(context, model, filters, fields=None): if 'project_id' not in filters: filters['project_id'] = filters.get('tenant_id') filters = filters or {} model_filters = [] eq_filters = [ "address", "cidr", "deallocated", "ip_version", "service", "mac_address_range_id", "transaction_id", "lock_id", "address_type", "completed", "resource_id" ] in_filters = [ "device_id", "device_owner", "group_id", "id", "mac_address", "name", "network_id", "segment_id", "subnet_id", "used_by_tenant_id", "version", "project_id" ] # Sanitize incoming filters to only attributes that exist in the model. # NOTE: Filters for unusable attributes are silently dropped here. # NOTE: When the filter key != attribute key, a conditional must be added # here. model_attrs = _model_attrs(model) filters = { x: y for x, y in filters.items() if x in model_attrs or (x == "tenant_id" and model == models.IPAddress) or (x == "ip_address" and model == models.IPAddress) or (x == "reuse_after" and model in (models.IPAddress, models.MacAddress)) } # Inject the tenant id if none is set. We don't need unqualified queries. # This works even when a non-shared, other-tenant owned network is passed # in because the authZ checks that happen in Neutron above us yank it back # out of the result set. if not filters.get("tenant_id") and not context.is_admin: filters["tenant_id"] = [context.tenant_id] if model == models.SecurityGroupRule: sg_rule_attribs = ["direction", "port_range_max", "port_range_min"] eq_filters.extend(sg_rule_attribs) for key, value in filters.items(): if key in in_filters: if value: model_type = getattr(model, key) model_filters.append(model_type.in_(value)) elif key in eq_filters: model_type = getattr(model, key) model_filters.append(model_type == value) elif key == "_deallocated": if value: model_filters.append(model._deallocated == 1) else: model_filters.append(model._deallocated != 1) elif key == "ethertype": etypes = [] for etype in value: etypes.append(protocols.translate_ethertype(etype)) if etypes: model_filters.append(model.ethertype.in_(etypes)) elif key == "ip_address": if value: model_filters.append( model.address.in_([ip.ipv6().value for ip in value])) elif key == 'protocol': pnums = [] for version in (protocols.PROTOCOLS_V4, protocols.PROTOCOLS_V6): pnums.extend([y for x, y in version.items() if x in value]) model_filters.append(model.protocol.in_(pnums)) elif key == "reuse_after": reuse = (timeutils.utcnow() - datetime.timedelta(seconds=value)) # NOTE(asadoughi): should this allow for deallocated_at = null? model_filters.append(model.deallocated_at <= reuse) elif key == "port_id": if model == models.PortIpAssociation: model_filters.append(model.port_id == value) elif key == "tenant_id" or key == "project_id": if model == models.IPAddress: if value: model_filters.append(model.used_by_tenant_id.in_(value)) elif model in _NO_TENANT_MODELS: pass else: if value: model_filters.append(model.project_id.in_(value)) return model_filters
def ip_address_deallocate(context, address, **kwargs):
    kwargs["_deallocated"] = 1
    kwargs["deallocated_at"] = timeutils.utcnow()
    return ip_address_update(context, address, **kwargs)
def test_last_modified_same(self):
    now = timeutils.utcnow(with_timezone=True)
    self.resource_provider.updated_at = now
    self.resource_provider.created_at = now
    chosen_time = util.pick_last_modified(now, self.resource_provider)
    self.assertEqual(now, chosen_time)
def copy_and_move_data(resources_table_name): global variable_association_id_counter connection = op.get_bind() # Our table shapes - we avoid directly working with SA models # since we have to deal with coexistence of old and new schema. # # Note how pluralization works - 'cells' vs 'cell_variables'. # Fortunately all of our resources pluralize by just adding 's'. resource_name = resources_table_name[:-1] resources = sa.sql.table( resources_table_name, sa.sql.column('id', sa.Integer), sa.sql.column('variable_association_id', sa.Integer)) resource_variables = sa.sql.table( resource_name + '_variables', sa.sql.column('created_at', sa.DateTime), sa.sql.column('updated_at', sa.DateTime), sa.sql.column('parent_id', sa.Integer), sa.sql.column('key', sa.String), sa.sql.column('value', sqlalchemy_utils.types.json.JSONType)) variables = sa.sql.table( 'variables', sa.sql.column('created_at', sa.DateTime), sa.sql.column('updated_at', sa.DateTime), sa.sql.column('association_id', sa.Integer), sa.sql.column('key_', sa.String), sa.sql.column('value_', sqlalchemy_utils.types.json.JSONType)) variable_association = sa.sql.table( 'variable_association', sa.sql.column('created_at', sa.DateTime), sa.sql.column('id', sa.Integer), sa.sql.column('discriminator', sa.String)) # A smarter query might be possible on Postgres, but I do not # believe common table expressions (CTEs) are available on MySQL, # and certainly not SQLite. Let's just keep it really simple for # now. At least key/values are copied over using select into, and # avoid serializing in/out of the database. for resource in connection.execute(resources.select()): variable_association_id_counter += 1 # add variable_association_id value... connection.execute( resources.update().where(resources.c.id == resource.id).values( variable_association_id=sa.literal( variable_association_id_counter))) # create specific association - there is an additional level # of indirection, hence "polymorphic association" connection.execute(variable_association.insert().values( created_at=timeutils.utcnow(), id=variable_association_id_counter, discriminator=resource_name)) # copy over into 'variables' connection.execute(variables.insert().from_select( variables.c.keys(), sa.select([ resource_variables.c.created_at, resource_variables.c.updated_at, # only insert variables associated with this resource sa.literal(variable_association_id_counter), resource_variables.c.key, resource_variables.c.value ]).where(resource_variables.c.parent_id == resource.id))) op.drop_table(resource_name + '_variables')
def upgrade(): connection = op.get_bind() inspector = reflection.Inspector.from_engine(connection) if "deployments" not in inspector.get_table_names(): # 7287df262dbc did not fail. nothing to do return envs = [env["uuid"] for env in connection.execute(envs_helper.select())] for deployment in connection.execute(deployments_helper.select()): if deployment["uuid"] in envs: # this deployment had been migrated by 7287df262dbc. Nothing to do continue status = "FAILED TO CREATE" spec = deployment.config extras = {} platform_data = None if isinstance(spec, dict) and ( # existing cloud is only one deployment engine which we # continue supporting spec.get("type", "") == "ExistingCloud" # We know only about one credential type and it doesn't require # writing additional plugins at the moment. and (set(spec["creds"]) == {"openstack"} or not spec["creds"])): status = STATUS_MAP[deployment.enum_deployments_status] extras = deployment.config.get("extra", {}) if "openstack" in spec["creds"]: spec = {"existing@openstack": spec["creds"]["openstack"]} creds = copy.deepcopy(spec["existing@openstack"]) platform_data = { "admin": creds.pop("admin", {}), "users": creds.pop("users", []) } platform_data["admin"].update(creds) for user in platform_data["users"]: user.update(creds) else: # empty deployment spec = {} connection.execute(envs_helper.insert(), [{ "uuid": deployment.uuid, "name": deployment.name, "description": "", "status": status, "spec": spec, "extras": extras, "created_at": deployment.created_at, "updated_at": timeutils.utcnow() }]) if platform_data: connection.execute(platforms_helper.insert(), [{ "uuid": str(uuid.uuid4()), "env_uuid": deployment.uuid, "status": "READY", "plugin_name": "existing@openstack", "plugin_spec": spec["existing@openstack"], "plugin_data": {}, "platform_name": "openstack", "platform_data": platform_data, "created_at": timeutils.utcnow(), "updated_at": timeutils.utcnow() }]) op.add_column("verifications", sa.Column("env_uuid", sa.String(36))) op.add_column("tasks", sa.Column("env_uuid", sa.String(36))) conn = op.get_bind() conn.execute( tasks_helper.update().values(env_uuid=tasks_helper.c.deployment_uuid)) conn.execute(verifications_helper.update().values( env_uuid=verifications_helper.c.deployment_uuid)) with op.batch_alter_table("tasks") as batch_op: batch_op.alter_column("env_uuid", nullable=False) batch_op.drop_index("task_deployment") batch_op.drop_column("deployment_uuid") with op.batch_alter_table("verifications") as batch_op: batch_op.alter_column("env_uuid", nullable=False) batch_op.drop_column("deployment_uuid") op.drop_index("resource_deployment_uuid", "resources") op.drop_index("resource_provider_name", "resources") op.drop_index("resource_type", "resources") op.drop_index("resource_provider_name_and_type", "resources") op.drop_table("resources") op.drop_index("deployment_uuid", "deployments") op.drop_index("deployment_parent_uuid", "deployments") op.drop_table("deployments")
def update_capabilities(self, cell_metadata):
    """Update cell capabilities for a cell."""
    self.last_seen = timeutils.utcnow()
    self.capabilities = cell_metadata
def _refresh_clean_steps(self, task): """Refresh the node's cached clean steps from the booted agent. Gets the node's clean steps from the booted agent and caches them. The steps are cached to make get_clean_steps() calls synchronous, and should be refreshed as soon as the agent boots to start cleaning or if cleaning is restarted because of a cleaning version mismatch. :param task: a TaskManager instance :raises: NodeCleaningFailure if the agent returns invalid results """ node = task.node previous_steps = node.driver_internal_info.get( 'agent_cached_clean_steps') LOG.debug( 'Refreshing agent clean step cache for node %(node)s. ' 'Previously cached steps: %(steps)s', { 'node': node.uuid, 'steps': previous_steps }) agent_result = self._client.get_clean_steps(node, task.ports).get( 'command_result', {}) missing = set(['clean_steps', 'hardware_manager_version']).difference(agent_result) if missing: raise exception.NodeCleaningFailure( _('agent get_clean_steps for node %(node)s returned an invalid ' 'result. Keys: %(keys)s are missing from result: %(result)s.' ) % ({ 'node': node.uuid, 'keys': missing, 'result': agent_result })) # agent_result['clean_steps'] looks like # {'HardwareManager': [{step1},{steps2}...], ...} steps = collections.defaultdict(list) for step_list in agent_result['clean_steps'].values(): for step in step_list: missing = set(['interface', 'step', 'priority']).difference(step) if missing: raise exception.NodeCleaningFailure( _('agent get_clean_steps for node %(node)s returned an ' 'invalid clean step. Keys: %(keys)s are missing from ' 'step: %(step)s.') % ({ 'node': node.uuid, 'keys': missing, 'step': step })) steps[step['interface']].append(step) # Save hardware manager version, steps, and date info = node.driver_internal_info info['hardware_manager_version'] = agent_result[ 'hardware_manager_version'] info['agent_cached_clean_steps'] = dict(steps) info['agent_cached_clean_steps_refreshed'] = str(timeutils.utcnow()) node.driver_internal_info = info node.save() LOG.debug( 'Refreshed agent clean step cache for node %(node)s: ' '%(steps)s', { 'node': node.uuid, 'steps': steps })
def update_capacities(self, capacities):
    """Update capacity information for a cell."""
    self.last_seen = timeutils.utcnow()
    self.capacities = capacities
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import timeutils from patron import db from patron import exception from patron.objects import aggregate from patron.tests.unit import fake_notifier from patron.tests.unit.objects import test_objects NOW = timeutils.utcnow().replace(microsecond=0) fake_aggregate = { 'created_at': NOW, 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'name': 'fake-aggregate', 'hosts': ['foo', 'bar'], 'metadetails': { 'this': 'that' }, } SUBS = {'metadata': 'metadetails'}
def _time_to_sync(self):
    """Is it time to sync the DB against our memory cache?"""
    diff = timeutils.utcnow() - self.last_cell_db_check
    return diff.seconds >= CONF.cells.db_check_interval
def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() # Create default notifier self.notifier = fake_notifier.get_fake_notifier() # Mock rpc get notifier with fake notifier method that joins all # notifications with the default notifier p = mock.patch('waterfall.rpc.get_notifier', side_effect=self._get_joined_notifier) p.start() # Unit tests do not need to use lazy gettext i18n.enable_lazy(False) test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) try: test_timeout = int(test_timeout) except ValueError: # If timeout value is invalid do not set a timeout. test_timeout = 0 if test_timeout > 0: self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) environ_enabled = (lambda var_name: strutils.bool_from_string( os.environ.get(var_name))) if environ_enabled('OS_STDOUT_CAPTURE'): stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) if environ_enabled('OS_STDERR_CAPTURE'): stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(waterfall_fixtures.StandardLogging()) rpc.add_extra_exmods("waterfall.tests.unit") self.addCleanup(rpc.clear_extra_exmods) self.addCleanup(rpc.cleanup) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' self.messaging_conf.response_timeout = 15 self.useFixture(self.messaging_conf) rpc.init(CONF) # NOTE(geguileo): This is required because _determine_obj_version_cap # and _determine_rpc_version_cap functions in waterfall.rpc.RPCAPI cache # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have # weird interactions between tests if we don't clear them before each # test. rpc.LAST_OBJ_VERSIONS = {} rpc.LAST_RPC_VERSIONS = {} conf_fixture.set_defaults(CONF) CONF([], default_config_files=[]) # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. self.start = timeutils.utcnow() CONF.set_default('connection', 'sqlite://', 'database') CONF.set_default('sqlite_synchronous', False, 'database') global _DB_CACHE if not _DB_CACHE: _DB_CACHE = Database(sqla_api, migration, sql_connection=CONF.database.connection, sqlite_db=CONF.database.sqlite_db, sqlite_clean_db='clean.sqlite') self.useFixture(_DB_CACHE) # NOTE(danms): Make sure to reset us back to non-remote objects # for each test to avoid interactions. Also, backup the object # registry. 
objects_base.WaterfallObject.indirection_api = None self._base_test_obj_backup = copy.copy( objects_base.WaterfallObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators mox_fixture = self.useFixture(moxstubout.MoxStubout()) self.mox = mox_fixture.mox self.stubs = mox_fixture.stubs self.addCleanup(CONF.reset) self.addCleanup(self._common_cleanup) self.injected = [] self._services = [] fake_notifier.stub_notifier(self.stubs) self.override_config('fatal_exception_format_errors', True) # This will be cleaned up by the NestedTempfile fixture lock_path = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') lockutils.set_defaults(lock_path) self.override_config('policy_file', os.path.join( os.path.abspath( os.path.join( os.path.dirname(__file__), '..', )), 'waterfall/tests/unit/policy.json'), group='oslo_policy') self._disable_osprofiler() # NOTE(geguileo): This is required because common get_by_id method in # waterfall.db.sqlalchemy.api caches get methods and if we use a mocked # get method in one test it would carry on to the next test. So we # clear out the cache. sqla_api._GET_METHODS = {}
def post(self, direct='', samples=None): """Post a list of new Samples to Telemetry. :param direct: a flag indicates whether the samples will be posted directly to storage or not. :param samples: a list of samples within the request body. """ rbac.enforce('create_samples', pecan.request) direct = strutils.bool_from_string(direct) if not samples: msg = _('Samples should be included in request body') raise base.ClientSideError(msg) now = timeutils.utcnow() auth_project = rbac.get_limited_to_project(pecan.request.headers) def_source = pecan.request.cfg.sample_source def_project_id = pecan.request.headers.get('X-Project-Id') def_user_id = pecan.request.headers.get('X-User-Id') published_samples = [] for s in samples: if self.meter_name != s.counter_name: raise wsme.exc.InvalidInput('counter_name', s.counter_name, 'should be %s' % self.meter_name) if s.message_id: raise wsme.exc.InvalidInput('message_id', s.message_id, 'The message_id must not be set') if s.counter_type not in sample.TYPES: raise wsme.exc.InvalidInput( 'counter_type', s.counter_type, 'The counter type must be: ' + ', '.join(sample.TYPES)) s.user_id = (s.user_id or def_user_id) s.project_id = (s.project_id or def_project_id) s.source = '%s:%s' % (s.project_id, (s.source or def_source)) s.timestamp = (s.timestamp or now) if auth_project and auth_project != s.project_id: # non admin user trying to cross post to another project_id auth_msg = 'can not post samples to other projects' raise wsme.exc.InvalidInput('project_id', s.project_id, auth_msg) published_sample = sample.Sample( name=s.counter_name, type=s.counter_type, unit=s.counter_unit, volume=s.counter_volume, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp.isoformat(), resource_metadata=utils.restore_nesting(s.resource_metadata, separator='.'), source=s.source) s.message_id = published_sample.id sample_dict = publisher_utils.meter_message_from_counter( published_sample, cfg.CONF.publisher.telemetry_secret) if direct: ts = timeutils.parse_isotime(sample_dict['timestamp']) sample_dict['timestamp'] = timeutils.normalize_time(ts) pecan.request.storage_conn.record_metering_data(sample_dict) else: published_samples.append(sample_dict) if not direct: pecan.request.notifier.sample( { 'user': def_user_id, 'tenant': def_project_id, 'is_admin': True }, 'telemetry.api', {'samples': published_samples}) return samples
def __init__(self, user_id=None, project_id=None, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, quota_class=None, user_name=None, project_name=None, service_catalog=None, instance_lock_checked=False, user_auth_plugin=None, **kwargs): """:param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param user_auth_plugin: The auth plugin for the current request's authentication data. :param kwargs: Extra arguments that might be present, but we ignore because they possibly came in from older rpc messages. """ user = kwargs.pop('user', None) tenant = kwargs.pop('tenant', None) super(RequestContext, self).__init__( auth_token=auth_token, user=user_id or user, tenant=project_id or tenant, domain=kwargs.pop('domain', None), user_domain=kwargs.pop('user_domain', None), project_domain=kwargs.pop('project_domain', None), is_admin=is_admin, read_only=kwargs.pop('read_only', False), show_deleted=kwargs.pop('show_deleted', False), request_id=request_id, resource_uuid=kwargs.pop('resource_uuid', None), overwrite=overwrite) # oslo_context's RequestContext.to_dict() generates this field, we can # safely ignore this as we don't use it. kwargs.pop('user_identity', None) if kwargs: LOG.warning(_LW('Arguments dropped when creating context: %s') % str(kwargs)) # FIXME(dims): user_id and project_id duplicate information that is # already present in the oslo_context's RequestContext. We need to # get rid of them. self.user_id = user_id self.project_id = project_id self.roles = roles or [] self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, six.string_types): timestamp = timeutils.parse_strtime(timestamp) self.timestamp = timestamp if service_catalog: # Only include required parts of service_catalog self.service_catalog = [s for s in service_catalog if s.get('type') in ('volume', 'volumev2', 'key-manager')] else: # if list is empty or none self.service_catalog = [] self.instance_lock_checked = instance_lock_checked # NOTE(markmc): this attribute is currently only used by the # rs_limits turnstile pre-processor. # See https://lists.launchpad.net/openstack/msg12200.html self.quota_class = quota_class self.user_name = user_name self.project_name = project_name self.is_admin = is_admin self.user_auth_plugin = user_auth_plugin if self.is_admin is None: self.is_admin = policy.check_is_admin(self)
def last_completed_audit_period(unit=None, before=None): """This method gives you the most recently *completed* audit period. arguments: units: string, one of 'hour', 'day', 'month', 'year' Periods normally begin at the beginning (UTC) of the period unit (So a 'day' period begins at midnight UTC, a 'month' unit on the 1st, a 'year' on Jan, 1) unit string may be appended with an optional offset like so: 'day@18' This will begin the period at 18:00 UTC. 'month@15' starts a monthly period on the 15th, and year@3 begins a yearly one on March 1st. before: Give the audit period most recently completed before <timestamp>. Defaults to now. returns: 2 tuple of datetimes (begin, end) The begin timestamp of this audit period is the same as the end of the previous. """ if not unit: unit = CONF.instance_usage_audit_period offset = 0 if '@' in unit: unit, offset = unit.split("@", 1) offset = int(offset) if before is not None: rightnow = before else: rightnow = timeutils.utcnow() if unit not in ('month', 'day', 'year', 'hour'): raise ValueError('Time period must be hour, day, month or year') if unit == 'month': if offset == 0: offset = 1 end = datetime.datetime(day=offset, month=rightnow.month, year=rightnow.year) if end >= rightnow: year = rightnow.year if 1 >= rightnow.month: year -= 1 month = 12 + (rightnow.month - 1) else: month = rightnow.month - 1 end = datetime.datetime(day=offset, month=month, year=year) year = end.year if 1 >= end.month: year -= 1 month = 12 + (end.month - 1) else: month = end.month - 1 begin = datetime.datetime(day=offset, month=month, year=year) elif unit == 'year': if offset == 0: offset = 1 end = datetime.datetime(day=1, month=offset, year=rightnow.year) if end >= rightnow: end = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 2) else: begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) elif unit == 'day': end = datetime.datetime(hour=offset, day=rightnow.day, month=rightnow.month, year=rightnow.year) if end >= rightnow: end = end - datetime.timedelta(days=1) begin = end - datetime.timedelta(days=1) elif unit == 'hour': end = rightnow.replace(minute=offset, second=0, microsecond=0) if end >= rightnow: end = end - datetime.timedelta(hours=1) begin = end - datetime.timedelta(hours=1) return (begin, end)
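# Illustrative sketch (not part of the original source): how the 'unit@offset'
# notation described in the docstring above plays out for a daily period that
# rolls over at 18:00 UTC. The timestamp is made up, and passing `before=`
# keeps the example independent of the real clock; this assumes
# last_completed_audit_period is in scope as defined above.
import datetime

fake_now = datetime.datetime(2023, 5, 10, 14, 30)
begin, end = last_completed_audit_period(unit='day@18', before=fake_now)
# The period ending 2023-05-10 18:00 is not yet complete at 14:30, so the
# most recently *completed* period runs from 05-08 18:00 to 05-09 18:00.
assert begin == datetime.datetime(2023, 5, 8, 18, 0)
assert end == datetime.datetime(2023, 5, 9, 18, 0)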
def test_already_finished(self):
    self.node_info.finished_at = timeutils.utcnow()
    self.assertRaisesRegex(utils.Error, 'already finished',
                           process.process, self.data)
    self.assertFalse(self.process_mock.called)
    self.assertFalse(self.find_mock.return_value.finished.called)
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Scheduler tests. """ from oslo_utils import timeutils from manila.scheduler.drivers import filter from manila.scheduler import host_manager from manila.scheduler.weighers import base_host as base_host_weigher SHARE_SERVICES_NO_POOLS = [ dict(id=1, host='host1', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2@back1', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host2@back2', topic='share', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow()), ] SERVICE_STATES_NO_POOLS = { 'host1': dict(share_backend_name='AAA', total_capacity_gb=512, free_capacity_gb=200, timestamp=None, reserved_percentage=0, provisioned_capacity_gb=312, max_over_subscription_ratio=1.0, thin_provisioning=False, snapshot_support=False, driver_handles_share_servers=False),
def test_redo_job(self): for job_type in self.all_job_types: job = self._prepare_job_element(job_type) jobs = [ # create an entirely new job { "job": job, "expected_error": 200 }, ] self._test_and_check(jobs) response = self.app.get('/v1.0/jobs') return_job = response.json jobs = return_job['jobs'] # redo a new job for job in jobs: response_1 = self.app.put('/v1.0/jobs/%(id)s' % {'id': job['id']}, expect_errors=True) self.assertEqual(response_1.status_int, 200) response_2 = self.app.put('/v1.0/jobs/123', expect_errors=True) self.assertEqual(response_2.status_int, 404) # redo a running job job_type_3 = constants.JT_NETWORK_UPDATE job_3 = self._prepare_job_element(job_type_3) resource_id_3 = '#'.join([ job_3['resource'][resource_id] for resource_type, resource_id in self.job_resource_map[job_type_3] ]) job_running_3 = db_api.register_job(self.context, job_3['project_id'], job_type_3, resource_id_3) self.assertEqual(constants.JS_Running, job_running_3['status']) response_3 = self.app.put('/v1.0/jobs/%(id)s' % {'id': job_running_3['id']}, expect_errors=True) self.assertEqual(response_3.status_int, 400) # redo a failed job job_type_4 = constants.JT_NETWORK_UPDATE job_4 = self._prepare_job_element(job_type_4) job_dict_4 = {"job": job_4, "expected_error": 200} response_4 = self.app.post_json('/v1.0/jobs', dict(job=job_dict_4['job']), expect_errors=True) return_job_4 = response_4.json self.assertEqual(response_4.status_int, 200) db_api.finish_job(self.context, return_job_4['job']['id'], False, timeutils.utcnow()) job_fail_4 = db_api.get_job(self.context, return_job_4['job']['id']) self.assertEqual(constants.JS_Fail, job_fail_4['status']) response_5 = self.app.put('/v1.0/jobs/%(id)s' % {'id': return_job_4['job']['id']}, expect_errors=True) self.assertEqual(response_5.status_int, 200) # redo a successful job job_type_6 = constants.JT_NETWORK_UPDATE job_6 = self._prepare_job_element(job_type_6) job_dict_6 = {"job": job_6, "expected_error": 200} response_6 = self.app.post_json('/v1.0/jobs', dict(job=job_dict_6['job']), expect_errors=True) return_job_6 = response_6.json with self.context.session.begin(): job_dict = { 'status': constants.JS_Success, 'timestamp': timeutils.utcnow(), 'extra_id': uuidutils.generate_uuid() } core.update_resource(self.context, models.AsyncJob, return_job_6['job']['id'], job_dict) job_succ_6 = db_api.get_job(self.context, return_job_6['job']['id']) self.assertEqual(constants.JS_Success, job_succ_6['status']) response_7 = self.app.put('/v1.0/jobs/%(id)s' % {'id': return_job_6['job']['id']}, expect_errors=True) self.assertEqual(response_7.status_int, 400)
_RESOURCE_PROVIDER_NAME = str(uuids.resource_name) _RESOURCE_CLASS_ID = 2 _ALLOCATION_ID = 2 _ALLOCATION_DB = { 'id': _ALLOCATION_ID, 'resource_provider_id': _RESOURCE_PROVIDER_ID, 'resource_class_id': _RESOURCE_CLASS_ID, 'consumer_uuid': uuids.fake_instance, 'consumer_id': 1, 'consumer_generation': 0, 'used': 8, 'user_id': 1, 'user_external_id': uuids.user_id, 'project_id': 1, 'project_external_id': uuids.project_id, 'updated_at': timeutils.utcnow(with_timezone=True), 'created_at': timeutils.utcnow(with_timezone=True), } _ALLOCATION_BY_CONSUMER_DB = { 'id': _ALLOCATION_ID, 'resource_provider_id': _RESOURCE_PROVIDER_ID, 'resource_class_id': _RESOURCE_CLASS_ID, 'consumer_uuid': uuids.fake_instance, 'consumer_id': 1, 'consumer_generation': 0, 'used': 8, 'user_id': 1, 'user_external_id': uuids.user_id, 'project_id': 1, 'project_external_id': uuids.project_id,
def soft_delete(self, session=None): """Mark this object as deleted.""" self.update_and_save({'deleted_at': timeutils.utcnow()}, session=session)
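# --- Hedged example (illustration only; the query/model names are assumed,
# not taken from the original code) ---
# soft_delete() above only stamps deleted_at; callers still have to filter
# those rows out. A minimal SQLAlchemy-style sketch of such a filter:
def exclude_soft_deleted(query, model):
    """Keep only rows whose deleted_at was never stamped by soft_delete()."""
    return query.filter(model.deleted_at.is_(None))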
def test_lifecycle(self): doc = '{"ttl": 100, "grace": 60}' # First, claim some messages body = self.simulate_post(self.claims_path, body=doc, headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_201) claimed = jsonutils.loads(body[0])['messages'] claim_href = self.srmock.headers_dict['Location'] message_href, params = claimed[0]['href'].split('?') # No more messages to claim self.simulate_post(self.claims_path, body=doc, query_string='limit=3', headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_204) # Listing messages, by default, won't include claimed, will echo body = self.simulate_get(self.messages_path, headers=self.headers, query_string="echo=true") self.assertEqual(self.srmock.status, falcon.HTTP_200) self._empty_message_list(body) # Listing messages, by default, won't include claimed, won't echo body = self.simulate_get(self.messages_path, headers=self.headers, query_string="echo=false") self.assertEqual(self.srmock.status, falcon.HTTP_200) self._empty_message_list(body) # List messages, include_claimed, but don't echo body = self.simulate_get(self.messages_path, query_string='include_claimed=true' '&echo=false', headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_200) self._empty_message_list(body) # List messages with a different client-id and echo=false. # Should return some messages headers = self.headers.copy() headers["Client-ID"] = str(uuid.uuid4()) body = self.simulate_get(self.messages_path, query_string='include_claimed=true' '&echo=false', headers=headers) self.assertEqual(self.srmock.status, falcon.HTTP_200) # Include claimed messages this time, and echo body = self.simulate_get(self.messages_path, query_string='include_claimed=true' '&echo=true', headers=self.headers) listed = jsonutils.loads(body[0]) self.assertEqual(self.srmock.status, falcon.HTTP_200) self.assertEqual(len(listed['messages']), len(claimed)) now = timeutils.utcnow() + datetime.timedelta(seconds=10) timeutils_utcnow = 'zaqar.openstack.common.timeutils.utcnow' with mock.patch(timeutils_utcnow) as mock_utcnow: mock_utcnow.return_value = now body = self.simulate_get(claim_href, headers=self.headers) claim = jsonutils.loads(body[0]) self.assertEqual(self.srmock.status, falcon.HTTP_200) self.assertEqual(claim['ttl'], 100) # NOTE(cpp-cabrera): verify that claim age is non-negative self.assertThat(claim['age'], matchers.GreaterThan(-1)) # Try to delete the message without submitting a claim_id self.simulate_delete(message_href, headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_403) # Delete the message and its associated claim self.simulate_delete(message_href, query_string=params, headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_204) # Try to get it from the wrong project headers = { 'Client-ID': str(uuid.uuid4()), 'X-Project-ID': 'bogusproject' } self.simulate_get(message_href, query_string=params, headers=headers) self.assertEqual(self.srmock.status, falcon.HTTP_404) # Get the message self.simulate_get(message_href, query_string=params, headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_404) # Update the claim new_claim_ttl = '{"ttl": 60, "grace": 60}' creation = timeutils.utcnow() self.simulate_patch(claim_href, body=new_claim_ttl, headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_204) # Get the claimed messages (again) body = self.simulate_get(claim_href, headers=self.headers) query = timeutils.utcnow() claim = jsonutils.loads(body[0]) message_href, params = 
claim['messages'][0]['href'].split('?') self.assertEqual(claim['ttl'], 60) estimated_age = timeutils.delta_seconds(creation, query) self.assertTrue(estimated_age > claim['age']) # Delete the claim self.simulate_delete(claim['href'], headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_204) # Try to delete a message with an invalid claim ID self.simulate_delete(message_href, query_string=params, headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_400) # Make sure it wasn't deleted! self.simulate_get(message_href, query_string=params, headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_200) # Try to get a claim that doesn't exist self.simulate_get(claim['href'], headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_404) # Try to update a claim that doesn't exist self.simulate_patch(claim['href'], body=doc, headers=self.headers) self.assertEqual(self.srmock.status, falcon.HTTP_404)
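# --- Hedged example (standalone illustration of the time-freezing pattern
# the lifecycle test uses; it patches oslo_utils directly, whereas the test
# patches zaqar's bundled timeutils copy) ---
import datetime
from unittest import mock

from oslo_utils import timeutils

frozen = timeutils.utcnow() + datetime.timedelta(seconds=10)
with mock.patch('oslo_utils.timeutils.utcnow', return_value=frozen):
    # Any age/expiry math done inside this block sees the later, fixed time.
    assert timeutils.utcnow() == frozen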
def __init__(self, user_id: Optional[str] = None, project_id: Optional[str] = None, is_admin: Optional[bool] = None, read_deleted: Optional[str] = "no", project_name: Optional[str] = None, remote_address: Optional[str] = None, timestamp=None, quota_class=None, service_catalog: Optional[dict] = None, user_auth_plugin=None, **kwargs): """Initialize RequestContext. :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. """ # NOTE(smcginnis): To keep it compatible for code using positional # args, explicitly set user_id and project_id in kwargs. kwargs.setdefault('user_id', user_id) kwargs.setdefault('project_id', project_id) super(RequestContext, self).__init__(is_admin=is_admin, **kwargs) self.project_name = project_name self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() elif isinstance(timestamp, str): timestamp = timeutils.parse_isotime(timestamp) self.timestamp = timestamp self.quota_class = quota_class self.message_resource_id = None self.message_resource_type = None self.message_action = None if service_catalog: # Only include required parts of service_catalog self.service_catalog = [ s for s in service_catalog if s.get('type') in ('identity', 'compute', 'object-store', 'image', 'key-manager') ] else: # if list is empty or none self.service_catalog = [] # We need to have RequestContext attributes defined # when policy.check_is_admin invokes request logging # to make it loggable. self.is_admin: Optional[bool] if self.is_admin is None: self.is_admin = policy.check_is_admin(self) elif self.is_admin and 'admin' not in self.roles: self.roles.append('admin') self.user_auth_plugin = user_auth_plugin
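# --- Hedged example (helper extracted for illustration; not part of the
# context class above) ---
# The constructor accepts no timestamp, an ISO 8601 string, or a datetime.
# The same normalization as a standalone helper:
from oslo_utils import timeutils


def normalize_timestamp(timestamp=None):
    """Default to utcnow(); parse ISO 8601 strings; pass datetimes through."""
    if not timestamp:
        return timeutils.utcnow()
    if isinstance(timestamp, str):
        return timeutils.parse_isotime(timestamp)
    return timestamp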
def test_get_one_and_get_all(self, mock_context): self.context.project_id = "fake_project_id" mock_context.return_value = self.context all_job_ids = {} all_job_project_ids = {} index = 0 for job_type in self.all_job_types: if index == 0: # the first job has a project ID that differs from # context.project_id job = self._prepare_job_element(job_type) else: job = self._prepare_job_element(job_type, self.context.project_id) job = {"job": job, "expected_error": 200} back_jobid = self._test_and_obtain_id(job) all_job_ids[index] = back_jobid all_job_project_ids[job_type] = job['job']['project_id'] index = index + 1 service_uris = ['jobs', 'jobs/detail'] amount_of_all_jobs = len(self.all_job_types) # with no filters all jobs are returned for service_uri in service_uris: response_1 = self.app.get('/v1.0/%(service_uri)s' % {'service_uri': service_uri}) return_jobs_1 = response_1.json self.assertEqual(amount_of_all_jobs - 1, len(return_jobs_1['jobs'])) self.assertIn('status', response_1) self.assertIn('resource', response_1) self.assertIn('project_id', response_1) self.assertIn('id', response_1) self.assertIn('timestamp', response_1) self.assertIn('type', response_1) self.assertNotIn('extra_id', response_1) self.assertNotIn('resource_id', response_1) # use job status filter response_2 = self.app.get('/v1.0/jobs?status=new') return_jobs_2 = response_2.json self.assertEqual(amount_of_all_jobs - 1, len(return_jobs_2['jobs'])) response = self.app.get('/v1.0/jobs?status=fail') return_jobs_3 = response.json self.assertEqual(0, len(return_jobs_3['jobs'])) amount_of_fail_jobs = int(amount_of_all_jobs / 3) for i in xrange(amount_of_fail_jobs): db_api.finish_job(self.context, all_job_ids[i], False, timeutils.utcnow()) amount_of_succ_jobs = int(amount_of_all_jobs / 3) for i in xrange(amount_of_succ_jobs): db_api.finish_job(self.context, all_job_ids[amount_of_fail_jobs + i], True, timeutils.utcnow()) for service_uri in service_uris: response = self.app.get('/v1.0/%(service_uri)s?status=fail' % {'service_uri': service_uri}) return_jobs = response.json self.assertEqual(amount_of_fail_jobs - 1, len(return_jobs['jobs'])) response = self.app.get('/v1.0/%(service_uri)s?status=success' '' % {'service_uri': service_uri}) return_jobs = response.json self.assertEqual(amount_of_succ_jobs, len(return_jobs['jobs'])) # project ID filter in URL query string will be ignored, and # only the project ID in which the user is authorized will # be used as filter. 
response = self.app.get('/v1.0/%(service_uri)s' % {'service_uri': service_uri}) return_job = response.json response1 = self.app.get( '/v1.0/%(service_uri)s?project_id=%(project_id)s' % { 'service_uri': service_uri, 'project_id': uuidutils.generate_uuid() }) return_job1 = response1.json response2 = self.app.get( '/v1.0/%(service_uri)s?project_id=%(project_id)s' % { 'service_uri': service_uri, 'project_id': 'fake_project_id' }) return_job2 = response2.json self.assertEqual(len(return_job2['jobs']), len(return_job1['jobs'])) self.assertEqual(len(return_job['jobs']), len(return_job2['jobs'])) # use job type filter count = 1 for job_type in self.all_job_types: response = self.app.get('/v1.0/%(service_uri)s?type=%(type)s' '' % { 'service_uri': service_uri, 'type': job_type }) return_job = response.json if count == 1: self.assertEqual(0, len(return_job['jobs'])) else: self.assertEqual(1, len(return_job['jobs'])) count += 1 # combine job type and job status filter for i in xrange(1, amount_of_all_jobs): if i < amount_of_fail_jobs: # this aims to test service "/v1.0/jobs/{id}" response_1 = self.app.get('/v1.0/jobs/%(id)s' % {'id': all_job_ids[i]}) return_job_1 = response_1.json response_2 = self.app.get( '/v1.0/%(service_uri)s?' 'type=%(type)s&' 'status=%(status)s' % { 'service_uri': service_uri, 'type': return_job_1['job']['type'], 'status': 'fail' }) return_job_2 = response_2.json self.assertEqual(1, len(return_job_2['jobs'])) elif ((i >= amount_of_fail_jobs) and (i < amount_of_fail_jobs + amount_of_succ_jobs)): # those jobs are set to 'success' and they are moved to # job log. their job ids are not stored in all_job_ids job_type = self.all_job_types[i] response = self.app.get( '/v1.0/%(service_uri)s?' 'type=%(type)s&status=%(status)s' % { 'service_uri': service_uri, 'type': job_type, 'status': 'success' }) return_job = response.json self.assertEqual(1, len(return_job['jobs'])) response_2 = self.app.get( '/v1.0/%(service_uri)s?status=%(status)s' '&type=%(type)s' % { 'service_uri': service_uri, 'status': "success-x", 'type': job_type }) return_job_2 = response_2.json self.assertEqual(0, len(return_job_2['jobs'])) else: response_1 = self.app.get('/v1.0/jobs/%(id)s' % {'id': all_job_ids[i]}) return_job_1 = response_1.json response_2 = self.app.get( '/v1.0/%(service_uri)s?' 'type=%(type)s&status=%(status)s' % { 'service_uri': service_uri, 'type': return_job_1['job']['type'], 'status': 'new' }) return_job_2 = response_2.json self.assertEqual(1, len(return_job_2['jobs'])) response_3 = self.app.get( '/v1.0/%(service_uri)s?status=%(status)s' '&type=%(type)s' % { 'service_uri': service_uri, 'status': "new-x", 'type': return_job_1['job']['type'] }) return_job_3 = response_3.json self.assertEqual(0, len(return_job_3['jobs'])) # use unsupported filter, it will raise 400 error response = self.app.get('/v1.0/%(service_uri)s?' 
'fake_filter=%(fake_filter)s' '' % { 'service_uri': service_uri, 'fake_filter': "fake_filter" }, expect_errors=True) self.assertEqual(response.status_int, 400) # use invalid filter, it will return empty set response = self.app.get('/v1.0/%(service_uri)s?status=%(status)s' '' % { 'service_uri': service_uri, 'status': "new-x" }) return_job = response.json self.assertEqual(0, len(return_job['jobs'])) @patch.object(context, 'extract_context_from_environ', new=fake_admin_context) def test_get_job_schemas(self): response = self.app.get('/v1.0/jobs/schemas') return_job_schemas = response.json job_schemas = [] for job_type in self.all_job_types: job = {} resource = [] for resource_type, resource_id in ( self.job_resource_map[job_type]): resource.append(resource_id) job['resource'] = resource job['type'] = job_type job_schemas.append(job) self.assertEqual(job_schemas, return_job_schemas['schemas'])
def test_get_instance_diagnostics(self): instance_ref, network_info = self._get_running_instance(obj=True) instance_ref['launched_at'] = timeutils.utcnow() self.connection.get_instance_diagnostics(instance_ref)
def provide_ems(self, requester, netapp_backend, app_version, server_type="cluster"): """Provide ems with volume stats for the requester. :param server_type: cluster or 7mode. """ def _create_ems(netapp_backend, app_version, server_type): """Create ems API request.""" ems_log = netapp_api.NaElement('ems-autosupport-log') host = socket.getfqdn() or 'Cinder_node' if server_type == "cluster": dest = "cluster node" else: dest = "7 mode controller" ems_log.add_new_child('computer-name', host) ems_log.add_new_child('event-id', '0') ems_log.add_new_child('event-source', 'Cinder driver %s' % netapp_backend) ems_log.add_new_child('app-version', app_version) ems_log.add_new_child('category', 'provisioning') ems_log.add_new_child('event-description', 'OpenStack Cinder connected to %s' % dest) ems_log.add_new_child('log-level', '6') ems_log.add_new_child('auto-support', 'false') return ems_log def _create_vs_get(): """Create vs_get API request.""" vs_get = netapp_api.NaElement('vserver-get-iter') vs_get.add_new_child('max-records', '1') query = netapp_api.NaElement('query') query.add_node_with_children('vserver-info', **{'vserver-type': 'node'}) vs_get.add_child_elem(query) desired = netapp_api.NaElement('desired-attributes') desired.add_node_with_children( 'vserver-info', **{ 'vserver-name': '', 'vserver-type': '' }) vs_get.add_child_elem(desired) return vs_get def _get_cluster_node(na_server): """Get the cluster node for ems.""" na_server.set_vserver(None) vs_get = _create_vs_get() res = na_server.invoke_successfully(vs_get) if (res.get_child_content('num-records') and int(res.get_child_content('num-records')) > 0): attr_list = res.get_child_by_name('attributes-list') vs_info = attr_list.get_child_by_name('vserver-info') vs_name = vs_info.get_child_content('vserver-name') return vs_name return None do_ems = True if hasattr(requester, 'last_ems'): sec_limit = 3559 if not (timeutils.is_older_than(requester.last_ems, sec_limit)): do_ems = False if do_ems: na_server = copy.copy(self.connection) na_server.set_timeout(25) ems = _create_ems(netapp_backend, app_version, server_type) try: if server_type == "cluster": api_version = na_server.get_api_version() if api_version: major, minor = api_version else: raise netapp_api.NaApiError( code='Not found', message='No API version found') if major == 1 and minor > 15: node = getattr(requester, 'vserver', None) else: node = _get_cluster_node(na_server) if node is None: raise netapp_api.NaApiError(code='Not found', message='No vserver found') na_server.set_vserver(node) else: na_server.set_vfiler(None) na_server.invoke_successfully(ems, True) LOG.debug("ems executed successfully.") except netapp_api.NaApiError as e: LOG.warning(_LW("Failed to invoke ems. Message : %s"), e) finally: requester.last_ems = timeutils.utcnow()
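# --- Hedged example (pattern sketch, not the NetApp driver itself; the
# helper and parameter names are assumptions) ---
# The EMS logic above is a rate limiter: act at most once per window, and
# stamp the attempt time whether or not the call succeeds. The same shape as
# a reusable helper, keeping the 3559-second window from the code above:
from oslo_utils import timeutils


def throttled_call(requester, send, sec_limit=3559):
    """Invoke send() at most once per sec_limit seconds, stamping last_ems."""
    last = getattr(requester, 'last_ems', None)
    if last is not None and not timeutils.is_older_than(last, sec_limit):
        return False
    try:
        send()
        return True
    finally:
        # Stamp the attempt time even if send() raised, as the driver does.
        requester.last_ems = timeutils.utcnow()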
def _cast_create_volume(self, context, request_spec, filter_properties): source_volid = request_spec['source_volid'] source_replicaid = request_spec['source_replicaid'] volume_id = request_spec['volume_id'] volume = request_spec['volume'] snapshot_id = request_spec['snapshot_id'] image_id = request_spec['image_id'] cgroup_id = request_spec['consistencygroup_id'] host = None cgsnapshot_id = request_spec['cgsnapshot_id'] if cgroup_id: # If cgroup_id existed, we should cast volume to the scheduler # to choose a proper pool whose backend is same as CG's backend. cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id) # FIXME(wanghao): CG_backend got added before request_spec was # converted to versioned objects. We should make sure that this # will be handled by object version translations once we add # RequestSpec object. request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host) elif snapshot_id and CONF.snapshot_same_host: # NOTE(Rongze Zhu): A simple solution for bug 1008866. # # If snapshot_id is set and CONF.snapshot_same_host is True, make # the call create volume directly to the volume host where the # snapshot resides instead of passing it through the scheduler, so # snapshot can be copied to the new volume. snapshot = objects.Snapshot.get_by_id(context, snapshot_id) source_volume_ref = objects.Volume.get_by_id( context, snapshot.volume_id) host = source_volume_ref.host elif source_volid: source_volume_ref = objects.Volume.get_by_id(context, source_volid) host = source_volume_ref.host elif source_replicaid: source_volume_ref = objects.Volume.get_by_id( context, source_replicaid) host = source_volume_ref.host if not host: # Cast to the scheduler and let it handle whatever is needed # to select the target host for this volume. self.scheduler_rpcapi.create_volume( context, CONF.volume_topic, volume_id, snapshot_id=snapshot_id, image_id=image_id, request_spec=request_spec, filter_properties=filter_properties, volume=volume) else: # Bypass the scheduler and send the request directly to the volume # manager. volume.host = host volume.scheduled_at = timeutils.utcnow() volume.save() if not cgsnapshot_id: self.volume_rpcapi.create_volume(context, volume, volume.host, request_spec, filter_properties, allow_reschedule=False)
class TestNotificationBase(test.NoDBTestCase): @base.NovaObjectRegistry.register_if(False) class TestObject(base.NovaObject): VERSION = '1.0' fields = { 'field_1': fields.StringField(), 'field_2': fields.IntegerField(), 'not_important_field': fields.IntegerField(), 'lazy_field': fields.IntegerField() } def obj_load_attr(self, attrname): if attrname == 'lazy_field': self.lazy_field = 42 else: raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s not lazy-loadable' % attrname) def __init__(self, not_important_field): super(TestNotificationBase.TestObject, self).__init__() # field1 and field_2 simulates that some fields are initialized # outside of the object's ctor self.not_important_field = not_important_field @base.NovaObjectRegistry.register_if(False) class TestNotificationPayload(notification.NotificationPayloadBase): VERSION = '1.0' SCHEMA = { 'field_1': ('source_field', 'field_1'), 'field_2': ('source_field', 'field_2'), 'lazy_field': ('source_field', 'lazy_field') } fields = { 'extra_field': fields.StringField(), # filled by ctor # filled by the schema 'field_1': fields.StringField(nullable=True), 'field_2': fields.IntegerField(), # filled by the schema 'lazy_field': fields.IntegerField() # filled by the schema } def __init__(self, extra_field, source_field): super(TestNotificationBase.TestNotificationPayload, self).__init__() self.extra_field = extra_field self.populate_schema(source_field=source_field) @base.NovaObjectRegistry.register_if(False) class TestNotificationPayloadEmptySchema( notification.NotificationPayloadBase): VERSION = '1.0' fields = { 'extra_field': fields.StringField(), # filled by ctor } def __init__(self, extra_field): super(TestNotificationBase.TestNotificationPayloadEmptySchema, self).__init__() self.extra_field = extra_field @notification.notification_sample('test-update-1.json') @notification.notification_sample('test-update-2.json') @base.NovaObjectRegistry.register_if(False) class TestNotification(notification.NotificationBase): VERSION = '1.0' fields = { 'payload': fields.ObjectField('TestNotificationPayload') } @base.NovaObjectRegistry.register_if(False) class TestNotificationEmptySchema(notification.NotificationBase): VERSION = '1.0' fields = { 'payload': fields.ObjectField('TestNotificationPayloadEmptySchema') } fake_service = { 'created_at': timeutils.utcnow().replace(microsecond=0), 'updated_at': None, 'deleted_at': None, 'deleted': False, 'id': 123, 'uuid': uuids.service, 'host': 'fake-host', 'binary': 'nova-fake', 'topic': 'fake-service-topic', 'report_count': 1, 'forced_down': False, 'disabled': False, 'disabled_reason': None, 'last_seen_up': None, 'version': 1} expected_payload = { 'nova_object.name': 'TestNotificationPayload', 'nova_object.data': { 'extra_field': 'test string', 'field_1': 'test1', 'field_2': 15, 'lazy_field': 42}, 'nova_object.version': '1.0', 'nova_object.namespace': 'nova'} def setUp(self): super(TestNotificationBase, self).setUp() with mock.patch('nova.db.service_update') as mock_db_service_update: self.service_obj = objects.Service(context=mock.sentinel.context, id=self.fake_service['id']) self.service_obj.obj_reset_changes(['version']) mock_db_service_update.return_value = self.fake_service self.service_obj.save() self.my_obj = self.TestObject(not_important_field=13) self.my_obj.field_1 = 'test1' self.my_obj.field_2 = 15 self.payload = self.TestNotificationPayload( extra_field='test string', source_field=self.my_obj) self.notification = self.TestNotification( event_type=notification.EventType( 
object='test_object', action=fields.NotificationAction.UPDATE, phase=fields.NotificationPhase.START), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=self.payload) def _verify_notification(self, mock_notifier, mock_context, expected_event_type, expected_payload): mock_notifier.prepare.assert_called_once_with( publisher_id='nova-fake:fake-host') mock_notify = mock_notifier.prepare.return_value.info self.assertTrue(mock_notify.called) self.assertEqual(mock_notify.call_args[0][0], mock_context) self.assertEqual(mock_notify.call_args[1]['event_type'], expected_event_type) actual_payload = mock_notify.call_args[1]['payload'] self.assertJsonEqual(expected_payload, actual_payload) @mock.patch('nova.rpc.LEGACY_NOTIFIER') @mock.patch('nova.rpc.NOTIFIER') def test_emit_notification(self, mock_notifier, mock_legacy): mock_context = mock.Mock() mock_context.to_dict.return_value = {} self.notification.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update.start', expected_payload=self.expected_payload) self.assertFalse(mock_legacy.called) @mock.patch('nova.rpc.NOTIFIER') def test_emit_with_host_and_binary_as_publisher(self, mock_notifier): noti = self.TestNotification( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher(host='fake-host', binary='nova-fake'), priority=fields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload=self.expected_payload) @mock.patch('nova.rpc.LEGACY_NOTIFIER') @mock.patch('nova.rpc.NOTIFIER') def test_emit_event_type_without_phase(self, mock_notifier, mock_legacy): noti = self.TestNotification( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload=self.expected_payload) self.assertFalse(mock_legacy.called) @mock.patch('nova.rpc.NOTIFIER') def test_not_possible_to_emit_if_not_populated(self, mock_notifier): payload = self.TestNotificationPayload( extra_field='test string', source_field=self.my_obj) payload.populated = False noti = self.TestNotification( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=payload) mock_context = mock.Mock() self.assertRaises(AssertionError, noti.emit, mock_context) self.assertFalse(mock_notifier.called) def test_lazy_load_source_field(self): my_obj = self.TestObject(not_important_field=13) my_obj.field_1 = 'test1' my_obj.field_2 = 15 payload = self.TestNotificationPayload(extra_field='test string', source_field=my_obj) self.assertEqual(42, payload.lazy_field) def test_uninited_source_field_defaulted_to_none(self): my_obj = self.TestObject(not_important_field=13) # intentionally not initializing field_1 to simulate an uninited but # nullable field 
my_obj.field_2 = 15 payload = self.TestNotificationPayload(extra_field='test string', source_field=my_obj) self.assertIsNone(payload.field_1) def test_uninited_source_field_not_nullable_payload_field_fails(self): my_obj = self.TestObject(not_important_field=13) # intentionally not initializing field_2 to simulate an uninited no # nullable field my_obj.field_1 = 'test1' self.assertRaises(ValueError, self.TestNotificationPayload, extra_field='test string', source_field=my_obj) @mock.patch('nova.rpc.NOTIFIER') def test_empty_schema(self, mock_notifier): non_populated_payload = self.TestNotificationPayloadEmptySchema( extra_field='test string') noti = self.TestNotificationEmptySchema( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=non_populated_payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload= {'nova_object.name': 'TestNotificationPayloadEmptySchema', 'nova_object.data': {'extra_field': u'test string'}, 'nova_object.version': '1.0', 'nova_object.namespace': 'nova'}) def test_sample_decorator(self): self.assertEqual(2, len(self.TestNotification.samples)) self.assertIn('test-update-1.json', self.TestNotification.samples) self.assertIn('test-update-2.json', self.TestNotification.samples) @mock.patch('nova.notifications.objects.base.NotificationBase._emit') @mock.patch('nova.rpc.NOTIFIER') def test_payload_is_not_generated_if_notifier_is_not_enabled( self, mock_notifier, mock_emit): mock_notifier.is_enabled.return_value = False payload = self.TestNotificationPayload( extra_field='test string', source_field=self.my_obj) noti = self.TestNotification( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=payload) mock_context = mock.Mock() noti.emit(mock_context) self.assertFalse(payload.populated) self.assertFalse(mock_emit.called) @mock.patch('nova.notifications.objects.base.NotificationBase._emit') def test_payload_is_not_generated_if_notification_format_is_unversioned( self, mock_emit): self.flags(notification_format='unversioned', group='notifications') payload = self.TestNotificationPayload( extra_field='test string', source_field=self.my_obj) noti = self.TestNotification( event_type=notification.EventType( object='test_object', action=fields.NotificationAction.UPDATE), publisher=notification.NotificationPublisher.from_service_obj( self.service_obj), priority=fields.NotificationPriority.INFO, payload=payload) mock_context = mock.Mock() noti.emit(mock_context) self.assertFalse(payload.populated) self.assertFalse(mock_emit.called)
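# --- Hedged note (optional alternative to hand-rolled utcnow patches in
# tests like the ones above; treat the exact fixture API as an assumption to
# verify against the installed oslo.utils version) ---
from oslo_utils import fixture as utils_fixture

# Inside a fixtures/testtools-based test case it would be used roughly as:
#     time_fixture = self.useFixture(utils_fixture.TimeFixture())
#     time_fixture.advance_time_seconds(30)  # utcnow() now reports +30s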
def service_expired_time(with_timezone=False): return (timeutils.utcnow(with_timezone=with_timezone) - datetime.timedelta(seconds=CONF.service_down_time))
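# --- Hedged usage sketch (the service record layout and helper name are
# assumed for illustration) ---
# A service whose last heartbeat predates the cutoff returned above would be
# considered down:
def service_is_down(service_ref):
    """True when the last heartbeat is older than service_expired_time()."""
    last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
    return last_heartbeat < service_expired_time()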
def auth_token_create(context, token): fake_token = FakeToken(created_at=timeutils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token
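# --- Hedged usage sketch (the token payload keys are illustrative only) ---
# The fake above indexes each created token both by its hash and by its id:
token = auth_token_create(None, {'token_hash': 'abc123', 'id': 7})
assert FakeAuthDatabase.data['abc123'] is token
assert FakeAuthDatabase.data['id_7'] is token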