def test_utcnow(self):
    """Verify timeutils.utcnow() tracks the (mocked) datetime clock."""
    # Freeze datetime.datetime so utcnow() is deterministic inside the block.
    with mock.patch('datetime.datetime') as datetime_mock:
        datetime_mock.utcnow.return_value = self.skynet_self_aware_time
        self.assertEqual(timeutils.utcnow(), self.skynet_self_aware_time)
    # NOTE(review): placed outside the mock scope on reconstruction —
    # inside it, this would contradict the assertEqual above. Confirm
    # the original indentation intent.
    self.assertFalse(timeutils.utcnow() == self.skynet_self_aware_time)
    self.assertTrue(timeutils.utcnow())
def _wait_on_task_execution(self, max_wait=5):
    """Wait until all the tasks have finished execution and are in
    state of success or failure.

    :param max_wait: maximum number of seconds to poll before giving up
    """
    start = timeutils.utcnow()
    # wait for maximum of seconds defined by max_wait
    while timeutils.delta_seconds(start, timeutils.utcnow()) < max_wait:
        wait = False
        # Verify that no task is in status of pending or processing
        path = "/v2/tasks"
        res, content = self.http.request(path, 'GET',
                                         headers=minimal_task_headers())
        content_dict = json.loads(content)
        self.assertEqual(http_client.OK, res.status)
        res_tasks = content_dict['tasks']
        if len(res_tasks) != 0:
            for task in res_tasks:
                if task['status'] in ('pending', 'processing'):
                    wait = True
                    break
        if wait:
            # Bug #1541487: we must give time to the server to execute the
            # task, but the server is run in the same process than the
            # test. Use eventlet to give the control to the pending server
            # task.
            eventlet.sleep(0.05)
            continue
        else:
            break
def _wait_on_task_execution(self, max_wait=5):
    """Wait until all the tasks have finished execution and are in
    state of success or failure.

    :param max_wait: maximum number of seconds to poll (generalized from
                     the previously hard-coded 5; default preserves the
                     old behavior for existing callers)
    """
    start = timeutils.utcnow()
    while timeutils.delta_seconds(start, timeutils.utcnow()) < max_wait:
        wait = False
        # Verify that no task is in status of pending or processing
        path = "/v2/tasks"
        res, content = self.http.request(path, 'GET',
                                         headers=minimal_task_headers())
        content_dict = json.loads(content)
        self.assertEqual(200, res.status)
        res_tasks = content_dict['tasks']
        if len(res_tasks) != 0:
            for task in res_tasks:
                if task['status'] in ('pending', 'processing'):
                    wait = True
                    break
        if wait:
            # Yield briefly so the in-process server can advance the task.
            time.sleep(0.05)
        else:
            break
class ArtifactBase(models.ModelBase, models.TimestampMixin):
    """Base class for Artifact Models."""

    __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
    __table_initialized__ = False
    # Attributes that callers must not set directly.
    __protected_attributes__ = set(["created_at", "updated_at"])

    # Timestamps default to "now" at INSERT; updated_at refreshes on UPDATE.
    created_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False)
    updated_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False, onupdate=lambda: timeutils.utcnow())

    def save(self, session=None):
        """Persist this object, using the default session if none given."""
        # Local import — presumably avoids a circular import; confirm.
        from glance.db.sqlalchemy import api as db_api
        super(ArtifactBase, self).save(session or db_api.get_session())

    def keys(self):
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()

    def to_dict(self):
        """Return a dict of mapped column name -> value."""
        d = {}
        for c in self.__table__.columns:
            d[c.name] = self[c.name]
        return d
def _create_or_update(context, values, artifact_id, session, type_name,
                      type_version=None):
    """Create a new artifact or update an existing one inside a transaction.

    :param values: artifact field values; deep-copied so the caller's dict
                   is never mutated
    :param artifact_id: if truthy, update that artifact; otherwise create
    :raises exception.ArtifactDuplicateNameTypeVersion: on a duplicate
            (type, name, version) row
    :returns: the saved Artifact model
    """
    values = copy.deepcopy(values)
    with session.begin():
        _set_version_fields(values)
        _validate_values(values)
        _drop_protected_attrs(models.Artifact, values)
        if artifact_id:
            # update existing artifact
            state = values.get('state')
            show_level = ga.Showlevel.BASIC
            if state is not None:
                if state == 'active':
                    # Activation also stamps the publication time.
                    show_level = ga.Showlevel.DIRECT
                    values['published_at'] = timeutils.utcnow()
                if state == 'deleted':
                    values['deleted_at'] = timeutils.utcnow()
            artifact = _get(context, artifact_id, session, type_name,
                            type_version, show_level=show_level)
            # Reject illegal state transitions before applying changes.
            _validate_transition(artifact.state,
                                 values.get('state') or artifact.state)
        else:
            # create new artifact
            artifact = models.Artifact()
            if 'id' not in values:
                artifact.id = str(uuid.uuid4())
            else:
                artifact.id = values['id']
        # Composite sub-objects are popped out of `values` and attached
        # through their dedicated helpers.
        if 'tags' in values:
            tags = values.pop('tags')
            artifact.tags = _do_tags(artifact, tags)
        if 'properties' in values:
            properties = values.pop('properties', {})
            artifact.properties = _do_properties(artifact, properties)
        if 'blobs' in values:
            blobs = values.pop('blobs')
            artifact.blobs = _do_blobs(artifact, blobs)
        if 'dependencies' in values:
            dependencies = values.pop('dependencies')
            _do_dependencies(artifact, dependencies, session)
        if values.get('state', None) == 'publish':
            # Publishing pins the whole transitive dependency closure.
            artifact.dependencies.extend(
                _do_transitive_dependencies(artifact, session))
        artifact.update(values)
        try:
            artifact.save(session=session)
        except db_exc.DBDuplicateEntry:
            LOG.warn(_LW("Artifact with the specified type, name and version "
                         "already exists"))
            raise exception.ArtifactDuplicateNameTypeVersion()
    return artifact
class GlanceBase(models.ModelBase, models.TimestampMixin):
    """Base class for Glance Models."""

    __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
    __table_initialized__ = False
    # Attributes that callers must not set directly.
    __protected_attributes__ = set(
        ["created_at", "updated_at", "deleted_at", "deleted"])

    def save(self, session=None):
        """Persist this object, using the default session if none given."""
        # Local import — presumably avoids a circular import; confirm.
        from glance.db.sqlalchemy import api as db_api
        super(GlanceBase, self).save(session or db_api.get_session())

    created_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False)
    # TODO(vsergeyev): Column `updated_at` have no default value in
    #                  OpenStack common code. We should decide, is this value
    #                  required and make changes in oslo (if required) or
    #                  in glance (if not).
    updated_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=True, onupdate=lambda: timeutils.utcnow())
    # TODO(boris-42): Use SoftDeleteMixin instead of deleted Column after
    #                 migration that provides UniqueConstraints and change
    #                 type of this column.
    deleted_at = Column(DateTime)
    deleted = Column(Boolean, nullable=False, default=False)

    def delete(self, session=None):
        """Delete this object."""
        # Soft delete: flag the row and stamp the deletion time.
        self.deleted = True
        self.deleted_at = timeutils.utcnow()
        self.save(session=session)

    def keys(self):
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()

    def to_dict(self):
        """Return a serializable copy of the instance's attributes."""
        d = self.__dict__.copy()
        # NOTE(flaper87): Remove
        # private state instance
        # It is not serializable
        # and causes CircularReference
        d.pop("_sa_instance_state")
        return d
class GlanceMetadefBase(models.TimestampMixin):
    """Base class for Glance Metadef Models."""

    __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
    __table_initialized__ = False
    # Attributes that callers must not set directly.
    __protected_attributes__ = set(["created_at", "updated_at"])

    # Timestamps default to "now" at INSERT; updated_at refreshes on UPDATE.
    created_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False)
    # TODO(wko): Column `updated_at` have no default value in
    #            OpenStack common code. We should decide, is this value
    #            required and make changes in oslo (if required) or
    #            in glance (if not).
    updated_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=True, onupdate=lambda: timeutils.utcnow())
def test_task_invalid_status(self):
    """domain.Task must reject an unrecognized status value."""
    bogus_status = 'blah'
    now = timeutils.utcnow()
    self.assertRaises(exception.InvalidTaskStatus,
                      domain.Task,
                      str(uuid.uuid4()),
                      task_type='import',
                      status=bogus_status,
                      owner=None,
                      expires_at=None,
                      created_at=now,
                      updated_at=now,
                      task_input=None,
                      message=None,
                      result=None)
def get_extra_fixture(self, id, name, **kwargs):
    """Build a non-deleted fixture; updated_at defaults to created_at."""
    created = kwargs.pop('created_at', timeutils.utcnow())
    updated = kwargs.pop('updated_at', created)
    return self.get_fixture(id=id, name=name, deleted=False,
                            deleted_at=None, created_at=created,
                            updated_at=updated, **kwargs)
def _create_tasks(self):
    """Insert four task fixtures whose timestamps are 5 seconds apart."""
    base = timeutils.utcnow()
    stamps = [base + datetime.timedelta(seconds=5 * idx)
              for idx in range(4)]
    self.tasks = [
        _db_fixture(UUID1, owner=TENANT1,
                    created_at=stamps[0], updated_at=stamps[0]),
        # FIXME(venkatesh): change the type to include clone and export
        # once they are included as a valid types under Task domain model.
        _db_fixture(UUID2, owner=TENANT2, type='import',
                    created_at=stamps[1], updated_at=stamps[1]),
        _db_fixture(UUID3, owner=TENANT3, type='import',
                    created_at=stamps[2], updated_at=stamps[2]),
        _db_fixture(UUID4, owner=TENANT4, type='import',
                    created_at=stamps[3], updated_at=stamps[3]),
    ]
    for task in self.tasks:
        self.db.task_create(None, task)
def test_get_index_sort_updated_at_desc(self):
    """
    Tests that the /images registry API returns list of public images
    sorted by updated_at in descending order.
    """
    base = timeutils.utcnow()
    later = base + datetime.timedelta(seconds=5)

    uuid3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid3, created_at=None,
                                         updated_at=later))

    uuid4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid4, created_at=None,
                                         updated_at=base))

    images = self.client.get_images(sort_key='updated_at',
                                    sort_dir='desc')
    # Newest update first.
    self.assertEqualImages(images, (uuid3, uuid4, UUID2), unjsonify=False)
def test_get_index_sort_updated_at_desc(self):
    """Tests that the registry API returns list of public images.

    Must be sorted by updated_at in descending order.
    """
    later = timeutils.utcnow() + datetime.timedelta(seconds=10)
    latest = later + datetime.timedelta(seconds=5)

    uuid3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid3, created_at=None,
                                         updated_at=latest))

    uuid4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid4, created_at=None,
                                         updated_at=later))

    images = self.client.image_get_all(sort_key=['updated_at'],
                                       sort_dir=['desc'])
    # Most recently updated first.
    self.assertEqualImages(images, (uuid3, uuid4, UUID2, UUID1),
                           unjsonify=False)
def new_artifact(self, name, version, **kwargs):
    """Build a new artifact in 'creating' state with fresh id/timestamps."""
    artifact_id = kwargs.pop('id', str(uuid.uuid4()))
    tags = kwargs.pop('tags', [])
    # Callers may not supply reserved fields; drop them silently.
    for reserved in ('owner', 'created_at', 'updated_at', 'deleted_at',
                     'visibility', 'state'):
        kwargs.pop(reserved, '')
    now = timeutils.utcnow()
    return self.klass(id=artifact_id,
                      name=name,
                      version=version,
                      visibility='private',
                      state='creating',
                      # XXX FIXME remove after using authentication
                      # paste-flavor
                      # (no or '' as owner will always be there)
                      owner=self.context.owner or '',
                      created_at=now,
                      updated_at=now,
                      tags=tags,
                      **kwargs)
def test_get_index_sort_status_desc(self):
    """Tests that the registry API returns list of public images.

    Must be sorted alphabetically by status in descending order.
    """
    future = timeutils.utcnow() + datetime.timedelta(seconds=10)

    uuid3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid3, name='asdf',
                                         status='queued'))

    uuid4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid4, name='xyz',
                                         created_at=future))

    images = self.client.image_get_all(sort_key=['status'],
                                       sort_dir=['desc'])
    self.assertEqualImages(images, (uuid3, uuid4, UUID2, UUID1),
                           unjsonify=False)
def setUp(self):
    """Establish a clean test environment"""
    super(TestRegistryV2Client, self).setUp()
    db_api.get_engine()
    self.context = context.RequestContext(is_admin=True)
    # Second fixture is stamped 5 seconds after the first.
    uuid1_time = timeutils.utcnow()
    uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
    self.FIXTURES = [
        self.get_extra_fixture(
            id=UUID1, name='fake image #1', is_public=False,
            disk_format='ami', container_format='ami', size=13,
            virtual_size=26, properties={'type': 'kernel'},
            location="swift://*****:*****@acct/container/obj.tar.0",
            created_at=uuid1_time),
        self.get_extra_fixture(id=UUID2, name='fake image #2',
                               properties={}, size=19, virtual_size=38,
                               location="file:///tmp/glance-tests/2",
                               created_at=uuid2_time)
    ]
    # Start from a known-clean DB state, then register the client.
    self.destroy_fixtures()
    self.create_fixtures()
    self.client = rclient.RegistryClient("0.0.0.0")
def new_image_member(self, image, member_id):
    """Create a 'pending' membership linking *image* to *member_id*."""
    now = timeutils.utcnow()
    return ImageMembership(image_id=image.image_id,
                           member_id=member_id,
                           created_at=now,
                           updated_at=now,
                           status='pending')
def new_task(self, task_type, owner, image_id, user_id, request_id,
             task_input=None, **kwargs):
    """Construct a fresh Task in 'pending' state.

    expires_at stays None here: it is only set once the task succeeds
    or fails.
    """
    now = timeutils.utcnow()
    return Task(
        str(uuid.uuid4()),  # task_id
        task_type,
        'pending',          # status
        owner,
        image_id,
        user_id,
        request_id,
        None,               # expires_at
        now,                # created_at
        now,                # updated_at
        task_input,
        kwargs.get('result'),
        kwargs.get('message'),
    )
def new_object(self, namespace, name, **kwargs):
    """Build a MetadefObject with a fresh UUID and creation timestamps."""
    new_id = str(uuid.uuid4())
    now = timeutils.utcnow()
    return MetadefObject(namespace, new_id, name, now, now,
                         kwargs.get('required'),
                         kwargs.get('description'),
                         kwargs.get('properties'))
def test_task_invalid_status(self):
    """Creating a domain.Task with an unknown status must raise."""
    now = timeutils.utcnow()
    self.assertRaises(
        exception.InvalidTaskStatus,
        domain.Task,
        str(uuid.uuid4()),
        task_type='import',
        status='blah',
        owner=None,
        expires_at=None,
        created_at=now,
        updated_at=now,
        task_input=None,
        message=None,
        result=None,
    )
def test_get_image_details_with_changes_since(self):
    """Tests that a detailed call can be filtered by changes-since"""
    # Four reference instants: one in the past, three in the future.
    dt1 = timeutils.utcnow() - datetime.timedelta(1)
    iso1 = timeutils.isotime(dt1)

    dt2 = timeutils.utcnow() + datetime.timedelta(1)
    iso2 = timeutils.isotime(dt2)

    dt3 = timeutils.utcnow() + datetime.timedelta(2)

    dt4 = timeutils.utcnow() + datetime.timedelta(3)
    iso4 = timeutils.isotime(dt4)

    # UUID3 is created then destroyed so changes-since can surface it.
    UUID3 = _gen_uuid()
    extra_fixture = self.get_fixture(id=UUID3, name='fake image #3')
    db_api.image_create(self.context, extra_fixture)
    db_api.image_destroy(self.context, UUID3)

    # UUID4 is timestamped two days in the future (dt3).
    UUID4 = _gen_uuid()
    extra_fixture = self.get_fixture(id=UUID4, name='fake image #4',
                                     created_at=dt3, updated_at=dt3)
    db_api.image_create(self.context, extra_fixture)

    # Check a standard list, 4 images in db (2 deleted)
    images = self.client.get_images_detailed(filters={})
    self.assertEqualImages(images, (UUID4, UUID2), unjsonify=False)

    # Expect 3 images (1 deleted)
    filters = {'changes-since': iso1}
    images = self.client.get_images(filters=filters)
    self.assertEqualImages(images, (UUID4, UUID3, UUID2), unjsonify=False)

    # Expect 1 images (0 deleted)
    filters = {'changes-since': iso2}
    images = self.client.get_images_detailed(filters=filters)
    self.assertEqualImages(images, (UUID4, ), unjsonify=False)

    # Expect 0 images (0 deleted)
    filters = {'changes-since': iso4}
    images = self.client.get_images(filters=filters)
    self.assertEqualImages(images, (), unjsonify=False)
def _populate_resource_types(resource_types_table):
    """Insert every known resource type as a protected row stamped 'now'."""
    now = timeutils.utcnow()
    for type_name in RESOURCE_TYPES:
        row = {
            'name': type_name,
            'protected': True,
            'created_at': now,
            'updated_at': now,
        }
        resource_types_table.insert(values=row).execute()
def new_namespace(self, namespace, owner, **kwargs):
    """Build a MetadefNamespace with a fresh UUID and timestamps."""
    now = timeutils.utcnow()
    return MetadefNamespace(str(uuid.uuid4()),
                            namespace,
                            kwargs.get('display_name'),
                            kwargs.get('description'),
                            owner,
                            kwargs.get('visibility'),
                            kwargs.get('protected'),
                            now,
                            now)
def test_fail(self, mock_utcnow):
    """fail() must set status/message and stamp expires_at from 'now'."""
    # mock_utcnow is injected by a mock.patch decorator outside this view;
    # pinning it freezes the clock so the expires_at comparison holds.
    mock_utcnow.return_value = datetime.datetime.utcnow()
    self.task.begin_processing()
    self.task.fail('{"message": "connection failed"}')
    self.assertEqual('failure', self.task.status)
    self.assertEqual('{"message": "connection failed"}', self.task.message)
    self.assertIsNone(self.task.result)
    expected = (timeutils.utcnow() +
                datetime.timedelta(hours=CONF.task.task_time_to_live))
    self.assertEqual(expected, self.task.expires_at)
def test_succeed(self, mock_utcnow):
    """succeed() must record the result and stamp expires_at from 'now'."""
    # mock_utcnow is injected by a mock.patch decorator outside this view;
    # pinning it freezes the clock so the expires_at comparison holds.
    mock_utcnow.return_value = datetime.datetime.utcnow()
    self.task.begin_processing()
    self.task.succeed('{"location": "file://home"}')
    self.assertEqual('success', self.task.status)
    self.assertEqual('{"location": "file://home"}', self.task.result)
    self.assertEqual(u'', self.task.message)
    expected = (timeutils.utcnow() +
                datetime.timedelta(hours=CONF.task.task_time_to_live))
    self.assertEqual(expected, self.task.expires_at)
def test_get_image_details_with_changes_since(self):
    """Tests that a detailed call can be filtered by changes-since"""
    # Four reference instants: one in the past, three in the future.
    dt1 = timeutils.utcnow() - datetime.timedelta(1)
    iso1 = timeutils.isotime(dt1)

    dt2 = timeutils.utcnow() + datetime.timedelta(1)
    iso2 = timeutils.isotime(dt2)

    dt3 = timeutils.utcnow() + datetime.timedelta(2)

    dt4 = timeutils.utcnow() + datetime.timedelta(3)
    iso4 = timeutils.isotime(dt4)

    # UUID3 is created then destroyed so changes-since can surface it.
    UUID3 = _gen_uuid()
    extra_fixture = self.get_fixture(id=UUID3, name='fake image #3')
    db_api.image_create(self.context, extra_fixture)
    db_api.image_destroy(self.context, UUID3)

    # UUID4 is timestamped two days in the future (dt3).
    UUID4 = _gen_uuid()
    extra_fixture = self.get_fixture(id=UUID4, name='fake image #4',
                                     created_at=dt3, updated_at=dt3)
    db_api.image_create(self.context, extra_fixture)

    # Check a standard list, 4 images in db (2 deleted)
    images = self.client.get_images_detailed(filters={})
    self.assertEqualImages(images, (UUID4, UUID2), unjsonify=False)

    # Expect 3 images (1 deleted)
    filters = {'changes-since': iso1}
    images = self.client.get_images(filters=filters)
    self.assertEqualImages(images, (UUID4, UUID3, UUID2), unjsonify=False)

    # Expect 1 images (0 deleted)
    filters = {'changes-since': iso2}
    images = self.client.get_images_detailed(filters=filters)
    self.assertEqualImages(images, (UUID4,), unjsonify=False)

    # Expect 0 images (0 deleted)
    filters = {'changes-since': iso4}
    images = self.client.get_images(filters=filters)
    self.assertEqualImages(images, (), unjsonify=False)
def new_resource_type(self, namespace, name, **kwargs):
    """Build a MetadefResourceType stamped with the current time."""
    now = timeutils.utcnow()
    return MetadefResourceType(namespace, name,
                               kwargs.get('prefix'),
                               kwargs.get('properties_target'),
                               now, now)
def new_tag(self, namespace, name, **kwargs):
    """Build a MetadefTag with a fresh UUID and creation timestamps."""
    now = timeutils.utcnow()
    return MetadefTag(namespace, str(uuid.uuid4()), name, now, now)
def test_succeed(self, mock_utcnow):
    """succeed() must record the result and stamp expires_at from 'now'."""
    # mock_utcnow is injected by a mock.patch decorator outside this view;
    # pinning it freezes the clock so the expires_at comparison holds.
    mock_utcnow.return_value = datetime.datetime.utcnow()
    self.task.begin_processing()
    self.task.succeed('{"location": "file://home"}')
    self.assertEqual('success', self.task.status)
    self.assertEqual('{"location": "file://home"}', self.task.result)
    self.assertEqual(u'', self.task.message)
    expected = (timeutils.utcnow() +
                datetime.timedelta(hours=CONF.task.task_time_to_live))
    self.assertEqual(
        expected,
        self.task.expires_at
    )
def test_fail(self, mock_utcnow):
    """fail() must set status/message and stamp expires_at from 'now'."""
    # mock_utcnow is injected by a mock.patch decorator outside this view;
    # pinning it freezes the clock so the expires_at comparison holds.
    mock_utcnow.return_value = datetime.datetime.utcnow()
    self.task.begin_processing()
    self.task.fail('{"message": "connection failed"}')
    self.assertEqual('failure', self.task.status)
    self.assertEqual('{"message": "connection failed"}', self.task.message)
    self.assertIsNone(self.task.result)
    expected = (timeutils.utcnow() +
                datetime.timedelta(hours=CONF.task.task_time_to_live))
    self.assertEqual(
        expected,
        self.task.expires_at
    )
def new_object(self, namespace, name, **kwargs):
    """Build a MetadefObject with a fresh UUID and creation timestamps."""
    now = timeutils.utcnow()
    obj_id = str(uuid.uuid4())
    return MetadefObject(
        namespace,
        obj_id,
        name,
        now,  # created_at
        now,  # updated_at
        kwargs.get('required'),
        kwargs.get('description'),
        kwargs.get('properties'),
    )
def _create_tasks(self):
    """Store four task fixtures created 5 seconds apart."""
    start = timeutils.utcnow()
    when = [start + datetime.timedelta(seconds=5 * n) for n in range(4)]
    self.tasks = [
        _db_fixture(UUID1, owner=TENANT1,
                    created_at=when[0], updated_at=when[0]),
        # FIXME(venkatesh): change the type to include clone and export
        # once they are included as a valid types under Task domain model.
        _db_fixture(UUID2, owner=TENANT2, type='import',
                    created_at=when[1], updated_at=when[1]),
        _db_fixture(UUID3, owner=TENANT3, type='import',
                    created_at=when[2], updated_at=when[2]),
        _db_fixture(UUID4, owner=TENANT4, type='import',
                    created_at=when[3], updated_at=when[3]),
    ]
    for fixture in self.tasks:
        self.db.task_create(None, fixture)
def new_namespace(self, namespace, owner, **kwargs):
    """Build a MetadefNamespace with a fresh UUID and timestamps."""
    now = timeutils.utcnow()
    ns_id = str(uuid.uuid4())
    return MetadefNamespace(
        ns_id,
        namespace,
        kwargs.get('display_name'),
        kwargs.get('description'),
        owner,
        kwargs.get('visibility'),
        kwargs.get('protected'),
        now,  # created_at
        now,  # updated_at
    )
def _domain_fixture(task_id, **kwargs):
    """Build a glance.domain.Task, defaulting timestamps to 'now'."""
    now = timeutils.utcnow()
    return glance.domain.Task(
        task_id=task_id,
        status=kwargs.get('status', 'pending'),
        task_type=kwargs.get('type', 'import'),
        owner=kwargs.get('owner', None),
        expires_at=kwargs.get('expires_at', None),
        created_at=kwargs.get('created_at', now),
        updated_at=kwargs.get('updated_at', now),
        task_input=kwargs.get('task_input', {}),
        message=kwargs.get('message', None),
        result=kwargs.get('result', None),
    )
def new_service(self, service_id=None, status=None, created_at=None,
                updated_at=None, name=None, schema=None, host=None,
                port=None, endpoint=None, total_size=0, avail_size=None,
                disk_wwn=None, file_system_uuid=None, storage_dir=None,
                extra_properties=None, tags=None, **other_args):
    """Create a new Service domain object in 'active' state.

    NOTE: the status/created_at/updated_at parameters are currently
    ignored — fresh timestamps and 'active' are always used, matching
    the original behavior.
    """
    extra_properties = extra_properties or {}
    if service_id is None:
        service_id = str(uuid.uuid4())
    created_at = timeutils.utcnow()
    updated_at = created_at
    status = 'active'
    # TODO(review): these validation stubs never raise; decide whether a
    # missing disk_wwn / file_system_uuid / storage_dir should be an error.
    if disk_wwn is None:
        pass  # raise
    if file_system_uuid is None or storage_dir is None:
        pass  # raise
    return Service(service_id=service_id,
                   status=status,
                   created_at=created_at,
                   updated_at=updated_at,
                   name=name,
                   schema=schema,
                   host=host,
                   port=port,
                   endpoint=endpoint,
                   total_size=total_size,
                   avail_size=avail_size,
                   disk_wwn=disk_wwn,
                   file_system_uuid=file_system_uuid,
                   storage_dir=storage_dir,
                   extra_properties=extra_properties,
                   # Bug fix: was `tags or tags`, which left None as None;
                   # default to [] like the sibling new_image factory.
                   tags=tags or [])
def _db_fixture(task_id, **kwargs):
    """Return a task row dict with defaults, overridden by kwargs."""
    now = timeutils.utcnow()
    row = {
        'id': task_id,
        'status': 'pending',
        'type': 'import',
        'input': {},
        'result': None,
        'owner': None,
        'message': None,
        'expires_at': None,
        'created_at': now,
        'updated_at': now,
        'deleted_at': None,
        'deleted': False,
    }
    row.update(kwargs)
    return row
def new_artifact(self, name, version, **kwargs):
    """Build a new artifact in 'creating' state with fresh id/timestamps."""
    artifact_id = kwargs.pop('id', str(uuid.uuid4()))
    tags = kwargs.pop('tags', [])
    # Callers may not supply reserved fields; drop them silently.
    for reserved in ('owner', 'created_at', 'updated_at', 'deleted_at',
                     'state'):
        kwargs.pop(reserved, '')
    now = timeutils.utcnow()
    return self.klass(id=artifact_id,
                      name=name,
                      version=version,
                      state='creating',
                      owner=self.context.owner or '',
                      created_at=now,
                      updated_at=now,
                      tags=tags,
                      **kwargs)
def new_artifact(self, name, version, **kwargs):
    """Build a new artifact in 'creating' state with fresh id/timestamps."""
    new_id = kwargs.pop('id', str(uuid.uuid4()))
    tag_list = kwargs.pop('tags', [])
    # Reserved fields are managed here, never by the caller.
    reserved_fields = ('owner', 'created_at', 'updated_at', 'deleted_at',
                       'state')
    for field in reserved_fields:
        kwargs.pop(field, '')
    now = timeutils.utcnow()
    return self.klass(id=new_id,
                      name=name,
                      version=version,
                      state='creating',
                      owner=self.context.owner or '',
                      created_at=now,
                      updated_at=now,
                      tags=tag_list,
                      **kwargs)
def new_task(self, task_type, owner, task_input=None, **kwargs):
    """Construct a fresh Task in 'pending' state.

    expires_at stays None here: it is only set once the task succeeds
    or fails.
    """
    now = timeutils.utcnow()
    return Task(
        str(uuid.uuid4()),  # task_id
        task_type,
        'pending',          # status
        owner,
        None,               # expires_at
        now,                # created_at
        now,                # updated_at
        task_input,
        kwargs.get('result'),
        kwargs.get('message'),
    )
def setUp(self):
    """Establish a clean test environment"""
    super(TestRegistryV2Client, self).setUp()
    db_api.get_engine()
    self.context = context.RequestContext(is_admin=True)
    # Second fixture is stamped 5 seconds after the first.
    uuid1_time = timeutils.utcnow()
    uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
    self.FIXTURES = [
        self.get_extra_fixture(
            id=UUID1, name='fake image #1', is_public=False,
            disk_format='ami', container_format='ami', size=13,
            virtual_size=26, properties={'type': 'kernel'},
            location="swift://*****:*****@acct/container/obj.tar.0",
            created_at=uuid1_time),
        self.get_extra_fixture(id=UUID2, name='fake image #2',
                               properties={}, size=19, virtual_size=38,
                               location="file:///tmp/glance-tests/2",
                               created_at=uuid2_time)]
    # Start from a known-clean DB state, then register the client.
    self.destroy_fixtures()
    self.create_fixtures()
    self.client = rclient.RegistryClient("0.0.0.0")
def _db_fixture(task_id, **kwargs):
    """Return a task row dict with defaults, overridden by kwargs.

    expires_at defaults to one year after the (shared) creation time.
    """
    now = timeutils.utcnow()
    row = {
        'id': task_id,
        'status': 'pending',
        'type': 'import',
        'input': {},
        'result': None,
        'owner': None,
        'image_id': 'fake_image_id',
        'user_id': 'fake_user',
        'request_id': 'fake_request_id',
        'message': None,
        'expires_at': now + datetime.timedelta(days=365),
        'created_at': now,
        'updated_at': now,
        'deleted_at': None,
        'deleted': False,
    }
    row.update(kwargs)
    return row
def new_artifact(self, name, version, **kwargs):
    """Build a new private artifact in 'creating' state."""
    new_id = kwargs.pop('id', str(uuid.uuid4()))
    tag_list = kwargs.pop('tags', [])
    # Reserved fields are managed here, never by the caller.
    for field in ('owner', 'created_at', 'updated_at', 'deleted_at',
                  'visibility', 'state'):
        kwargs.pop(field, '')
    now = timeutils.utcnow()
    return self.klass(id=new_id,
                      name=name,
                      version=version,
                      visibility='private',
                      state='creating',
                      # XXX FIXME remove after using authentication
                      # paste-flavor
                      # (no or '' as owner will always be there)
                      owner=self.context.owner or '',
                      created_at=now,
                      updated_at=now,
                      tags=tag_list,
                      **kwargs)
def test_get_index_sort_created_at_asc(self):
    """
    Tests that the /images registry API returns list of public images
    sorted by created_at in ascending order.
    """
    base = timeutils.utcnow()
    later = base + datetime.timedelta(seconds=5)

    uuid3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid3, created_at=later))

    uuid4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid4, created_at=base))

    images = self.client.get_images(sort_key='created_at',
                                    sort_dir='asc')
    # Oldest creation time first.
    self.assertEqualImages(images, (UUID2, uuid4, uuid3), unjsonify=False)
def test_image_get_index_marker_limit(self):
    """Test correct set of images returned with marker/limit params."""
    later = timeutils.utcnow() + datetime.timedelta(seconds=10)
    latest = later + datetime.timedelta(seconds=5)

    uuid3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid3, name='new name! #123',
                                         status='saving',
                                         created_at=latest))

    uuid4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=uuid4, name='new name! #125',
                                         status='saving',
                                         created_at=later))

    images = self.client.image_get_all(marker=uuid4, limit=1)
    self.assertEqualImages(images, (UUID2,), unjsonify=False)
def new_image(self, image_id=None, name=None, visibility='private',
              min_disk=0, min_ram=0, protected=False, owner=None,
              disk_format=None, container_format=None,
              extra_properties=None, tags=None, **other_args):
    """Create a new Image in 'queued' state after validating kwargs."""
    extra_properties = extra_properties or {}
    # Reject read-only / unknown / reserved inputs up front.
    self._check_readonly(other_args)
    self._check_unexpected(other_args)
    self._check_reserved(extra_properties)
    if image_id is None:
        image_id = str(uuid.uuid4())
    now = timeutils.utcnow()
    return Image(image_id=image_id,
                 name=name,
                 status='queued',
                 created_at=now,
                 updated_at=now,
                 visibility=visibility,
                 min_disk=min_disk,
                 min_ram=min_ram,
                 protected=protected,
                 owner=owner,
                 disk_format=disk_format,
                 container_format=container_format,
                 extra_properties=extra_properties,
                 tags=tags or [])
def new_artifact(self, name, version, **kwargs):
    """Build a new private artifact in 'creating' state."""
    artifact_id = kwargs.pop("id", str(uuid.uuid4()))
    tags = kwargs.pop("tags", [])
    # Reserved fields are managed here, never by the caller.
    reserved = ("owner", "created_at", "updated_at", "deleted_at",
                "visibility", "state")
    for field in reserved:
        kwargs.pop(field, "")
    now = timeutils.utcnow()
    return self.klass(
        id=artifact_id,
        name=name,
        version=version,
        visibility="private",
        state="creating",
        # XXX FIXME remove after using authentication
        # paste-flavor
        # (no or '' as owner will always be there)
        owner=self.context.owner or "",
        created_at=now,
        updated_at=now,
        tags=tags,
        **kwargs
    )
def _populate_metadata(meta, metadata_path=None, merge=False,
                       prefer_new=False, overwrite=False):
    """Load metadef JSON schema files from disk into the database.

    :param metadata_path: file or directory of *.json schema files;
                          defaults to CONF.metadata_source_path
    :param merge: when a namespace already exists, merge into it instead
                  of skipping the file
    :param prefer_new: during a merge, file contents win over DB rows
    :param overwrite: drop and recreate an existing namespace entirely
    """
    if not metadata_path:
        metadata_path = CONF.metadata_source_path
    try:
        # Accept either a single file or every *.json in a directory.
        if isfile(metadata_path):
            json_schema_files = [metadata_path]
        else:
            json_schema_files = [f for f in os.listdir(metadata_path)
                                 if isfile(join(metadata_path, f))
                                 and f.endswith('.json')]
    except OSError as e:
        LOG.error(encodeutils.exception_to_unicode(e))
        return

    if not json_schema_files:
        LOG.error(_LE("Json schema files not found in %s. Aborting."),
                  metadata_path)
        return

    namespaces_table = get_metadef_namespaces_table(meta)
    namespace_rt_table = get_metadef_namespace_resource_types_table(meta)
    objects_table = get_metadef_objects_table(meta)
    tags_table = get_metadef_tags_table(meta)
    properties_table = get_metadef_properties_table(meta)
    resource_types_table = get_metadef_resource_types_table(meta)

    for json_schema_file in json_schema_files:
        try:
            file = join(metadata_path, json_schema_file)
            with open(file) as json_file:
                metadata = json.load(json_file)
        except Exception as e:
            # A malformed file is logged and skipped, not fatal.
            LOG.error(_LE("Failed to parse json file %(file_path)s while "
                          "populating metadata due to: %(error_msg)s"),
                      {"file_path": file,
                       "error_msg": encodeutils.exception_to_unicode(e)})
            continue

        values = {
            'namespace': metadata.get('namespace'),
            'display_name': metadata.get('display_name'),
            'description': metadata.get('description'),
            'visibility': metadata.get('visibility'),
            'protected': metadata.get('protected'),
            'owner': metadata.get('owner', 'admin')
        }

        db_namespace = select(
            [namespaces_table.c.id]
        ).where(
            namespaces_table.c.namespace == values['namespace']
        ).select_from(
            namespaces_table
        ).execute().fetchone()

        if db_namespace and overwrite:
            LOG.info(_LI("Overwriting namespace %s"), values['namespace'])
            _clear_namespace_metadata(meta, db_namespace[0])
            db_namespace = None

        if not db_namespace:
            values.update({'created_at': timeutils.utcnow()})
            _insert_data_to_db(namespaces_table, values)

            # Re-read the row to learn the id assigned by the insert.
            db_namespace = select(
                [namespaces_table.c.id]
            ).where(
                namespaces_table.c.namespace == values['namespace']
            ).select_from(
                namespaces_table
            ).execute().fetchone()
        elif not merge:
            LOG.info(_LI("Skipping namespace %s. It already exists in the "
                         "database."), values['namespace'])
            continue
        elif prefer_new:
            values.update({'updated_at': timeutils.utcnow()})
            _update_data_in_db(namespaces_table, values,
                               namespaces_table.c.id, db_namespace[0])

        namespace_id = db_namespace[0]

        # Resource type associations: create missing types, then link them.
        for resource_type in metadata.get('resource_type_associations', []):
            rt_id = _get_resource_type_id(meta, resource_type['name'])
            if not rt_id:
                val = {
                    'name': resource_type['name'],
                    'created_at': timeutils.utcnow(),
                    'protected': True
                }
                _insert_data_to_db(resource_types_table, val)
                rt_id = _get_resource_type_id(meta, resource_type['name'])
            elif prefer_new:
                val = {'updated_at': timeutils.utcnow()}
                _update_data_in_db(resource_types_table, val,
                                   resource_types_table.c.id, rt_id)

            values = {
                'namespace_id': namespace_id,
                'resource_type_id': rt_id,
                'properties_target': resource_type.get(
                    'properties_target'),
                'prefix': resource_type.get('prefix')
            }
            namespace_resource_type = _get_namespace_resource_type_by_ids(
                meta, namespace_id, rt_id)
            if not namespace_resource_type:
                values.update({'created_at': timeutils.utcnow()})
                _insert_data_to_db(namespace_rt_table, values)
            elif prefer_new:
                values.update({'updated_at': timeutils.utcnow()})
                _update_rt_association(namespace_rt_table, values,
                                       rt_id, namespace_id)

        # Properties: insert new rows; update existing ones if prefer_new.
        for property, schema in six.iteritems(metadata.get('properties',
                                                           {})):
            values = {
                'name': property,
                'namespace_id': namespace_id,
                'json_schema': json.dumps(schema)
            }
            property_id = _get_resource_id(properties_table,
                                           namespace_id, property)
            if not property_id:
                values.update({'created_at': timeutils.utcnow()})
                _insert_data_to_db(properties_table, values)
            elif prefer_new:
                values.update({'updated_at': timeutils.utcnow()})
                _update_data_in_db(properties_table, values,
                                   properties_table.c.id, property_id)

        # Objects: same insert-or-update pattern as properties.
        for object in metadata.get('objects', []):
            values = {
                'name': object['name'],
                'description': object.get('description'),
                'namespace_id': namespace_id,
                'json_schema': json.dumps(
                    object.get('properties'))
            }
            object_id = _get_resource_id(objects_table, namespace_id,
                                         object['name'])
            if not object_id:
                values.update({'created_at': timeutils.utcnow()})
                _insert_data_to_db(objects_table, values)
            elif prefer_new:
                values.update({'updated_at': timeutils.utcnow()})
                _update_data_in_db(objects_table, values,
                                   objects_table.c.id, object_id)

        # Tags: same insert-or-update pattern.
        for tag in metadata.get('tags', []):
            values = {
                'name': tag.get('name'),
                'namespace_id': namespace_id,
            }
            tag_id = _get_resource_id(tags_table, namespace_id, tag['name'])
            if not tag_id:
                values.update({'created_at': timeutils.utcnow()})
                _insert_data_to_db(tags_table, values)
            elif prefer_new:
                values.update({'updated_at': timeutils.utcnow()})
                _update_data_in_db(tags_table, values,
                                   tags_table.c.id, tag_id)

        LOG.info(_LI("File %s loaded to database."), file)

    LOG.info(_LI("Metadata loading finished"))
def fail(self, message):
    """Move the task to 'failure', record *message*, and stamp expiry."""
    self.message = message
    self._set_task_status('failure')
    # Failed tasks are kept around only until their time-to-live elapses.
    self.expires_at = timeutils.utcnow() + self._time_to_live