def _wait_on_task_execution(self):
    """Poll the v2 tasks API until no task is pending or processing.

    Polls for at most five seconds, sleeping briefly between rounds.
    """
    poll_start = timeutils.utcnow()
    # Give the tasks at most 5 seconds to settle.
    while timeutils.delta_seconds(poll_start, timeutils.utcnow()) < 5:
        # Fetch every task and look for any still in flight.
        res, content = self.http.request("/v2/tasks", 'GET',
                                         headers=minimal_task_headers())
        content_dict = json.loads(content)
        self.assertEqual(res.status, 200)

        still_running = any(
            task['status'] in ('pending', 'processing')
            for task in content_dict['tasks'])
        if not still_running:
            break
        time.sleep(0.05)
def _inner():
    # Greenthread body for a fixed-interval looping call: run self.f
    # every `interval` seconds until stopped.  `initial_delay`,
    # `interval` and `done` are closed over from the enclosing scope.
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            # Bail out early if stop() was called during f().
            if not self._running:
                break
            # Sleep only for whatever is left of the interval; if f()
            # overran the interval, warn and don't sleep at all.
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_('task run outlasted interval by %s sec') % -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    except LoopingCallDone as e:
        # Raised by f() to request a clean shutdown with a result value.
        self.stop()
        done.send(e.retvalue)
    except Exception:
        # Propagate unexpected failures to whoever waits on `done`.
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def get_extra_fixture(self, id, name, **kwargs):
    """Build an extra image fixture with non-deleted defaults.

    Uses a single ``utcnow()`` reading for both ``created_at`` and
    ``updated_at`` so the two timestamps are guaranteed equal; calling
    ``utcnow()`` twice (as before) could produce differing values
    across a clock tick and make timestamp-equality tests flaky.
    """
    now = timeutils.utcnow()
    return self.get_fixture(
        id=id, name=name, deleted=False, deleted_at=None,
        created_at=now, updated_at=now, **kwargs)
def task_delete(context, task_id):
    """Soft-delete a task in the in-memory store.

    :raises exception.TaskNotFound: if no task with ``task_id`` exists.
    :return: a deep copy of the soft-deleted task record.
    """
    global DATA
    task = DATA['tasks'].get(task_id)
    if task is None:
        msg = (_("No task found with ID %s") % task_id)
        LOG.debug(msg)
        raise exception.TaskNotFound(task_id=task_id)
    task['deleted'] = True
    task['deleted_at'] = timeutils.utcnow()
    task['updated_at'] = timeutils.utcnow()
    return copy.deepcopy(task)
def __call__(self, request):
    """Proxy the request and emit one apache-style audit log line.

    Captures method/url/status/content-length plus an obfuscated auth
    token, calls the wrapped WSGI application, and logs the record even
    when the downstream application raises.

    :param request: the incoming WSGI request object.
    :return: the downstream application's response.
    """
    now = timeutils.utcnow()
    reqBody = "-"
    content_type = str(request.content_type)
    if 'xml' in content_type or 'json' in content_type:
        # Only capture small bodies, and never bodies holding secrets.
        if request.content_length is not None \
                and request.content_length < 10240:
            reqBody = str(request.body) or '-'
            if HWExtend.hasSensitiveStr(reqBody):
                reqBody = '-'

    data = {
        'remote_addr': request.remote_addr,
        'remote_user': request.remote_user or '-',
        'token_id': "None",
        'request_datetime': '%s' % now.strftime(APACHE_TIME_FORMAT),
        'response_datetime': '%s' % now.strftime(APACHE_TIME_FORMAT),
        'method': request.method,
        'url': request.url,
        'http_version': request.http_version,
        'status': 500,
        'content_length': '-',
        'request_body': reqBody,
        'image_id': '-'}

    try:
        # NOTE: narrowed from a bare 'except:' which would also swallow
        # SystemExit/KeyboardInterrupt.
        token = HWExtend.b64encodeToken(request.headers['X-Auth-Token'])
    except Exception:
        token = "-"

    response = None
    try:
        response = request.get_response(self.application)
        data['status'] = response.status_int
        data['content_length'] = response.content_length or '-'
    finally:
        # must be calculated *after* the application has been called
        now = timeutils.utcnow()
        data['token_id'] = token
        if "GET" in data['method'] and "/tokens/" in data['url']:
            # Obfuscate the raw token embedded in the URL before logging.
            Pos = data['url'].find("tokens") + 7
            logToken = data['url'][Pos:Pos + 32]
            encodedToken = HWExtend.b64encodeToken(logToken)
            data['url'] = data['url'].replace(logToken, encodedToken)
        elif "POST" in data['method'] and data['url'].endswith("/images"):
            # BUG FIX: previously `response` was read unconditionally
            # here; if get_response() raised, it was unbound and this
            # finally block died with NameError, masking the real error.
            if response is not None and int(response.status_int) < 400:
                try:
                    resp_body = json.loads(response.body)
                    image_id = resp_body.get('id', None)
                    if image_id is not None:
                        data['image_id'] = image_id
                except Exception:
                    pass
        # timeutils may not return UTC, so we can't hardcode +0000
        data['response_datetime'] = '%s' % (now.strftime(APACHE_TIME_FORMAT))
        log.info(DRM_LOG_FORMAT % data, extra={"type": "operate"})
    return response
def test_task_invalid_status(self):
    """An unrecognized status string must raise InvalidTaskStatus."""
    now = timeutils.utcnow()
    self.assertRaises(exception.InvalidTaskStatus,
                      domain.Task,
                      str(uuid.uuid4()),
                      task_type='import',
                      status='blah',
                      owner=None,
                      expires_at=None,
                      created_at=now,
                      updated_at=now)
def test_image_create_defaults(self):
    """Creating an image with only a status fills in documented defaults."""
    timeutils.set_time_override()
    image = self.db_api.image_create(self.context, {"status": "queued"})
    create_time = timeutils.utcnow()

    expected_defaults = {
        "name": None,
        "container_format": None,
        "min_ram": 0,
        "min_disk": 0,
        "owner": None,
        "is_public": False,
        "size": None,
        "checksum": None,
        "disk_format": None,
        "locations": [],
        "protected": False,
        "deleted": False,
        "deleted_at": None,
        "properties": [],
    }
    for field, expected in expected_defaults.items():
        self.assertEqual(expected, image[field])

    self.assertEqual(image["created_at"], create_time)
    self.assertEqual(image["updated_at"], create_time)
    # Image IDs aren't predictable, but they should be populated
    self.assertTrue(uuid.UUID(image["id"]))
    # NOTE(bcwaldon): the tags attribute should not be returned as a part
    # of a core image entity
    self.assertFalse("tags" in image)
def test_image_property_delete(self):
    """Deleting a property soft-deletes it and stamps deleted_at."""
    fixture = {"name": "ping", "value": "pong", "image_id": UUID1}
    created = self.db_api.image_property_create(self.context, fixture)
    timeutils.set_time_override()
    removed = self.db_api.image_property_delete(self.context, created)
    self.assertEqual(removed["deleted_at"], timeutils.utcnow())
    self.assertTrue(removed["deleted"])
def _image_format(image_name, **values):
    """Build an in-memory image record for a docker-format image."""
    now = timeutils.utcnow()
    image = dict(id=_make_uuid(image_name),
                 name=image_name,
                 owner=None,
                 locations=[],
                 status='active',
                 protected=False,
                 is_public=True,
                 container_format='docker',
                 disk_format='docker',
                 min_ram=0,
                 min_disk=0,
                 size=0,
                 checksum=None,
                 tags=[],
                 created_at=now,
                 updated_at=now,
                 deleted_at=None,
                 deleted=False)
    # Properties are stored as a list of dicts to mirror the sqlalchemy
    # driver's representation.
    raw_props = values.pop('properties', {})
    image['properties'] = [{'name': key, 'value': val, 'deleted': False}
                           for key, val in raw_props.items()]
    image.update(values)
    return image
def image_location_update(context, image_id, location):
    """Update an existing location record of an image (in-memory driver).

    :raises exception.Invalid: if the location carries no ID.
    :raises exception.NotFound: if no matching location record exists.
    """
    loc_id = location.get('id')
    if loc_id is None:
        # BUG FIX: was '%d' -- loc_id is None here, and '%d' % None
        # raises TypeError instead of producing the intended message.
        msg = _("The location data has an invalid ID: %s") % loc_id
        raise exception.Invalid(msg)

    deleted = location['status'] in ('deleted', 'pending_delete')
    updated_time = timeutils.utcnow()
    delete_time = updated_time if deleted else None

    for loc in DATA['locations']:
        if loc['id'] == loc_id and loc['image_id'] == image_id:
            loc.update({"value": location['url'],
                        "meta_data": location['metadata'],
                        "status": location['status'],
                        "deleted": deleted,
                        "updated_at": updated_time,
                        "deleted_at": delete_time})
            return

    msg = (_("No location found with ID %(loc)s from image %(img)s")
           % dict(loc=loc_id, img=image_id))
    LOG.warn(msg)
    raise exception.NotFound(msg)
def get_extra_fixture(self, id, name, **kwargs):
    """Build an image fixture, honouring caller-supplied timestamps.

    ``updated_at`` defaults to ``created_at`` so both stamps agree
    unless the caller overrides them.
    """
    creation_time = kwargs.pop('created_at', timeutils.utcnow())
    update_time = kwargs.pop('updated_at', creation_time)
    return self.get_fixture(id=id,
                            name=name,
                            deleted=False,
                            deleted_at=None,
                            created_at=creation_time,
                            updated_at=update_time,
                            **kwargs)
def image_destroy(context, image_id):
    """Soft-delete an image in the in-memory store.

    :raises exception.NotFound: if the image does not exist.
    """
    global DATA
    if image_id not in DATA['images']:
        raise exception.NotFound()
    record = DATA['images'][image_id]
    record['deleted'] = True
    record['deleted_at'] = timeutils.utcnow()
def _image_format(image_id, **values):
    """Return a fresh image record with driver defaults applied."""
    now = timeutils.utcnow()
    image = dict(id=image_id,
                 name=None,
                 owner=None,
                 locations=[],
                 status="queued",
                 protected=False,
                 is_public=False,
                 container_format=None,
                 disk_format=None,
                 min_ram=0,
                 min_disk=0,
                 size=None,
                 checksum=None,
                 tags=[],
                 created_at=now,
                 updated_at=now,
                 deleted_at=None,
                 deleted=False)
    # NOTE(bcwaldon): store properties as a list to match sqlalchemy driver
    raw_properties = values.pop("properties", {})
    image["properties"] = [{"name": key, "value": val, "deleted": False}
                           for key, val in raw_properties.items()]
    image.update(values)
    return image
def test_image_property_delete(self):
    """Deleting a property soft-deletes it and stamps deleted_at."""
    fixture = {'name': 'ping', 'value': 'pong', 'image_id': UUID1}
    created = self.db_api.image_property_create(self.context, fixture)
    timeutils.set_time_override()
    removed = self.db_api.image_property_delete(self.context, created)
    self.assertEqual(removed['deleted_at'], timeutils.utcnow())
    self.assertTrue(removed['deleted'])
def test_image_member_create(self):
    """image_member_create stamps timestamps and default member fields."""
    timeutils.set_time_override()
    self.assertEqual([], self.db_api.image_member_find(self.context))

    create_time = timeutils.utcnow()
    TENANT1 = uuidutils.generate_uuid()
    self.db_api.image_member_create(self.context,
                                    {'member': TENANT1, 'image_id': UUID1})

    memberships = self.db_api.image_member_find(self.context)
    self.assertEqual(1, len(memberships))
    actual = memberships[0]
    self.assertEqual(actual['created_at'], create_time)
    self.assertEqual(actual['updated_at'], create_time)

    # Drop the generated fields and compare the rest wholesale.
    for key in ('id', 'created_at', 'updated_at'):
        actual.pop(key)
    self.assertEqual({'member': TENANT1,
                      'image_id': UUID1,
                      'can_share': False,
                      'status': 'pending'}, actual)
def image_destroy(context, image_id):
    """Soft-delete an image and cascade to all its child records.

    :raises exception.NotFound: if the image does not exist.
    :return: a deep copy of the soft-deleted, normalized image record.
    """
    global DATA
    try:
        image = DATA['images'][image_id]
        image['deleted'] = True
        image['deleted_at'] = timeutils.utcnow()

        # NOTE(flaper87): Move the image to one of the deleted statuses
        # if it hasn't been done yet.
        if image['status'] not in ['deleted', 'pending_delete']:
            image['status'] = 'deleted'

        _image_locations_set(image_id, [])

        for prop in image['properties']:
            image_property_delete(context, prop['name'], image_id)

        for member in image_member_find(context, image_id=image_id):
            image_member_delete(context, member['id'])

        for tag in image_tag_get_all(context, image_id):
            image_tag_delete(context, image_id, tag)

        _normalize_locations(image)
        return copy.deepcopy(image)
    except KeyError:
        raise exception.NotFound()
def test_image_create_defaults(self):
    """Creating an image with only a status fills in documented defaults."""
    timeutils.set_time_override()
    image = self.db_api.image_create(self.context, {'status': 'queued'})
    create_time = timeutils.utcnow()

    expected_defaults = {
        'name': None,
        'container_format': None,
        'min_ram': 0,
        'min_disk': 0,
        'owner': None,
        'is_public': False,
        'size': None,
        'checksum': None,
        'disk_format': None,
        'locations': [],
        'protected': False,
        'deleted': False,
        'deleted_at': None,
        'properties': [],
    }
    for field, expected in expected_defaults.items():
        self.assertEqual(expected, image[field])

    self.assertEqual(image['created_at'], create_time)
    self.assertEqual(image['updated_at'], create_time)
    # Image IDs aren't predictable, but they should be populated
    self.assertTrue(uuid.UUID(image['id']))
    #NOTE(bcwaldon): the tags attribute should not be returned as a part
    # of a core image entity
    self.assertFalse('tags' in image)
def image_location_update(context, image_id, location, session=None):
    """Update one location record of an image (sqlalchemy driver).

    :param session: optional SQLAlchemy session; a new one is created
        when not supplied.
    :raises exception.Invalid: if the location carries no ID.
    :raises exception.NotFound: if the (id, image_id) pair matches nothing.
    """
    loc_id = location.get('id')
    if loc_id is None:
        # BUG FIX: was '%d' -- loc_id is None here, and '%d' % None
        # raises TypeError instead of producing the intended message.
        msg = _("The location data has an invalid ID: %s") % loc_id
        raise exception.Invalid(msg)

    try:
        session = session or get_session()
        location_ref = session.query(models.ImageLocation)\
            .filter_by(id=loc_id)\
            .filter_by(image_id=image_id)\
            .one()

        deleted = location['status'] in ('deleted', 'pending_delete')
        updated_time = timeutils.utcnow()
        delete_time = updated_time if deleted else None

        location_ref.update({"value": location['url'],
                             "meta_data": location['metadata'],
                             "status": location['status'],
                             "deleted": deleted,
                             "updated_at": updated_time,
                             "deleted_at": delete_time})
        location_ref.save(session=session)
    except sa_orm.exc.NoResultFound:
        msg = (_("No location found with ID %(loc)s from image %(img)s")
               % dict(loc=loc_id, img=image_id))
        LOG.warn(msg)
        raise exception.NotFound(msg)
def _image_format(image_id, **values):
    """Return a new image record (single-'location' schema variant)."""
    now = timeutils.utcnow()
    image = dict(id=image_id,
                 name=None,
                 owner=None,
                 location=None,
                 status='queued',
                 protected=False,
                 is_public=False,
                 container_format=None,
                 disk_format=None,
                 min_ram=0,
                 min_disk=0,
                 size=None,
                 checksum=None,
                 tags=[],
                 created_at=now,
                 updated_at=now,
                 deleted_at=None,
                 deleted=False)
    #NOTE(bcwaldon): store properties as a list to match sqlalchemy driver
    raw_properties = values.pop('properties', {})
    image['properties'] = [{'name': key, 'value': val, 'deleted': False}
                           for key, val in raw_properties.items()]
    image.update(values)
    return image
def _image_child_entry_delete_all(child_model_cls, image_id, delete_time=None,
                                  session=None):
    """Soft-delete every child entry of an image.

    Works for any child ORM model keyed by ``image_id``
    (model.ImageLocation, model.ImageProperty, model.ImageMember,
    model.ImageTag).

    :param child_model_cls: the ORM model class.
    :param image_id: id of the image whose child entries are deleted.
    :param delete_time: deletion timestamp; defaults to now.
    :param session: a SQLAlchemy session to use (if present).
    :rtype: int
    :return: the number of child entries soft-deleted.
    """
    session = session or get_session()
    delete_time = delete_time or timeutils.utcnow()
    query = (session.query(child_model_cls)
             .filter_by(image_id=image_id)
             .filter_by(deleted=False))
    return query.update({"deleted": True, "deleted_at": delete_time})
def image_location_delete(context, image_id, location_id, status,
                          delete_time=None, session=None):
    """Soft-delete one image location, recording its terminal status.

    :raises exception.Invalid: if ``status`` is not a deletion status.
    :raises exception.NotFound: if the location does not exist.
    """
    if status not in ('deleted', 'pending_delete'):
        msg = _("The status of deleted image location can only be set to "
                "'pending_delete' or 'deleted'")
        raise exception.Invalid(msg)

    try:
        session = session or get_session()
        location_ref = (session.query(models.ImageLocation)
                        .filter_by(id=location_id)
                        .filter_by(image_id=image_id)
                        .one())
        stamp = delete_time or timeutils.utcnow()
        location_ref.update({"deleted": True,
                             "status": status,
                             "updated_at": stamp,
                             "deleted_at": stamp})
        location_ref.save(session=session)
    except sa_orm.exc.NoResultFound:
        msg = (_("No location found with ID %(loc)s from image %(img)s")
               % dict(loc=location_id, img=image_id))
        LOG.warn(msg)
        raise exception.NotFound(msg)
def new_image_member(self, image, member_id):
    """Create a pending ImageMembership linking ``member_id`` to an image."""
    now = timeutils.utcnow()
    return ImageMembership(image_id=image.image_id,
                           member_id=member_id,
                           created_at=now,
                           updated_at=now,
                           status='pending')
def image_destroy(context, image_id):
    """Soft-delete an image and cascade to all its child records.

    :raises exception.NotFound: if the image does not exist.
    :return: a deep copy of the soft-deleted, normalized image record.
    """
    global DATA
    try:
        image = DATA["images"][image_id]
        image["deleted"] = True
        image["deleted_at"] = timeutils.utcnow()

        # NOTE(flaper87): Move the image to one of the deleted statuses
        # if it hasn't been done yet.
        if image["status"] not in ["deleted", "pending_delete"]:
            image["status"] = "deleted"

        _image_locations_set(image_id, [])

        for prop in image["properties"]:
            image_property_delete(context, prop["name"], image_id)

        for member in image_member_find(context, image_id=image_id):
            image_member_delete(context, member["id"])

        for tag in image_tag_get_all(context, image_id):
            image_tag_delete(context, image_id, tag)

        _normalize_locations(image)
        return copy.deepcopy(image)
    except KeyError:
        raise exception.NotFound()
def image_update(context, image_id, image_values, purge_props=False,
                 from_state=None):
    """Apply new values (and optionally new locations) to a stored image.

    :param purge_props: when True, soft-delete properties not resent.
    :param from_state: accepted for interface parity; unused here.
    :raises exception.NotFound: if the image does not exist.
    :return: the updated image record with normalized locations.
    """
    global DATA
    if image_id not in DATA["images"]:
        raise exception.NotFound()
    image = DATA["images"][image_id]

    location_data = image_values.pop("locations", None)
    if location_data is not None:
        _image_locations_set(image_id, location_data)

    # Overwrite values of properties that already exist; optionally mark
    # unsent ones as deleted.
    incoming = image_values.pop("properties", {})
    for prop in image["properties"]:
        if prop["name"] in incoming:
            prop["value"] = incoming.pop(prop["name"])
        elif purge_props:
            # this matches weirdness in the sqlalchemy api
            prop["deleted"] = True

    # Anything left over in `incoming` is a completely new property.
    image["properties"].extend(
        [{"name": key, "value": val, "image_id": image_id, "deleted": False}
         for key, val in incoming.items()])

    image["updated_at"] = timeutils.utcnow()
    image.update(image_values)
    DATA["images"][image_id] = image
    return _normalize_locations(image)
def test_get_index_sort_updated_at_desc(self):
    """
    Tests that the registry API returns list of public images
    sorted by updated_at in descending order.
    """
    base_time = timeutils.utcnow()
    later_time = base_time + datetime.timedelta(seconds=5)

    UUID3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=UUID3, created_at=None,
                                         updated_at=later_time))

    UUID4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=UUID4, created_at=None,
                                         updated_at=base_time))

    images = self.client.image_get_all(sort_key='updated_at',
                                       sort_dir='desc')
    self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
                           unjsonify=False)
def image_update(context, image_id, image_values, purge_props=False):
    """Apply new values (and optionally new locations) to a stored image.

    :param purge_props: when True, soft-delete properties not resent.
    :raises exception.NotFound: if the image does not exist.
    :return: the updated image record with normalized locations.
    """
    global DATA
    if image_id not in DATA['images']:
        raise exception.NotFound()
    image = DATA['images'][image_id]

    location_data = image_values.pop('locations', None)
    if location_data is not None:
        _image_locations_set(image_id, location_data)

    # Overwrite values of properties that already exist; optionally mark
    # unsent ones as deleted.
    incoming = image_values.pop('properties', {})
    for prop in image['properties']:
        if prop['name'] in incoming:
            prop['value'] = incoming.pop(prop['name'])
        elif purge_props:
            # this matches weirdness in the sqlalchemy api
            prop['deleted'] = True

    # Anything left over in `incoming` is a completely new property.
    image['properties'].extend(
        [{'name': key, 'value': val, 'image_id': image_id, 'deleted': False}
         for key, val in incoming.items()])

    image['updated_at'] = timeutils.utcnow()
    image.update(image_values)
    DATA['images'][image_id] = image
    return _normalize_locations(image)
def setUp(self):
    """Wire up the registry RPC API under test and seed two image rows."""
    super(TestRegistryRPC, self).setUp()
    self.mapper = routes.Mapper()
    # Auth middleware is faked so every request runs as admin.
    self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper),
                                             is_admin=True)
    # Two canned images: a private AMI kernel and a public VHD/OVF image.
    self.FIXTURES = [
        {'id': UUID1,
         'name': 'fake image #1',
         'status': 'active',
         'disk_format': 'ami',
         'container_format': 'ami',
         'is_public': False,
         'created_at': timeutils.utcnow(),
         'updated_at': timeutils.utcnow(),
         'deleted_at': None,
         'deleted': False,
         'checksum': None,
         'min_disk': 0,
         'min_ram': 0,
         'size': 13,
         'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1),
                        'metadata': {}}],
         'properties': {'type': 'kernel'}},
        {'id': UUID2,
         'name': 'fake image #2',
         'status': 'active',
         'disk_format': 'vhd',
         'container_format': 'ovf',
         'is_public': True,
         'created_at': timeutils.utcnow(),
         'updated_at': timeutils.utcnow(),
         'deleted_at': None,
         'deleted': False,
         'checksum': None,
         'min_disk': 5,
         'min_ram': 256,
         'size': 19,
         'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID2),
                        'metadata': {}}],
         'properties': {}}]
    self.context = glance.context.RequestContext(is_admin=True)
    db_api.setup_db_env()
    db_api.get_engine()
    # Start from a clean database, then load the fixtures above.
    self.destroy_fixtures()
    self.create_fixtures()
def test_task_invalid_status(self):
    """Constructing a Task with a bogus status raises InvalidTaskStatus."""
    now = timeutils.utcnow()
    self.assertRaises(exception.InvalidTaskStatus,
                      domain.Task,
                      uuidutils.generate_uuid(),
                      type='import',
                      status='blah',
                      input=None,
                      result=None,
                      owner=None,
                      message=None,
                      expires_at=None,
                      created_at=now,
                      updated_at=now)
def image_destroy(context, image_id):
    """Soft-delete an image and return a deep copy of its record.

    :raises exception.NotFound: if the image does not exist.
    """
    global DATA
    try:
        image = DATA["images"][image_id]
        image["deleted"] = True
        image["deleted_at"] = timeutils.utcnow()
        return copy.deepcopy(image)
    except KeyError:
        raise exception.NotFound()
def generate_message(event_type, priority, payload):
    """Assemble a notification envelope around the given payload."""
    return dict(message_id=str(uuid.uuid4()),
                publisher_id=socket.gethostname(),
                event_type=event_type,
                priority=priority,
                payload=payload,
                timestamp=str(timeutils.utcnow()))
def _pre_upgrade_004(self, engine):
    """Insert checksum data sample to check if migration goes fine with
    data.
    """
    now = timeutils.utcnow()
    images = get_table(engine, 'images')
    sample_row = {'deleted': False,
                  'created_at': now,
                  'updated_at': now,
                  'type': 'kernel',
                  'status': 'active',
                  'is_public': True}
    data = [sample_row]
    engine.execute(images.insert(), data)
    return data
def _domain_fixture(task_id, **kwargs):
    """Build a glance.domain.Task with overridable defaults."""
    now = timeutils.utcnow()
    return glance.domain.Task(
        task_id=task_id,
        status=kwargs.get('status', 'pending'),
        task_type=kwargs.get('type', 'import'),
        owner=kwargs.get('owner', None),
        expires_at=kwargs.get('expires_at', None),
        created_at=kwargs.get('created_at', now),
        updated_at=kwargs.get('updated_at', now),
        task_input=kwargs.get('task_input', {}),
        message=kwargs.get('message', None),
        result=kwargs.get('result', None))
def test_get_index_sort_updated_at_desc(self):
    """
    Tests that the /images registry API returns list of public images
    sorted by updated_at in descending order.
    """
    now = timeutils.utcnow()
    time1 = now + datetime.timedelta(seconds=5)
    time2 = now

    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'is_public': True,
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 19,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': time1}
    db_api.image_create(self.context, extra_fixture)

    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'is_public': True,
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': time2}
    db_api.image_create(self.context, extra_fixture)

    images = self.client.get_images(sort_key='updated_at', sort_dir='desc')

    # NOTE: switched from assertEquals, a deprecated alias removed in
    # Python 3.12, to the canonical assertEqual.
    self.assertEqual(len(images), 3)
    self.assertEqual(images[0]['id'], UUID3)
    self.assertEqual(images[1]['id'], UUID4)
    self.assertEqual(images[2]['id'], UUID2)
def _image_locations_set(image_id, locations):
    """Replace an image's locations with a new list (in-memory driver).

    Existing location records attached to the image are soft-deleted,
    live entries for the image are dropped from the global location
    table, and the supplied locations are appended to both the image
    and the global table.
    """
    global DATA
    image = DATA['images'][image_id]
    for location in image['locations']:
        location['deleted'] = True
        location['deleted_at'] = timeutils.utcnow()

    # BUG FIX: the original used `del DATA['locations'][i]` while
    # iterating with enumerate(); deleting by index during iteration
    # skips the element that follows each deletion, leaving stale live
    # entries behind. Rebuild the list instead.
    DATA['locations'] = [loc for loc in DATA['locations']
                         if not (loc['image_id'] == image_id
                                 and loc['deleted'] is False)]

    for location in locations:
        location_ref = _image_locations_format(image_id,
                                               value=location['url'],
                                               meta_data=location['metadata'])
        DATA['locations'].append(location_ref)
        image['locations'].append(location_ref)
def _image_format(image_id, **values):
    """Create an image record, wiring up any supplied locations."""
    now = timeutils.utcnow()
    image = dict(id=image_id,
                 name=None,
                 owner=None,
                 locations=[],
                 status='queued',
                 protected=False,
                 is_public=False,
                 container_format=None,
                 disk_format=None,
                 min_ram=0,
                 min_disk=0,
                 size=None,
                 virtual_size=None,
                 checksum=None,
                 tags=[],
                 created_at=now,
                 updated_at=now,
                 deleted_at=None,
                 deleted=False)

    supplied_locations = values.pop('locations', None)
    if supplied_locations is not None:
        # Register each location both on the image and in the global table.
        image['locations'] = []
        for entry in supplied_locations:
            ref = _image_location_format(image_id,
                                         entry['url'],
                                         entry['metadata'],
                                         entry['status'])
            image['locations'].append(ref)
            DATA['locations'].append(ref)

    #NOTE(bcwaldon): store properties as a list to match sqlalchemy driver
    raw_properties = values.pop('properties', {})
    image['properties'] = [{'name': key,
                            'value': val,
                            'image_id': image_id,
                            'deleted': False}
                           for key, val in raw_properties.items()]

    image.update(values)
    return image
def _db_fixture(task_id, **kwargs):
    """Return a task row dict with defaults, overridden by kwargs."""
    now = timeutils.utcnow()
    row = dict(id=task_id,
               status='pending',
               type='import',
               input={},
               result=None,
               owner=None,
               message=None,
               expires_at=None,
               created_at=now,
               updated_at=now,
               deleted_at=None,
               deleted=False)
    row.update(kwargs)
    return row
def image_update(context, image_id, image_values):
    """Replace an image's properties and merge in new values.

    Unlike the richer drivers, this variant replaces the property list
    wholesale rather than merging it.

    :raises exception.NotFound: if the image does not exist.
    """
    global DATA
    image = DATA['images'].get(image_id)
    if image is None:
        raise exception.NotFound(image_id=image_id)

    raw_properties = image_values.pop('properties', {})
    image['properties'] = [{'name': key, 'value': val, 'deleted': False}
                           for key, val in raw_properties.items()]

    image['updated_at'] = timeutils.utcnow()
    image.update(image_values)
    DATA['images'][image_id] = image
    return image
def new_task(self, task_type, owner, task_time_to_live=48):
    """Construct a new pending Task with a freshly generated ID."""
    now = timeutils.utcnow()
    # Note(nikhil): expires_at would be set on the task, only when it
    # succeeds or fails.
    return Task(str(uuid.uuid4()),
                task_type,
                'pending',
                owner,
                None,
                now,
                now,
                task_time_to_live)
def _task_format(task_id, **values):
    """Return a task record with defaults, overridden by values."""
    now = timeutils.utcnow()
    task = dict(id=task_id,
                type='import',
                status='pending',
                input=None,
                result=None,
                owner=None,
                message=None,
                expires_at=None,
                created_at=now,
                updated_at=now,
                deleted_at=None,
                deleted=False)
    task.update(values)
    return task
def task_update(context, task_id, values):
    """Update a task object"""
    global DATA
    incoming = copy.deepcopy(values)
    # Task-info fields live in a separate structure; split them out first.
    task_info_values = _pop_task_info_values(incoming)

    task = DATA['tasks'].get(task_id)
    if task is None:
        msg = (_("No task found with ID %s") % task_id)
        LOG.debug(msg)
        raise exception.TaskNotFound(task_id=task_id)

    task.update(incoming)
    task['updated_at'] = timeutils.utcnow()
    DATA['tasks'][task_id] = task

    task_info = _task_info_update(task['id'], task_info_values)
    return _format_task_from_db(task, task_info)
def task_update(context, task_id, values, session=None):
    """Update a task object"""
    session = session or get_session()
    with session.begin():
        # Task-info fields live in a separate table; split them out first.
        info_values = _pop_task_info_values(values)
        task_ref = _task_get(context, task_id, session)
        _drop_protected_attrs(models.Task, values)
        values['updated_at'] = timeutils.utcnow()
        _task_update(context, task_ref, values, session)
        if info_values:
            _task_info_update(context, task_id, info_values, session)
        return task_get(context, task_id, session)
def setUp(self):
    """Establish a clean test environment"""
    super(TestRegistryV2Client, self).setUp()
    db_api.get_engine()
    self.context = context.RequestContext(is_admin=True)
    # The second image's created_at is 5s later so ordering-sensitive
    # tests are deterministic.
    uuid1_time = timeutils.utcnow()
    uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
    self.FIXTURES = [
        self.get_extra_fixture(
            id=UUID1, name='fake image #1', is_public=False,
            disk_format='ami', container_format='ami', size=13,
            virtual_size=26, properties={'type': 'kernel'},
            location="swift://*****:*****@acct/container/obj.tar.0",
            created_at=uuid1_time),
        self.get_extra_fixture(id=UUID2, name='fake image #2',
                               properties={}, size=19, virtual_size=38,
                               location="file:///tmp/glance-tests/2",
                               created_at=uuid2_time)]
    # Reset the database, load the fixtures, then point a registry
    # client at the local server.
    self.destroy_fixtures()
    self.create_fixtures()
    self.client = rclient.RegistryClient("0.0.0.0")
def _pre_upgrade_006(self, engine):
    """Seed an image plus one property row before migration 006 runs."""
    now = timeutils.utcnow()

    images = get_table(engine, 'images')
    image_rows = [{'deleted': False,
                   'created_at': now,
                   'updated_at': now,
                   'type': 'kernel',
                   'status': 'active',
                   'is_public': True,
                   'id': 9999}]
    engine.execute(images.insert(), image_rows)

    images_properties = get_table(engine, 'image_properties')
    property_rows = [{'id': 10,
                      'image_id': 9999,
                      'updated_at': now,
                      'created_at': now,
                      'deleted': False,
                      'key': 'image_name'}]
    engine.execute(images_properties.insert(), property_rows)
    return property_rows
def new_image(self, image_id=None, name=None, visibility='private',
              min_disk=0, min_ram=0, protected=False, owner=None,
              disk_format=None, container_format=None,
              extra_properties=None, tags=None, **other_args):
    """Build a brand-new queued Image after validating the arguments."""
    self._check_readonly(other_args)
    self._check_unexpected(other_args)
    self._check_reserved(extra_properties)

    if image_id is None:
        image_id = str(uuid.uuid4())
    now = timeutils.utcnow()

    return Image(image_id=image_id,
                 name=name,
                 status='queued',
                 created_at=now,
                 updated_at=now,
                 visibility=visibility,
                 min_disk=min_disk,
                 min_ram=min_ram,
                 protected=protected,
                 owner=owner,
                 disk_format=disk_format,
                 container_format=container_format,
                 extra_properties=extra_properties,
                 tags=tags)
def test_get_index_sort_created_at_asc(self):
    """
    Tests that the /images registry API returns list of public images
    sorted by created_at in ascending order.
    """
    base_time = timeutils.utcnow()
    later_time = base_time + datetime.timedelta(seconds=5)

    UUID3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=UUID3, created_at=later_time))

    UUID4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=UUID4, created_at=base_time))

    images = self.client.get_images(sort_key='created_at', sort_dir='asc')
    self.assertEqualImages(images, (UUID2, UUID4, UUID3), unjsonify=False)
def _pre_upgrade_012(self, engine):
    """Test rows in images have id changes from int to varchar(32) and
    value changed from int to UUID. Also test image_members and
    image_properties gets updated to point to new UUID keys"""
    images = get_table(engine, 'images')
    image_members = get_table(engine, 'image_members')
    image_properties = get_table(engine, 'image_properties')

    # Insert kernel, ramdisk and normal images
    now = timeutils.utcnow()
    data = {'created_at': now, 'updated_at': now, 'status': 'active',
            'deleted': False, 'is_public': True, 'min_disk': 0,
            'min_ram': 0}

    # Record the integer primary key of each inserted image so the
    # post-upgrade check can map them to the new UUID keys.
    test_data = {}
    for name in ('kernel', 'ramdisk', 'normal'):
        data['name'] = '%s migration 012 test' % name
        result = images.insert().values(data).execute()
        test_data[name] = result.inserted_primary_key[0]

    # Insert image_members and image_properties rows
    data = {'created_at': now, 'updated_at': now, 'deleted': False,
            'image_id': test_data['normal'], 'member': 'foobar',
            'can_share': False}
    result = image_members.insert().values(data).execute()
    test_data['member'] = result.inserted_primary_key[0]

    data = {'created_at': now, 'updated_at': now, 'deleted': False,
            'image_id': test_data['normal'], 'name': 'ramdisk_id',
            'value': test_data['ramdisk']}
    result = image_properties.insert().values(data).execute()
    test_data['properties'] = [result.inserted_primary_key[0]]

    # Reuse the same dict for the kernel_id property row.
    data.update({'name': 'kernel_id', 'value': test_data['kernel']})
    result = image_properties.insert().values(data).execute()
    # NOTE(review): this appends the whole inserted_primary_key tuple,
    # while the previous append stored element [0] -- looks inconsistent;
    # confirm against the post-upgrade checker before changing.
    test_data['properties'].append(result.inserted_primary_key)

    return test_data
def adding_locations(self, image_id, auth_token, locs, **kwargs):
    """Queue sync tasks for newly added image locations.

    For glance-backed snapshot locations, wait (bounded by
    CONF.sync.snapshot_timeout) for the snapshot image to become
    active, then queue a 'snapshot' task.  Non-glance ("normal")
    locations queue a 'patch' task immediately.  No-op when sync is
    disabled.
    """
    if CONF.sync.sync_strategy == 'None':
        return
    for loc in locs:
        if s_utils.is_glance_location(loc['url']):
            if s_utils.is_snapshot_location(loc):
                snapshot_ep = s_utils.create_ep_by_loc(loc)
                snapshot_id = s_utils.get_id_from_glance_loc(loc)
                snapshot_client = create_glance_client(auth_token,
                                                       snapshot_ep)
                snapshot_image = snapshot_client.images.get(snapshot_id)
                _pre_check_time = timeutils.utcnow()
                _timout = CONF.sync.snapshot_timeout
                # Poll until the snapshot goes active or the timeout
                # window elapses.
                while not timeutils.is_older_than(_pre_check_time,
                                                  _timout):
                    if snapshot_image.status == 'active':
                        break
                    LOG.debug(_('Check snapshot not active, wait for %i'
                                'second.'
                                % CONF.sync.snapshot_sleep_interval))
                    time.sleep(CONF.sync.snapshot_sleep_interval)
                    snapshot_image = snapshot_client.images.get(
                        snapshot_id)

                if snapshot_image.status != 'active':
                    LOG.error(_('Snapshot status to active Timeout'))
                    return
                kwargs['image_id'] = image_id
                kwargs['snapshot_ep'] = snapshot_ep
                kwargs['snapshot_id'] = snapshot_id
                snapshot_task = TaskObject.get_instance('snapshot',
                                                        kwargs)
                self.task_queue.put_nowait(snapshot_task)
        else:
            LOG.debug(_('patch a normal location %s to image %s'
                        % (loc['url'], image_id)))
            # Renamed from 'input', which shadowed the builtin.
            patch_input = {'image_id': image_id, 'location': loc}
            self.task_queue.put_nowait(
                TaskObject.get_instance('patch', patch_input))
def test_image_get_index_marker_limit(self):
    """Test correct set of images returned with marker/limit params."""
    uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid3_time = uuid4_time + datetime.timedelta(seconds=5)

    UUID3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=UUID3, name='new name! #123',
                                         status='saving',
                                         created_at=uuid3_time))

    UUID4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=UUID4, name='new name! #125',
                                         status='saving',
                                         created_at=uuid4_time))

    images = self.client.image_get_all(marker=UUID4, limit=1)
    self.assertEqualImages(images, (UUID2,), unjsonify=False)
def build_image_fixture(**kwargs):
    """Build a complete fake image record, with overrides via kwargs."""
    now = timeutils.utcnow()
    fixture = {
        'id': uuidutils.generate_uuid(),
        'name': 'fake image #2',
        'status': 'active',
        'disk_format': 'vhd',
        'container_format': 'ovf',
        'is_public': True,
        'created_at': now,
        'updated_at': now,
        'deleted_at': None,
        'deleted': False,
        'checksum': None,
        'min_disk': 5,
        'min_ram': 256,
        'size': 19,
        'locations': ["file:///tmp/glance-tests/2"],
        'properties': {},
    }
    # Caller-supplied values win over the defaults above.
    fixture.update(kwargs)
    return fixture
def image_destroy(context, image_id):
    """Soft-delete an image and cascade to its dependent records.

    Clears locations and removes properties, members and tags, then
    returns a normalized deep copy of the deleted image record.
    Raises NotFound if the image does not exist.
    """
    global DATA
    try:
        image = DATA['images'][image_id]
        image['deleted'] = True
        image['deleted_at'] = timeutils.utcnow()
        _image_locations_set(image_id, [])

        for prop in image['properties']:
            image_property_delete(context, prop['name'], image_id)

        for member in image_member_find(context, image_id=image_id):
            image_member_delete(context, member['id'])

        for tag in image_tag_get_all(context, image_id):
            image_tag_delete(context, image_id, tag)

        _normalize_locations(image)
        return copy.deepcopy(image)
    except KeyError:
        raise exception.NotFound()
def test_get_index_sort_status_desc(self):
    """
    Verify the registry API lists public images ordered by status,
    descending alphabetically.
    """
    stamp4 = timeutils.utcnow() + datetime.timedelta(seconds=10)

    UUID3 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=UUID3, name='asdf',
                                         status='queued'))

    UUID4 = _gen_uuid()
    db_api.image_create(self.context,
                        self.get_fixture(id=UUID4, name='xyz',
                                         created_at=stamp4))

    # 'queued' > 'active', so UUID3 leads; ties fall back to recency.
    images = self.client.image_get_all(sort_key='status', sort_dir='desc')
    self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
                           unjsonify=False)
def image_update(context, image_id, image_values, purge_props=False):
    """Update an image's values and properties in the fake DB.

    Existing properties named in the new set are overwritten; with
    purge_props, properties absent from the new set are soft-deleted.
    Returns the stored image record. Raises NotFound for unknown IDs.
    """
    global DATA
    try:
        image = DATA['images'][image_id]
    except KeyError:
        raise exception.NotFound(image_id=image_id)

    incoming = image_values.pop('properties', {})

    # Overwrite values of properties that already exist on the image.
    for prop in image['properties']:
        name = prop['name']
        if name in incoming:
            prop['value'] = incoming.pop(name)
        elif purge_props:
            # this matches weirdness in the sqlalchemy api
            prop['deleted'] = True

    # Whatever remains is completely new; append it.
    image['properties'] += [{'name': key, 'value': value, 'deleted': False}
                            for key, value in incoming.items()]

    image['updated_at'] = timeutils.utcnow()
    image.update(image_values)
    DATA['images'][image_id] = image
    return image
def image_update(context, image_id, image_values, purge_props=False,
                 from_state=None):
    """Update an image's values, locations and properties in the fake DB.

    Locations, when supplied, replace the image's location set. Existing
    properties named in the new set are overwritten; with purge_props,
    properties absent from the new set are soft-deleted. Returns a
    normalized deep copy of the record. Raises NotFound for unknown IDs.
    """
    global DATA
    try:
        image = DATA['images'][image_id]
    except KeyError:
        raise exception.NotFound()

    locations = image_values.pop('locations', None)
    if locations is not None:
        _image_locations_set(context, image_id, locations)

    incoming = image_values.pop('properties', {})

    # Overwrite values of properties that already exist on the image.
    for prop in image['properties']:
        name = prop['name']
        if name in incoming:
            prop['value'] = incoming.pop(name)
        elif purge_props:
            # this matches weirdness in the sqlalchemy api
            prop['deleted'] = True

    # Whatever remains is completely new; append it.
    image['properties'] += [{'name': key,
                             'value': value,
                             'image_id': image_id,
                             'deleted': False}
                            for key, value in incoming.items()]

    image['updated_at'] = timeutils.utcnow()
    image.update(image_values)
    DATA['images'][image_id] = image

    return _normalize_locations(copy.deepcopy(image))
def test_get_index_sort_default_created_at_desc(self):
    """
    Verify the registry RPC API applies the default sort key/dir
    (created_at, descending) when none is requested.
    """
    time5 = timeutils.utcnow() + datetime.timedelta(seconds=10)
    time4 = time5 + datetime.timedelta(seconds=5)
    time3 = time4 + datetime.timedelta(seconds=5)

    UUID3 = _gen_uuid()
    UUID4 = _gen_uuid()
    UUID5 = _gen_uuid()

    # Three extra public images, identical apart from id/size/timestamps.
    for image_id, size, stamp in ((UUID3, 19, time3),
                                  (UUID4, 20, time4),
                                  (UUID5, 20, time5)):
        db_api.image_create(self.context, {
            'id': image_id,
            'status': 'active',
            'is_public': True,
            'disk_format': 'vhd',
            'container_format': 'ovf',
            'name': 'new name! #123',
            'size': size,
            'checksum': None,
            'created_at': stamp,
            'updated_at': stamp,
        })

    req = webob.Request.blank('/rpc')
    req.method = "POST"
    req.body = jsonutils.dumps([{'command': 'image_get_all'}])

    res = req.get_response(self.api)
    images = jsonutils.loads(res.body)[0]
    self.assertEqual(res.status_int, 200)

    # (flaper87)registry's v1 forced is_public to True
    # when no value was specified. This is not
    # the default behaviour anymore.
    self.assertEqual(len(images), 5)
    for index, expected in enumerate([UUID3, UUID4, UUID5, UUID2, UUID1]):
        self.assertEqual(images[index]['id'], expected)
def delete(self, session=None):
    """Soft-delete this object: flag it, stamp the time, persist it."""
    self.deleted_at = timeutils.utcnow()
    self.deleted = True
    self.save(session=session)
def test_get_image_details_with_changes_since(self):
    """Tests that a detailed call can be filtered by changes-since"""
    # Four reference instants: one in the past, three in the future.
    dt1 = timeutils.utcnow() - datetime.timedelta(1)
    iso1 = timeutils.isotime(dt1)

    dt2 = timeutils.utcnow() + datetime.timedelta(1)
    iso2 = timeutils.isotime(dt2)

    dt3 = timeutils.utcnow() + datetime.timedelta(2)
    iso3 = timeutils.isotime(dt3)

    dt4 = timeutils.utcnow() + datetime.timedelta(3)
    iso4 = timeutils.isotime(dt4)

    # UUID3 is created and immediately destroyed so it only shows up
    # in changes-since results (deleted images count as "changed").
    UUID3 = _gen_uuid()
    extra_fixture = {
        'id': UUID3,
        'status': 'active',
        'is_public': True,
        'disk_format': 'vhd',
        'container_format': 'ovf',
        'name': 'fake image #3',
        'size': 18,
        'checksum': None
    }
    db_api.image_create(self.context, extra_fixture)
    db_api.image_destroy(self.context, UUID3)

    UUID4 = _gen_uuid()
    extra_fixture = {
        'id': UUID4,
        'status': 'active',
        'is_public': True,
        'disk_format': 'ami',
        'container_format': 'ami',
        'name': 'fake image #4',
        'size': 20,
        'checksum': None,
        'created_at': dt3,
        'updated_at': dt3
    }
    db_api.image_create(self.context, extra_fixture)

    # Check a standard list, 4 images in db (2 deleted)
    images = self.client.get_images_detailed(filters={})
    self.assertEqual(images[0]['id'], UUID4)
    self.assertEqual(images[1]['id'], UUID2)

    # Expect 3 images (1 deleted)
    filters = {'changes-since': iso1}
    images = self.client.get_images(filters=filters)
    # BUG FIX: assertEquals is a deprecated (now removed) unittest alias;
    # use assertEqual, consistent with the rest of this suite.
    self.assertEqual(len(images), 3)
    self.assertEqual(images[0]['id'], UUID4)
    self.assertEqual(images[1]['id'], UUID3)  # deleted
    self.assertEqual(images[2]['id'], UUID2)

    # Expect 1 images (0 deleted)
    filters = {'changes-since': iso2}
    images = self.client.get_images_detailed(filters=filters)
    self.assertEqual(len(images), 1)
    self.assertEqual(images[0]['id'], UUID4)

    # Expect 0 images (0 deleted)
    filters = {'changes-since': iso4}
    images = self.client.get_images(filters=filters)
    self.assertEqual(len(images), 0)
def soft_delete(self, synchronize_session='evaluate'):
    """Soft-delete every row matched by this query in one UPDATE.

    Sets 'deleted' to the row's own 'id' via literal_column (presumably
    so unique constraints ignore deleted rows — confirm against the
    schema), leaves 'updated_at' unchanged by assigning it to itself,
    and stamps 'deleted_at' with the current time.

    :param synchronize_session: passed through to Query.update; controls
        how in-session objects are reconciled with the bulk UPDATE.
    :returns: number of rows matched, as returned by Query.update.
    """
    return self.update({'deleted': literal_column('id'),
                        'updated_at': literal_column('updated_at'),
                        'deleted_at': timeutils.utcnow()},
                       synchronize_session=synchronize_session)
def test_change_updated_at(self):
    """Exercise _test_change with a fresh 'updated_at' timestamp."""
    stamp = timeutils.utcnow()
    self._test_change('updated_at', stamp)
def test_change_created_at(self):
    """Exercise _test_change with a fresh 'created_at' timestamp."""
    stamp = timeutils.utcnow()
    self._test_change('created_at', stamp)
def setUp(self):
    """Wire up the registry RPC API and seed two image fixtures."""
    super(TestRegistryRPC, self).setUp()
    self.mapper = routes.Mapper()
    self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper),
                                             is_admin=True)

    time1 = timeutils.utcnow()
    time2 = time1 + datetime.timedelta(seconds=5)

    def _image(image_id, stamp, **overrides):
        # Common fields shared by every fixture; per-image values
        # arrive as overrides.
        record = {
            'id': image_id,
            'status': 'active',
            'created_at': stamp,
            'updated_at': stamp,
            'deleted_at': None,
            'deleted': False,
            'checksum': None,
            'locations': [{
                'url': "file:///%s/%s" % (self.test_dir, image_id),
                'metadata': {}
            }],
        }
        record.update(overrides)
        return record

    self.FIXTURES = [
        _image(UUID1, time1,
               name='fake image #1',
               disk_format='ami',
               container_format='ami',
               is_public=False,
               min_disk=0,
               min_ram=0,
               size=13,
               properties={'type': 'kernel'}),
        _image(UUID2, time2,
               name='fake image #2',
               disk_format='vhd',
               container_format='ovf',
               is_public=True,
               min_disk=5,
               min_ram=256,
               size=19,
               properties={}),
    ]

    self.context = glance.context.RequestContext(is_admin=True)
    db_api.get_engine()
    self.destroy_fixtures()
    self.create_fixtures()