def _ticket_metadata(self, source=DEFAULT_SOURCE, destination=DEFAULT_DEST,
                     nonce=DEFAULT_NONCE, timestamp=None, b64encode=True):
    """Assemble the metadata dictionary for a ticket request.

    :param source: source identity (defaults to DEFAULT_SOURCE).
    :param destination: destination identity (defaults to DEFAULT_DEST).
    :param nonce: request nonce (defaults to DEFAULT_NONCE).
    :param timestamp: request time; falls back to the current UTC time.
    :param b64encode: accepted for caller compatibility.
    :returns: dict with source, destination, nonce and timestamp entries.
    """
    # A missing timestamp means "now".
    if not timestamp:
        timestamp = timeutils.utcnow()
    metadata = {"source": source,
                "destination": destination,
                "nonce": nonce,
                "timestamp": timestamp}
    return metadata
def test_invalid_expired_request(self):
    """A ticket request stamped five hours in the past is rejected (401)."""
    self._add_key(DEFAULT_SOURCE)
    self._add_key(DEFAULT_DEST)
    stale = timeutils.utcnow() - datetime.timedelta(hours=5)
    self._request_ticket(status=401, timestamp=stale)
def __init__(self, **kwargs):
    """Initialize the request, recording the current time for later checks."""
    super(BaseRequest, self).__init__(**kwargs)
    self._cache = {}
    self.now = timeutils.utcnow()
    # NOTE(jamielennox): This is essentially a class variable, however
    # that confuses WSME, so it is initialized per instance instead.
    self.destination_is_group = None
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table named `table_name`, keeping for each
    duplicated value combination only the row with the biggest id.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    # Reflect the table definition from the live database.
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table(table_name, meta, autoload=True)
    # Select every combination of the unique-constraint columns that occurs
    # more than once, along with the largest id in each such group.
    columns_for_group_by = [table.c[name] for name in uc_column_names]
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)
    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)
    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        # Only touch rows that are not already soft-deleted.
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]
        # Log each row that is about to be removed or soft-deleted.
        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(
                _LI("Deleting duplicated row with id: %(id)s from table: "
                    "%(table)s") % dict(id=row[0], table=table_name))
        if use_soft_delete:
            # Soft delete: set `deleted` to the row's own id and stamp
            # deleted_at; updated_at is deliberately left unchanged by
            # assigning it to itself.
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table named `table_name`; for each duplicated
    value combination only the row with the biggest id survives.

    :param migrate_engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    # Autoload the table schema through the bound engine.
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table(table_name, meta, autoload=True)
    # Group by the unique-constraint columns and keep groups with more than
    # one row; MAX(id) identifies the row that must be preserved.
    columns_for_group_by = [table.c[name] for name in uc_column_names]
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)
    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)
    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        # Skip rows that were already soft-deleted earlier.
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]
        # Emit a log line for every row the condition matches.
        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s") % dict(id=row[0], table=table_name))
        if use_soft_delete:
            # Soft delete: `deleted` becomes the row id, deleted_at is
            # stamped, and updated_at is self-assigned so it keeps its value.
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
def _ticket_metadata(self, source=DEFAULT_SOURCE, destination=DEFAULT_DEST,
                     nonce=DEFAULT_NONCE, timestamp=None, b64encode=True):
    """Return ticket-request metadata as a dict.

    :param source: source identity, DEFAULT_SOURCE by default.
    :param destination: destination identity, DEFAULT_DEST by default.
    :param nonce: request nonce, DEFAULT_NONCE by default.
    :param timestamp: request time; the current UTC time when not given.
    :param b64encode: accepted for caller compatibility.
    :returns: dict carrying source, destination, nonce and timestamp.
    """
    # Substitute "now" when no timestamp was supplied.
    ts = timestamp if timestamp else timeutils.utcnow()
    return dict(source=source,
                destination=destination,
                nonce=nonce,
                timestamp=ts)
def soft_delete(self, session):
    """Mark this object as deleted and persist the change.

    Sets `deleted` to the row's own id, stamps `deleted_at` with the
    current UTC time, then saves through the given session.
    """
    self.deleted_at = timeutils.utcnow()
    self.deleted = self.id
    self.save(session=session)
class TimestampMixin(object):
    """Mixin adding row creation/update timestamp columns.

    `created_at` is populated at INSERT time, `updated_at` on every UPDATE.
    """

    # NOTE(review): the lambdas defer the timeutils.utcnow lookup until the
    # row is actually written — presumably so time overrides installed on
    # timeutils at runtime take effect; confirm before simplifying.
    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
def get_key(self, name, generation=None, group=None):
    """Retrieve a key from the driver and decrypt it for use.

    If it is a group key and it has expired or is not found then a new one
    is generated, stored and returned for use.

    :param string name: Key Identifier
    :param int generation: Key generation to retrieve. Default latest
    """
    key = dbapi.get_instance().get_key(name,
                                       generation=generation,
                                       group=group)
    cm = crypto.CryptoManager.get_instance()

    # A missing host/group, or a group flag that contradicts the stored
    # entry, both read as "not found".
    if not key or (group is not None and group != key['group']):
        raise exception.KeyNotFound(name=name, generation=generation)

    now = timeutils.utcnow()
    expiration = key.get('expiration')

    if key['group'] and expiration:
        if generation is not None:
            # An explicitly requested group generation stays retrievable
            # for a short grace period past its nominal expiry.
            timeout = expiration + datetime.timedelta(minutes=10)
        else:
            # When a replacement can be generated anyway, retire the
            # current key slightly early so callers are not handed one
            # that needs refreshing almost immediately.
            timeout = expiration - datetime.timedelta(minutes=2)
    else:
        # Non-expiring group key or host key: compare against expiry as-is.
        timeout = expiration

    if timeout and now >= timeout:
        if not key['group']:
            raise exception.KeyNotFound(name=name, generation=generation)
        # Expired group key: discard the material so a fresh one is
        # generated below.
        key = {'group': True}

    if 'key' in key:
        dec_key = cm.decrypt_key(name,
                                 enc_key=key['key'],
                                 signature=key['signature'])
        return {'key': dec_key,
                'generation': key['generation'],
                'name': key['name'],
                'group': key['group']}

    # No usable key material remains. Only a latest-generation group
    # request is allowed to mint a replacement.
    if generation is not None or not key['group']:
        raise exception.KeyNotFound(name=name, generation=generation)

    # Generate, persist and return a brand new group key.
    new_key = cm.new_key()
    enc_key, signature = cm.encrypt_key(name, new_key)
    expiration = now + datetime.timedelta(minutes=15)
    new_gen = dbapi.get_instance().set_key(name,
                                           key=enc_key,
                                           signature=signature,
                                           group=True,
                                           expiration=expiration)
    return {'key': new_key,
            'generation': new_gen,
            'name': name,
            'group': True,
            'expiration': expiration}
def soft_delete(self, synchronize_session='evaluate'):
    """Soft delete every row matched by this query.

    `deleted` is set to the row's own id, `deleted_at` is stamped with the
    current UTC time, and `updated_at` is self-assigned so it is preserved.

    :param synchronize_session: strategy passed through to Query.update.
    :returns: the result of the underlying update call.
    """
    values = {
        'deleted': literal_column('id'),
        'updated_at': literal_column('updated_at'),
        'deleted_at': timeutils.utcnow(),
    }
    return self.update(values, synchronize_session=synchronize_session)
def test_expired(self):
    """Fetching a key whose expiry lies in the past raises KeyNotFound."""
    expired_at = timeutils.utcnow() - datetime.timedelta(minutes=10)
    self.STORAGE.set_key(TEST_NAME, TEST_KEY, expired_at)
    self.assertRaises(exception.KeyNotFound,
                      self.STORAGE.get_key,
                      TEST_NAME)
def get_key(self, name, generation=None, group=None):
    """Retrieve a key from the driver and decrypt it for use.

    A group key that has expired or does not exist is transparently
    replaced by a newly generated one.

    :param string name: Key Identifier
    :param int generation: Key generation to retrieve. Default latest
    """
    stored = dbapi.get_instance().get_key(name,
                                          generation=generation,
                                          group=group)
    crypto_manager = crypto.CryptoManager.get_instance()

    if not stored:
        # host or group not found
        raise exception.KeyNotFound(name=name, generation=generation)
    if group is not None and group != stored['group']:
        # a group-flag mismatch is treated like a missing key
        raise exception.KeyNotFound(name=name, generation=generation)

    now = timeutils.utcnow()
    expiration = stored.get('expiration')

    # Work out the effective cut-off time for this key. Host keys and
    # non-expiring group keys are checked against the raw expiration.
    timeout = expiration
    if stored['group'] and expiration:
        if generation is None:
            # close to expiry, prefer minting a fresh group key over
            # handing out one that needs refreshing almost immediately
            timeout = expiration - datetime.timedelta(minutes=2)
        else:
            # a specifically requested group generation remains usable
            # for a little while beyond its expiry
            timeout = expiration + datetime.timedelta(minutes=10)

    if timeout and now >= timeout:
        if stored['group']:
            # drop the expired material so a new group key is created below
            stored = {'group': True}
        else:
            raise exception.KeyNotFound(name=name, generation=generation)

    if 'key' in stored:
        dec_key = crypto_manager.decrypt_key(name,
                                             enc_key=stored['key'],
                                             signature=stored['signature'])
        return {
            'key': dec_key,
            'generation': stored['generation'],
            'name': stored['name'],
            'group': stored['group']
        }

    if generation is not None or not stored['group']:
        # an explicit generation or a host key is never regenerated
        raise exception.KeyNotFound(name=name, generation=generation)

    # Generate and return a new group key, valid for fifteen minutes.
    new_key = crypto_manager.new_key()
    enc_key, signature = crypto_manager.encrypt_key(name, new_key)
    expiration = now + datetime.timedelta(minutes=15)
    new_gen = dbapi.get_instance().set_key(name,
                                           key=enc_key,
                                           signature=signature,
                                           group=True,
                                           expiration=expiration)
    return {
        'key': new_key,
        'generation': new_gen,
        'name': name,
        'group': True,
        'expiration': expiration
    }