def create(self, collection_id, parent_id, record, id_generator=None,
           id_field=DEFAULT_ID_FIELD,
           modified_field=DEFAULT_MODIFIED_FIELD,
           auth=None, ignore_conflict=False):
    """Create ``record`` under the given parent and collection.

    If the caller supplies an id that already exists, either return the
    stored record (when ``ignore_conflict`` is true) or raise a
    ``UnicityError``. Any tombstone with the same id is purged before
    the insert.
    """
    make_id = id_generator or self.id_generator
    record = {**record}

    if id_field not in record:
        record[id_field] = make_id()
    else:
        # A client-provided id may collide with a stored record.
        try:
            existing = self.get(collection_id, parent_id, record[id_field])
        except exceptions.RecordNotFoundError:
            pass
        else:
            if ignore_conflict:
                return existing
            raise exceptions.UnicityError(id_field, existing)

    # The id and timestamp live in dedicated columns; strip them from
    # the JSON payload to avoid duplicating them in ``data``.
    payload = {k: v for k, v in record.items()
               if k not in (id_field, modified_field)}

    query = """
    WITH delete_potential_tombstone AS (
        DELETE FROM deleted
         WHERE id = :object_id
           AND parent_id = :parent_id
           AND collection_id = :collection_id
    )
    INSERT INTO records (id, parent_id, collection_id, data, last_modified)
    VALUES (:object_id, :parent_id, :collection_id,
            (:data)::JSONB, from_epoch(:last_modified))
    %(on_conflict)s
    RETURNING id, as_epoch(last_modified) AS last_modified;
    """

    on_conflict = ""
    if ignore_conflict:
        # DO UPDATE is only there so the RETURNING clause yields a row
        # on conflict; nothing else about the stored row is touched.
        on_conflict = """
        ON CONFLICT (id, parent_id, collection_id)
        DO UPDATE SET last_modified = EXCLUDED.last_modified
        """

    placeholders = {
        "object_id": record[id_field],
        "parent_id": parent_id,
        "collection_id": collection_id,
        "last_modified": record.get(modified_field),
        "data": json.dumps(payload),
    }
    with self.client.connect() as conn:
        row = conn.execute(query % {"on_conflict": on_conflict},
                           placeholders).fetchone()

    record[modified_field] = row['last_modified']
    return record
def create(self, collection_id, parent_id, record, id_generator=None,
           id_field=DEFAULT_ID_FIELD,
           modified_field=DEFAULT_MODIFIED_FIELD,
           auth=None):
    """Create ``record`` in the in-memory store.

    Raises ``UnicityError`` when the caller supplies an id that already
    exists; otherwise a fresh id is generated.
    """
    make_id = id_generator or self.id_generator
    record = {**record}

    if id_field not in record:
        record[id_field] = make_id()
    else:
        # A client-provided id must not clash with a stored record.
        try:
            existing = self.get(collection_id, parent_id, record[id_field])
        except exceptions.RecordNotFoundError:
            pass
        else:
            raise exceptions.UnicityError(id_field, existing)

    self.set_record_timestamp(collection_id, parent_id, record,
                              modified_field=modified_field)
    record_id = record[id_field]
    self._store[parent_id][collection_id][record_id] = record
    # Creating a record removes any tombstone with the same id.
    self._cemetery[parent_id][collection_id].pop(record_id, None)
    return record
def check_unicity(self, collection_id, parent_id, record, unique_fields,
                  id_field, for_creation=False):
    """Check that ``record`` does not violate the unicity constraints
    declared in the resource's mapping options.

    Raises ``UnicityError`` for the first conflicting field found.
    """
    if for_creation and id_field in record:
        # A client-provided id must be unique too.
        unique_fields = (unique_fields or tuple()) + (id_field,)

    if not unique_fields:
        return

    rules = get_unicity_rules(collection_id, parent_id, record,
                              unique_fields=unique_fields,
                              id_field=id_field,
                              for_creation=for_creation)
    for filters in rules:
        existing, total = self.get_all(collection_id, parent_id,
                                       filters=filters,
                                       id_field=id_field)
        if total > 0:
            raise exceptions.UnicityError(filters[0].field, existing[0])
def create(
    self,
    resource_name,
    parent_id,
    obj,
    id_generator=None,
    id_field=DEFAULT_ID_FIELD,
    modified_field=DEFAULT_MODIFIED_FIELD,
    auth=None,
):
    """Create ``obj`` in the in-memory store, raising on id conflicts."""
    make_id = id_generator or self.id_generator
    obj = {**obj}

    if id_field not in obj:
        obj[id_field] = make_id()
    else:
        # Fail early when an object with the same id is already stored.
        try:
            existing = self.get(resource_name, parent_id, obj[id_field])
        except exceptions.ObjectNotFoundError:
            pass
        else:
            raise exceptions.UnicityError(id_field, existing)

    self.set_object_timestamp(resource_name, parent_id, obj,
                              modified_field=modified_field)
    _id = obj[id_field]
    # A JSON round-trip detaches the stored copy from the caller's
    # object and matches the serialization of the other backends.
    obj = ujson.loads(self.json.dumps(obj))
    self._store[parent_id][resource_name][_id] = obj
    # Creating an object removes any tombstone with the same id.
    self._cemetery[parent_id][resource_name].pop(_id, None)
    return obj
def create(
    self,
    resource_name,
    parent_id,
    obj,
    id_generator=None,
    id_field=DEFAULT_ID_FIELD,
    modified_field=DEFAULT_MODIFIED_FIELD,
):
    """Create ``obj`` in the in-memory store, raising on id conflicts."""
    make_id = id_generator or self.id_generator

    # A JSON round-trip is inefficient, but the memory backend is not
    # used in production: the serialization keeps behaviour consistent
    # with the other backends and the deserialization yields a deep
    # copy detached from the caller's object.
    obj = json.loads(json.dumps(obj))

    if id_field not in obj:
        obj[id_field] = make_id()
    else:
        # Fail early when an object with the same id is already stored.
        try:
            existing = self.get(resource_name, parent_id, obj[id_field])
        except exceptions.ObjectNotFoundError:
            pass
        else:
            raise exceptions.UnicityError(id_field, existing)

    self.set_object_timestamp(resource_name, parent_id, obj,
                              modified_field=modified_field)
    _id = obj[id_field]
    self._store[parent_id][resource_name][_id] = obj
    # Creating an object removes any tombstone with the same id.
    self._cemetery[parent_id][resource_name].pop(_id, None)
    return obj
def create(self, collection_id, parent_id, record, id_generator=None,
           id_field=DEFAULT_ID_FIELD,
           modified_field=DEFAULT_MODIFIED_FIELD,
           auth=None):
    """Insert ``record`` into the ``records`` table.

    Raises ``UnicityError`` if a record with the client-provided id
    already exists. Any tombstone for the same id is purged first.
    """
    make_id = id_generator or self.id_generator
    record = dict(record)

    if id_field not in record:
        record[id_field] = make_id()
    else:
        # A client-provided id must not clash with a stored record.
        try:
            existing = self.get(collection_id, parent_id, record[id_field])
        except exceptions.RecordNotFoundError:
            pass
        else:
            raise exceptions.UnicityError(id_field, existing)

    # id and timestamp have their own columns; keep the JSON payload lean.
    payload = {k: v for k, v in record.items()
               if k not in (id_field, modified_field)}

    query = """
    WITH delete_potential_tombstone AS (
        DELETE FROM deleted
         WHERE id = :object_id
           AND parent_id = :parent_id
           AND collection_id = :collection_id
    )
    INSERT INTO records (id, parent_id, collection_id, data, last_modified)
    VALUES (:object_id, :parent_id, :collection_id,
            (:data)::JSONB, from_epoch(:last_modified))
    RETURNING id, as_epoch(last_modified) AS last_modified;
    """
    placeholders = {
        "object_id": record[id_field],
        "parent_id": parent_id,
        "collection_id": collection_id,
        "last_modified": record.get(modified_field),
        "data": json.dumps(payload),
    }
    with self.client.connect() as conn:
        inserted = conn.execute(query, placeholders).fetchone()

    record[modified_field] = inserted['last_modified']
    return record
def create(self, collection_id, parent_id, record, id_generator=None,
           id_field=DEFAULT_ID_FIELD,
           modified_field=DEFAULT_MODIFIED_FIELD,
           auth=None):
    """Insert ``record`` using ``CREATE_QUERY`` and return the stored copy.

    Raises ``UnicityError`` if a record with the client-provided id
    already exists.
    """
    # Query template and target table.
    query = CREATE_QUERY
    table_name = "records"

    id_generator = id_generator or self.id_generator
    record = {**record}

    if id_field in record:
        # A client-provided id must not clash with an existing record.
        try:
            existing = self.get(collection_id, parent_id, record[id_field])
            raise exceptions.UnicityError(id_field, existing)
        except exceptions.RecordNotFoundError:
            pass
    else:
        record[id_field] = id_generator()

    # Bind values for the INSERT statement.
    # NOTE(review): unlike sibling backends, the full record (including
    # the id field) is serialized into ``data``, and ``last_modified``
    # comes from the wall clock rather than the record — confirm this
    # redundancy/clock source is intended.
    values_dict = {
        'id': record[id_field],
        'parent_id': parent_id,
        'collection_id': collection_id,
        'data': json.dumps(record),
        'last_modified': time.time(),
        'deleted': False
    }

    with self.client.connect(commit=True) as conn:
        # Fix: the execute() result was previously bound to an unused
        # local ``results``; the statement is run purely for its effect.
        conn.execute(query.format(table_name=table_name), values_dict)

    # Re-read so the returned record reflects server-side fields.
    return self.get(collection_id, parent_id, record[id_field])
def create(
    self,
    resource_name,
    parent_id,
    obj,
    id_generator=None,
    id_field=DEFAULT_ID_FIELD,
    modified_field=DEFAULT_MODIFIED_FIELD,
):
    """Insert ``obj``, reviving a tombstoned row with the same id if any.

    Raises ``UnicityError`` when a live (non-deleted) object with the
    same id already exists.
    """
    make_id = id_generator or self.id_generator
    obj = {**obj}

    if id_field not in obj:
        obj[id_field] = make_id()
    else:
        # Optimistic pre-check: a SELECT is cheaper than an INSERT, so
        # catch the common conflict case early. The INSERT below is
        # still robust against races with concurrent writers.
        try:
            existing = self.get(resource_name, parent_id, obj[id_field])
        except exceptions.ObjectNotFoundError:
            pass
        else:
            raise exceptions.UnicityError(id_field, existing)

    # id and last_modified have dedicated columns; do not duplicate
    # them in the JSON payload.
    payload = {k: v for k, v in obj.items()
               if k not in (id_field, modified_field)}

    # On conflict with a tombstone (deleted = TRUE) the row is revived.
    # On conflict with a live row the WHERE clause makes DO UPDATE a
    # no-op, RETURNING yields no row, and we raise. Per
    # https://stackoverflow.com/questions/15939902/is-select-or-insert-in-a-function-prone-to-race-conditions/15950324#15950324
    # the DO UPDATE locks the conflicting row whether the WHERE clause
    # holds or not, so this is race-safe.
    query = """
    INSERT INTO objects (id, parent_id, resource_name, data,
                         last_modified, deleted)
    VALUES (:object_id, :parent_id, :resource_name, (:data)::JSONB,
            from_epoch(:last_modified), FALSE)
    ON CONFLICT (id, parent_id, resource_name)
    DO UPDATE SET last_modified = from_epoch(:last_modified),
                  data = (:data)::JSONB,
                  deleted = FALSE
    WHERE objects.deleted = TRUE
    RETURNING id, data, as_epoch(last_modified) AS last_modified;
    """
    placeholders = {
        "object_id": obj[id_field],
        "parent_id": parent_id,
        "resource_name": resource_name,
        "last_modified": obj.get(modified_field),
        "data": self.json.dumps(payload),
    }
    with self.client.connect() as conn:
        row = conn.execute(query, placeholders).fetchone()

    if not row:
        # Conflict with a live (non-deleted) object.
        raise exceptions.UnicityError(id_field)

    obj[modified_field] = row["last_modified"]
    return obj
def create(self, collection_id, parent_id, record, id_generator=None,
           id_field=DEFAULT_ID_FIELD,
           modified_field=DEFAULT_MODIFIED_FIELD,
           auth=None):
    """Insert ``record``, reviving a tombstoned row with the same id if any.

    Raises ``UnicityError`` (carrying the stored record) when a live
    record with the same id already exists.
    """
    make_id = id_generator or self.id_generator
    record = {**record}

    if id_field not in record:
        record[id_field] = make_id()
    else:
        # Optimistic pre-check: a SELECT is cheaper than an INSERT, so
        # catch the common conflict case early. The INSERT below is
        # still robust against races with concurrent writers.
        try:
            existing = self.get(collection_id, parent_id, record[id_field])
        except exceptions.RecordNotFoundError:
            pass
        else:
            raise exceptions.UnicityError(id_field, existing)

    # id and last_modified have dedicated columns; do not duplicate
    # them in the JSON payload.
    payload = {k: v for k, v in record.items()
               if k not in (id_field, modified_field)}

    # On conflict with a tombstone (deleted = TRUE) the row is revived.
    # On conflict with a live row the WHERE clause makes DO UPDATE a
    # no-op and the UNION ALL branch returns the stored row with a
    # constant inserted = FALSE so we know to raise. Per
    # https://stackoverflow.com/questions/15939902/is-select-or-insert-in-a-function-prone-to-race-conditions/15950324#15950324
    # the DO UPDATE locks the conflicting row whether the WHERE clause
    # holds or not, so the subsequent SELECT is race-safe.
    query = """
    WITH create_record AS (
        INSERT INTO records (id, parent_id, collection_id, data,
                             last_modified, deleted)
        VALUES (:object_id, :parent_id, :collection_id, (:data)::JSONB,
                from_epoch(:last_modified), FALSE)
        ON CONFLICT (id, parent_id, collection_id)
        DO UPDATE SET last_modified = from_epoch(:last_modified),
                      data = (:data)::JSONB,
                      deleted = FALSE
        WHERE records.deleted = TRUE
        RETURNING id, data, last_modified
    )
    SELECT id, data,
           as_epoch(last_modified) AS last_modified,
           TRUE AS inserted
      FROM create_record
    UNION ALL
    SELECT id, data,
           as_epoch(last_modified) AS last_modified,
           FALSE AS inserted
      FROM records
     WHERE id = :object_id
       AND parent_id = :parent_id
       AND collection_id = :collection_id
    LIMIT 1;
    """
    placeholders = {
        "object_id": record[id_field],
        "parent_id": parent_id,
        "collection_id": collection_id,
        "last_modified": record.get(modified_field),
        "data": self.json.dumps(payload),
    }
    with self.client.connect() as conn:
        row = conn.execute(query, placeholders).fetchone()

    if not row['inserted']:
        # Conflict with a live record: surface the stored version.
        conflicting = row['data']
        conflicting[id_field] = row['id']
        conflicting[modified_field] = row['last_modified']
        raise exceptions.UnicityError(id_field, conflicting)

    record[modified_field] = row['last_modified']
    return record
def _check_unicity(self, conn, collection_id, parent_id, record, unique_fields, id_field, modified_field, for_creation=False): """Check that no existing record (in the current transaction snapshot) violates the resource unicity rules. """ # If id is provided by client, check that no record conflicts. if for_creation and id_field in record: unique_fields = (unique_fields or tuple()) + (id_field,) if not unique_fields: return query = """ SELECT id FROM records WHERE parent_id = :parent_id AND collection_id = :collection_id AND (%(conditions_filter)s) AND %(condition_record)s LIMIT 1; """ safeholders = dict() placeholders = dict(parent_id=parent_id, collection_id=collection_id) # Transform each field unicity into a query condition. filters = [] for field in set(unique_fields): value = record.get(field) if value is None: continue sql, holders = self._format_conditions( [Filter(field, value, COMPARISON.EQ)], id_field, modified_field, prefix=field) filters.append(sql) placeholders.update(**holders) # All unique fields are empty in record if not filters: return safeholders['conditions_filter'] = ' OR '.join(filters) # If record is in database, then exclude it of unicity check. if not for_creation: object_id = record[id_field] sql, holders = self._format_conditions( [Filter(id_field, object_id, COMPARISON.NOT)], id_field, modified_field) safeholders['condition_record'] = sql placeholders.update(**holders) else: safeholders['condition_record'] = 'TRUE' result = conn.execute(query % safeholders, placeholders) if result.rowcount > 0: existing = result.fetchone() record = self.get(collection_id, parent_id, existing['id']) raise exceptions.UnicityError(unique_fields[0], record)
def unicity_failure(*args, **kwargs):
    """Test stub: unconditionally raise a UnicityError on field 'city'.

    Accepts (and ignores) any arguments so it can stand in for any
    storage callable in tests.
    """
    raise storage_exceptions.UnicityError('city', {'id': 42})