def map(entity, model, *args, **kwargs):
    """ The Clean mapper maps over all UniqueMarker instances. """

    alias = kwargs.get("db", "default")
    namespace = settings.DATABASES.get(alias, {}).get("NAMESPACE", "")

    model = decode_model(model)
    if not entity.key().id_or_name().startswith(model._meta.db_table + "|"):
        # Only include markers which are for this model
        return

    assert namespace == entity.namespace()
    with disable_cache():
        # At this point, the entity is a unique marker that is linked to an instance of
        # 'model'; now we should see if that instance exists!
        instance_id = entity["instance"].id_or_name()
        try:
            instance = model.objects.using(alias).get(pk=instance_id)
        except model.DoesNotExist:
            logger.info(
                "Deleting unique marker %s because the associated instance no longer exists",
                entity.key().id_or_name()
            )
            datastore.Delete(entity)
            return

        # Get the possible unique markers for the entity; if this one doesn't exist in
        # that list then delete it
        instance_entity, _ = django_instance_to_entities(
            connections[alias], instance._meta.fields, raw=True, instance=instance, check_null=False
        )
        identifiers = unique_identifiers_from_entity(model, instance_entity, ignore_pk=True)
        identifier_keys = [
            datastore.Key.from_path(
                UniqueMarker.kind(), i, namespace=entity["instance"].namespace()
            )
            for i in identifiers
        ]
        if entity.key() not in identifier_keys:
            logger.info(
                "Deleting unique marker %s because it no longer represents the associated instance state",
                entity.key().id_or_name()
            )
            datastore.Delete(entity)
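
# --- Usage sketch (illustrative, not part of the original source) ---
# One way the Clean mapper above might be driven: iterate every UniqueMarker
# entity in the configured namespace and feed each one to map(). `run_clean` is
# a hypothetical helper written only for illustration (the real tool presumably
# shards this work through a mapper pipeline); it assumes the same module-level
# imports as the code above.
def run_clean(encoded_model, alias="default"):
    namespace = settings.DATABASES.get(alias, {}).get("NAMESPACE", "")
    query = datastore.Query(UniqueMarker.kind(), namespace=namespace)
    for marker in query.Run():
        # map() calls decode_model() itself, so pass the encoded form through
        map(marker, encoded_model, db=alias)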
def map(instance, *args, **kwargs):
    """ Figure out what markers the instance should use and verify they're attached to
        this instance. Log any weirdness and, in repair mode, recreate missing markers.
    """
    action_id = kwargs.get("action_pk")
    repair = kwargs.get("repair")
    alias = kwargs.get("db", "default")
    namespace = settings.DATABASES.get(alias, {}).get("NAMESPACE")
    assert alias == (instance._state.db or "default")

    entity, _ = django_instance_to_entities(
        connections[alias], instance._meta.fields, raw=True, instance=instance, check_null=False
    )
    identifiers = unique_identifiers_from_entity(type(instance), entity, ignore_pk=True)
    identifier_keys = [
        datastore.Key.from_path(UniqueMarker.kind(), i, namespace=namespace)
        for i in identifiers
    ]
    markers = datastore.Get(identifier_keys)
    instance_key = str(entity.key())

    markers_to_save = []

    for i, m in zip(identifier_keys, markers):
        marker_key = str(i)
        if m is None:
            # Missing marker
            if repair:
                new_marker = datastore.Entity(UniqueMarker.kind(), name=i.name(), namespace=namespace)
                new_marker['instance'] = entity.key()
                new_marker['created'] = datetime.datetime.now()
                markers_to_save.append(new_marker)
            else:
                log(action_id, "missing_marker", instance_key, marker_key)
        elif 'instance' not in m or not m['instance']:
            # Marker with missing instance attribute
            if repair:
                m['instance'] = entity.key()
                markers_to_save.append(m)
            else:
                log(action_id, "missing_instance", instance_key, marker_key)
        elif m['instance'] != entity.key():
            if isinstance(m['instance'], basestring):
                # The marker stores the instance key as a string; convert it before comparing
                m['instance'] = datastore.Key(m['instance'])
                if repair:
                    markers_to_save.append(m)
                else:
                    log(action_id, "old_instance_key", instance_key, marker_key)

            if m['instance'] != entity.key():
                # Marker already assigned to a different instance. Also log in repair
                # mode, as repairing would break the other instance.
                log(action_id, "already_assigned", instance_key, marker_key)

    if markers_to_save:
        datastore.Put(markers_to_save)
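
# --- Usage sketch (illustrative, not part of the original source) ---
# The check/repair mapper above receives one Django instance at a time. A
# hypothetical driver (`check_markers`, written only for illustration) would run
# it over every instance of a model, first with repair=False to log-only, and
# then, once the logs look sane, with repair=True:
def check_markers(model, action_pk, repair=False, alias="default"):
    for instance in model.objects.using(alias).all().iterator():
        map(instance, action_pk=action_pk, repair=repair, db=alias)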
def __init__(self, connection, model, objs, fields, raw):
    self.has_pk = any(x.primary_key for x in fields)
    self.model = model
    self.objs = objs
    self.connection = connection
    self.namespace = connection.ops.connection.settings_dict.get("NAMESPACE")
    self.raw = raw
    self.fields = fields

    self.entities = []
    self.included_keys = []

    for obj in self.objs:
        if self.has_pk:
            # We must convert the PK value here, even though this normally happens in
            # django_instance_to_entities, otherwise custom PK fields don't work properly
            value = self.model._meta.pk.get_db_prep_save(
                self.model._meta.pk.pre_save(obj, True), self.connection
            )
            self.included_keys.append(
                get_datastore_key(self.model, value, self.namespace) if value else None
            )

            if value == 0:
                raise IntegrityError("The datastore doesn't support 0 as a key value")

            if not self.model._meta.pk.blank and self.included_keys[-1] is None:
                raise IntegrityError(
                    "You must specify a primary key value for {} instances".format(self.model)
                )
        else:
            # We zip() self.entities and self.included_keys in execute(), so they
            # should be the same length
            self.included_keys.append(None)

        # We don't use the values returned, but this does make sure we're
        # doing the same validation as Django. See issue #493 for an
        # example of how not doing this can mess things up
        for field in fields:
            field.get_db_prep_save(
                getattr(obj, field.attname) if raw else field.pre_save(obj, True),
                connection=connection,
            )

        primary, descendents = django_instance_to_entities(
            self.connection, self.fields, self.raw, obj
        )

        # Append the entity, and any descendents, to the list to insert
        self.entities.append((primary, descendents))
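
# --- Behaviour sketch (illustrative, not part of the original source) ---
# The PK handling above rejects two cases before anything is written to the
# datastore. With a hypothetical model using a manually specified integer
# primary key (names invented purely for illustration):
#
#   class Track(models.Model):
#       id = models.IntegerField(primary_key=True)  # blank=False by default
#
#   Track(id=0)  -> IntegrityError: the datastore doesn't support 0 as a key value
#   Track()      -> IntegrityError: a non-blank PK resolved to None, so a value
#                   must be specified for Track instances
#
# Both are raised from this constructor, assuming it belongs to the insert
# command that the backend invokes on save.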
def txn():
    caching.remove_entities_from_cache_by_key([key], self.namespace)

    try:
        result = rpc.Get(key)
    except datastore_errors.EntityNotFoundError:
        # Return False to indicate update failure
        return False

    if (
        isinstance(self.select.gae_query, (Query, meta_queries.UniqueQuery))
        # ignore QueryByKeys and NoOpQuery
        and not utils.entity_matches_query(result, self.select.gae_query)
    ):
        # Due to eventual consistency, the query may have returned an entity which no
        # longer matches the query
        return False

    original = copy.deepcopy(result)

    instance_kwargs = {field.attname: value for field, param, value in self.values}

    # Note: If you replace MockInstance with self.model, you'll find that some delete
    # tests fail in the test app. This is because any unspecified fields would then call
    # get_default (even though we aren't going to use them) which may run a query which
    # fails inside this transaction. Since we are just using MockInstance so that we can
    # call django_instance_to_entities on it with the subset of fields we pass in,
    # what we have is fine.
    meta = self.model._meta
    instance = MockInstance(
        _original=MockInstance(_meta=meta, **result),
        _meta=meta,
        **instance_kwargs
    )

    # Convert the instance to an entity
    primary, descendents = django_instance_to_entities(
        self.connection,
        [x[0] for x in self.values],  # Pass in the fields that were updated
        True,
        instance,
        model=self.model
    )

    # Update the entity we read above with the new values
    result.update(primary)

    # Remove fields which have been marked to be unindexed
    for col in getattr(primary, "_properties_to_remove", []):
        if col in result:
            del result[col]

    # Make sure that any polymodel classes which were in the original entity are kept,
    # as django_instance_to_entities may have wiped them as well as added them.
    polymodel_classes = list(set(
        original.get(POLYMODEL_CLASS_ATTRIBUTE, []) +
        result.get(POLYMODEL_CLASS_ATTRIBUTE, [])
    ))
    if polymodel_classes:
        result[POLYMODEL_CLASS_ATTRIBUTE] = polymodel_classes

    def perform_insert():
        """ Inserts result, and any descendents with their ancestor value set """
        inserted_key = rpc.Put(result)

        if descendents:
            for i, descendent in enumerate(descendents):
                descendents[i] = Entity(
                    descendent.kind(),
                    parent=inserted_key,
                    namespace=inserted_key.namespace(),
                    id=descendent.key().id() or None,
                    name=descendent.key().name() or None
                )
                descendents[i].update(descendent)
            rpc.Put(descendents)

    if not constraints.has_active_unique_constraints(self.model):
        # The fast path, no constraint checking
        perform_insert()
        caching.add_entities_to_cache(
            self.model,
            [result],
            caching.CachingSituation.DATASTORE_PUT,
            self.namespace,
            skip_memcache=True,
        )
    else:
        markers_to_acquire[:], markers_to_release[:] = constraints.get_markers_for_update(
            self.model, original, result
        )
        perform_insert()

        constraints.update_identifiers(markers_to_acquire, markers_to_release, result.key())

        # If the rpc.Put() fails then the exception will only be raised when the
        # transaction applies, which means that we will still get to here and will still
        # have applied the marker changes (because they're in a nested, independent
        # transaction). Hence we set this flag to tell us that we got this far and that
        # we should roll them back.
        rollback_markers[0] = True

        # If something dies between here and the `return` statement then we'll have
        # stale unique markers

        try:
            # Update the cache before dealing with unique markers, as
            # CachingSituation.DATASTORE_PUT will only update the context cache
            caching.add_entities_to_cache(
                self.model,
                [result],
                caching.CachingSituation.DATASTORE_PUT,
                self.namespace,
                skip_memcache=True,
            )
        except:
            # We ignore the exception because raising would roll back the transaction,
            # causing an inconsistent state
            logger.exception("Unable to update the context cache")
            pass

    # Return True to indicate update success
    return True
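
# --- Context sketch (illustrative, not part of the original source) ---
# txn() closes over `key`, `markers_to_acquire`, `markers_to_release` and
# `rollback_markers`, so the enclosing update presumably runs it once per matched
# key inside a datastore transaction, undoing acquired markers if the commit
# fails. This is an assumed outline, not djangae's actual driver code:
#
#   rollback_markers = [False]
#   try:
#       if rpc.IsInTransaction():
#           succeeded = txn()
#       else:
#           succeeded = rpc.RunInTransaction(txn)
#   except Exception:
#       if rollback_markers[0]:
#           # The marker changes were committed in a nested, independent
#           # transaction, so they must be released explicitly
#           constraints.release_markers(markers_to_acquire)
#       raise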