def _where_dict(self, parent_method: Callable, column: str, operator: str = None, value: Any = None):
    # Not sure I want this code if I can't do where AND and where OR etc...
    # Maybe revisit later.
    found_dict_where = False
    if type(column) == str:
        # Swap operator and value
        if not value:
            value = operator
            operator = '='

        if '.' in column:
            parts = column.split(".")
            if len(parts) == 2:
                fieldname = parts[0]
                fieldvalue = parts[1]
                field = self.entity.modelfields.get(fieldname)  # Don't use modelfield() as it throws an exception
                relation = field.relation.fill(field)
                if fieldvalue not in relation.entity.modelfields.keys():
                    dict_key = getvalue(relation, 'dict_key')
                    dict_value = getvalue(relation, 'dict_value')
                    if dict_key and type(dict_value) == str:
                        found_dict_where = True

    if found_dict_where:
        # Dict where found, convert to proper where based on dict_key
        return parent_method([
            (fieldname + '.' + dict_key, '=', fieldvalue),
            (fieldname + '.' + dict_value, operator, value)
        ])
    else:
        # Regular where, pass to parent (builder.py) where
        return parent_method(column, operator, value)
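# Illustrative standalone sketch of the "dict where" rewrite rule implemented above,
# in plain Python with no ORM dependency. The 'attributes' relation and its key/value
# column names are hypothetical, invented for illustration only.
def rewrite_dict_where(fieldname, fieldvalue, dict_key, dict_value, operator, value):
    """Return the two (column, operator, value) wheres a dict-where expands into."""
    return [
        (fieldname + '.' + dict_key, '=', fieldvalue),
        (fieldname + '.' + dict_value, operator, value),
    ]

# Example: where('attributes.color', 'red') with dict_key='key' and dict_value='value'
assert rewrite_dict_where('attributes', 'color', 'key', 'value', '=', 'red') == [
    ('attributes.key', '=', 'color'),
    ('attributes.value', '=', 'red'),
]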
async def unlink(self, relation_name: str, models: Union[Any, List[Any]] = None) -> None:
    """Unlink records from a relation using the Many-To-Many pivot table"""
    # NOTICE hooks: We do NOT actually need to fire hooks on relation tables!
    # Because there are no relation table models to listen for those hooks!

    # Get the entity of this model instance (which is the metaclass, aka self.__class__)
    entity = self.__class__

    # Get field and relation info
    field = entity.modelfield(relation_name)
    relation = field.relation.fill(field)

    # Ensure models is a list and collect the child primary keys
    ids = []
    if models is not None:
        if type(models) != list: models = [models]
        ids = [getvalue(x, relation.entity.pk) for x in models]

    if type(relation) == BelongsToMany:
        # Get table and start where on self ID
        table = relation.join_table
        query = table.delete().where(
            getattr(table.c, relation.left_key) == getattr(self, entity.pk))
        if models is not None:
            # Add in proper relation Ids
            query = query.where(getattr(table.c, relation.right_key).in_(ids))
        await entity.execute(query)  # No hooks needed, relation tables are NOT models, no listeners

    elif type(relation) == MorphToMany:
        # Get table and start where on self ID
        table = relation.join_table
        query = (table.delete()
            .where(getattr(table.c, relation.left_type) == entity.tablename)
            .where(getattr(table.c, relation.left_key) == getvalue(self, entity.pk)))
        if models is not None:
            # Add in proper relation Ids
            query = query.where(getattr(table.c, relation.right_key).in_(ids))
        await entity.execute(query)  # No hooks needed, relation tables are NOT models, no listeners

    else:
        raise Exception('Unlinking is for Many-To-Many relations only.')
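# Illustrative SQLAlchemy Core sketch of the pivot DELETE that unlink() builds for a
# BelongsToMany relation. The 'post_tags' pivot table and its columns are hypothetical;
# only the query shape mirrors the code above.
import sqlalchemy as sa

post_tags = sa.table('post_tags', sa.column('post_id'), sa.column('tag_id'))

def unlink_pivot_query(post_id, tag_ids=None):
    query = post_tags.delete().where(post_tags.c.post_id == post_id)
    if tag_ids is not None:
        # Restrict the delete to specific children, mirroring the .in_(ids) branch above
        query = query.where(post_tags.c.tag_id.in_(tag_ids))
    return query

# print(unlink_pivot_query(1, [2, 3]))  # DELETE FROM post_tags WHERE post_id = ... AND tag_id IN (...)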
async def create(self, relation_name: str, models: Union[Any, List[Any]]) -> None:
    """Create related child records and link them to this parent (self) model"""
    # Get the entity of this model instance (which is the metaclass, aka self.__class__)
    entity = self.__class__

    # Get field and relation info
    field = entity.modelfield(relation_name)
    relation = field.relation.fill(field)

    # Convert to list
    if type(models) != list: models = [models]

    # Assume each does not yet exist, so create all as a BULK insert
    if type(relation) == HasOne or type(relation) == HasMany or type(relation) == MorphOne or type(relation) == MorphMany:
        # Fill in relation foreign key value
        for model in models:
            # Set new relation value (works whether model is a dict or a model class instance!)
            setvalue(model, relation.foreign_key, getattr(self, relation.local_key))
            if type(relation) == MorphOne or type(relation) == MorphMany:
                # For Polymorphic relations, also set type column
                setvalue(model, relation.foreign_type, entity.tablename)

            # # Check for Data types in values to serialize
            # dict_values = getvalue(relation, 'dict_value')
            # if dict_values:
            #     if type(dict_values) != list: dict_values = [dict_values]
            #     for dict_value in dict_values:
            #         value = getvalue(model, dict_value)
            #         if type(value) == dict or type(value) == list:
            #             value = str(value)
            #             setvalue(model, dict_value, value)

        # Bulk insert new values with proper keys
        await relation.entity.insert(models)

    # Cannot assume a record has been created or linked, so loop each and test.
    # Because of this, we cannot bulk insert
    elif type(relation) == BelongsToMany or type(relation) == MorphToMany:
        for model in models:
            # If its PK is set, it already exists
            create = getvalue(model, relation.entity.pk) == None
            if create:
                pk_value = await relation.entity.insert(model)
                setvalue(model, relation.entity.pk, pk_value)

            # Link in pivot table
            await self.link(relation_name, model)
    else:
        raise Exception('Creating children does not work for this type of relation.')
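# Illustrative usage sketch, shown as comments because the calls need an async context
# and a configured database. Post, Tag and the 'comments'/'tags' relation names are
# hypothetical and not taken from this codebase:
#
#   post = await Post.query().find(1)
#
#   # HasMany: bulk-inserts the children with each comment's foreign key filled from post
#   await post.create('comments', [
#       {'title': 'First',  'body': '...'},
#       {'title': 'Second', 'body': '...'},
#   ])
#
#   # BelongsToMany: inserts any child whose pk is None, then links it in the pivot table
#   await post.create('tags', [Tag(name='orm'), Tag(name='python')])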
def _row_to_model(self, row=None):
    """Convert a single table row (SQLAlchemy RowProxy) or Dict into a model instance"""
    if not row: row = self.args[0]
    prefix = None
    if len(self.args) == 2: prefix = self.args[1]

    fields = {}
    for field in self.instance.__modelfields__.values():
        if not field.column and not field.evaluate: continue

        # NO, because we added an override of show_writeonly() on the query builder
        # So this is handled in metaclass.py selectable_columns instead
        # NO if field.write_only: continue

        if field.evaluate:
            if type(field.evaluate) == dict:
                # Evaluate is a Dict with callback and named parameters.
                # Copy the Dict so we do not mutate the shared field definition on repeated calls
                evaluate = dict(field.evaluate)
                eval_method = evaluate.pop('method')
                #dump('dict here', evaluate)
                fields[field.name] = eval_method(row, **evaluate)
            elif type(field.evaluate) == tuple:
                # Evaluate is a Tuple with callback and positional parameters
                eval_method = field.evaluate[0]
                fields[field.name] = eval_method(row, *field.evaluate[1:])
            else:
                # Evaluate is a callback
                fields[field.name] = field.evaluate(row)
        else:
            column = field.column
            if prefix: column = prefix + '__' + column

            # Use haskey instead of hasattr because data could be an SQLAlchemy row (which is class notation)
            # or a dictionary. haskey and getvalue work the same on class models or dictionaries!
            if haskey(row, column):
                fields[field.name] = getvalue(row, column)
    return self.entity(**fields)
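# Illustrative standalone sketch of the prefix rule used above: with a prefix, a field's
# column is looked up in the row as '<prefix>__<column>'. The row data and field names
# are invented for illustration.
def pick_column(row: dict, column: str, prefix: str = None):
    if prefix:
        column = prefix + '__' + column
    return row.get(column)

row = {'id': 1, 'title': 'Hello', 'creator__id': 7, 'creator__email': 'a@example.com'}
assert pick_column(row, 'title') == 'Hello'                            # primary (no prefix)
assert pick_column(row, 'email', prefix='creator') == 'a@example.com'  # joined relation column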
async def link(self, relation_name: str, models: Union[Any, List[Any]]) -> None:
    """Link records to a relation using the Many-To-Many pivot table"""
    # NOTICE hooks: We do NOT actually need to fire hooks on relation tables!
    # Because there are no relation table models to listen for those hooks!
    # NOTICE models: Models can be a single model, single dict, List[Model] or List[Dict]
    # and I do NOT have to convert them to actual models. Because I am using my custom getvalue,
    # the pk is pulled regardless of model or dict!

    # Get the entity of this model instance (which is the metaclass, aka self.__class__)
    entity = self.__class__

    # Get field and relation info
    field = entity.modelfield(relation_name)
    relation = field.relation.fill(field)

    # Ensure models are always a list
    if type(models) != list: models = [models]

    # Insert linkage data one link at a time so we can gracefully skip duplicates
    if type(relation) == BelongsToMany:
        for model in models:
            # Set pivot relation data
            left_key_value = getvalue(self, entity.pk)
            right_key_value = getvalue(model, relation.entity.pk)
            pivot = {
                relation.left_key: left_key_value,
                relation.right_key: right_key_value
            }

            # Check if exists.
            # The encode/databases layer does NOT abstract each backend DB library's exceptions
            # into a common interface, so you cannot catch a generic IntegrityError. So instead of a
            # try catch, I will see if the record exists manually first :( - See https://github.com/encode/databases/issues/162
            table = relation.join_table
            query = (sa.select([getattr(table.c, relation.left_key)])
                .select_from(table)
                .where(getattr(table.c, relation.left_key) == left_key_value)
                .where(getattr(table.c, relation.right_key) == right_key_value))
            exists = await entity.fetchone(query)  # Returns None if not exists
            if not exists:
                query = relation.join_table.insert().values(**pivot)
                await entity.execute(query)  # No hooks needed, relation tables are NOT models, no listeners

            # # Try insert, fail silently if exists
            # try:
            #     query = relation.join_table.insert().values(**pivot)
            #     await entity.execute(query)
            # except:
            #     # Ignore Integrity Errors
            #     pass

    elif type(relation) == MorphToMany:
        for model in models:
            # Set polymorphic pivot relation data
            left_type_value = entity.tablename
            left_key_value = getvalue(self, entity.pk)
            right_key_value = getvalue(model, relation.entity.pk)
            pivot = {
                relation.left_type: left_type_value,
                relation.left_key: left_key_value,
                relation.right_key: right_key_value
            }

            # Check if exists.
            # The encode/databases layer does NOT abstract each backend DB library's exceptions
            # into a common interface, so you cannot catch a generic IntegrityError. So instead of a
            # try catch, I will see if the record exists manually first :( - See https://github.com/encode/databases/issues/162
            table = relation.join_table
            query = (sa.select([getattr(table.c, relation.left_type)])
                .select_from(table)
                .where(getattr(table.c, relation.left_type) == left_type_value)
                .where(getattr(table.c, relation.left_key) == left_key_value)
                .where(getattr(table.c, relation.right_key) == right_key_value))
            exists = await entity.fetchone(query)  # Returns None if not exists
            if not exists:
                query = relation.join_table.insert().values(**pivot)
                await entity.execute(query)  # No hooks needed, relation tables are NOT models, no listeners

            # # Try insert, fail silently if exists
            # try:
            #     query = relation.join_table.insert().values(**pivot)
            #     await entity.execute(query)
            # except pymysql.err.IntegrityError:
            #     # Ignore Integrity Errors
            #     pass

    else:
        # Linking only works for Many-To-Many relations
        raise Exception('Linking is for Many-To-Many relations only.')
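# Illustrative SQLAlchemy Core sketch of the "select first, then insert" pattern link()
# uses to skip duplicate pivot rows without relying on backend-specific IntegrityError
# types. The 'post_tags' pivot table and its columns are hypothetical; the list-style
# sa.select([...]) simply matches the style of the surrounding code.
import sqlalchemy as sa

post_tags = sa.table('post_tags', sa.column('post_id'), sa.column('tag_id'))

def link_pivot_queries(post_id, tag_id):
    exists_query = (sa.select([post_tags.c.post_id])
        .select_from(post_tags)
        .where(post_tags.c.post_id == post_id)
        .where(post_tags.c.tag_id == tag_id))
    insert_query = post_tags.insert().values(post_id=post_id, tag_id=tag_id)
    return exists_query, insert_query

# Caller pattern: row = await db.fetch_one(exists_query); if row is None: await db.execute(insert_query)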
def _build_orm_results(self, query: Query, primary: List, secondary: Dict = {}) -> List[E]:
    # No primary results, return empty List
    if not primary: return []

    self.log.nl().header('Relations')
    self.log.dump(query.relations)
    self.log.nl().header('Primary Results')
    self.log.dump(primary)
    self.log.nl().header('Has Many Data')
    self.log.dump(secondary)

    # Deepcopy relations Dict so I can remove relations I have already processed.
    # I process all secondary results first. This means all left over relations are of
    # the primary results
    relations = deepcopy(query.relations)

    # Dictionary of all secondary converted models
    models = {}

    # Dictionary of all *One models as a cache to deduplicate class instantiation
    singles = {}

    # Fill any *One relations method
    def fill_one_relations(rel_name: str, data: List):
        """Fill only the *One relations (One-To-One, One-To-Many)"""
        self.log.nl().header('Filling *One Relations for ' + rel_name)

        # Skip if no data
        if not data:
            #models[rel_name] = {}
            return

        # Determine if data is primary or secondary
        primary = (rel_name == 'primary')

        # Split rel_name into __ parts
        rel_name_parts = rel_name.split('__')

        # Get the actual field, relation and entity for this relation rel_name
        entity = self.entity
        if not primary:
            for rnpart in rel_name_parts:
                field: Field = entity.modelfield(rnpart)
                entity = field.relation.fill(field).entity
                #self.log.item('Field: ' + str(field))
        self.log.item('Entity: ' + str(entity))
        self.log.item('Data Keys: ' + str(data[0].keys()))

        # Add a new Dict to our Dict of models
        models[rel_name] = {}

        # Pk field and column
        pk = entity.pk
        pk_column = entity.mapper(pk).column()
        if not primary: pk_column = rel_name + '__' + pk_column

        # Track completed relations so I can remove them from our relations list later
        completed_relations = {}

        # Loop each row of raw data
        i = 0
        for row in data:
            # Because of Many-To-Many we could have the same model multiple times. But we only want
            # to convert and deal with it once based on unique PK
            if getattr(row, pk_column) in models[rel_name]: continue
            #if models[rel_name][getattr(row, pk_column)]: continue

            # Convert this one row to a model (just the main fields, not relations)
            if primary:
                root_model = entity.mapper(row).model()
                #root_model = entity.mapper(row).row_to_model()
            else:
                root_model = entity.mapper(row, rel_name).model()
                #root_model = entity.mapper(row, rel_name).row_to_model()

            # Get pk value
            pk_value = getattr(root_model, pk)

            # Loop only *One relations that apply to this one "data" model
            relation: Relation
            for relation in relations.values():
                # Only look at relations that begin with this relation__ and are *One
                if not primary and rel_name + '__' not in relation.name: continue

                # Walk up relations and exclude if ANY relation is a *Many starting from rel_name and up
                if relation.contains_many(query.relations, skip=rel_name_parts): continue

                # Log output
                if i == 0: self.log.item('Relation: ' + relation.name + ' - ' + str(relation))

                # RowProxy results lookup prefix
                prefix = relation.name

                # Get all relation fieldnames from relation.name split
                fieldnames = relation.name.split('__')
                if not primary:
                    # Skip the first __ parts of rel_name
                    fieldnames = fieldnames[len(rel_name_parts):]
                if i == 0: self.log.item2('  Fieldnames: ' + ', '.join(fieldnames))

                # Walk down the root model by fieldnames until you reach the nested
                # model that has the right field to hold this converted sub model.
                # Remember each relation has the full__nested__name so always start with the
                # root_model for each relation and work your way down.
                model = root_model
                for f in range(0, len(fieldnames) - 1):
                    model = getattr(model, fieldnames[f])

                # Actual fieldname is always the LAST of the fieldnames, but our model was walked down.
                fieldname = fieldnames[-1] if fieldnames else relation.name

                # Walkdown Log
                if i == 0: self.log.item2('  Model Field: ' + fieldname)
                if i == 0: self.log.item2('  Field Model: ' + str(model.__class__))

                # Convert this one row's relation data into a sub_relation model.
                # Only convert each unique *One record once, or else pull from the singles cache.
                # The odd part about this cache is that if you include many nested relations on one parent model
                # and a child model also uses the same child, it too will include all nested relations.
                # Example: if you do .include('creator.info', 'owner') and owner id=1 was already
                # a creator, that owner will also have the nested INFO filled out, because it pulls from the cache.
                if relation.entity.tablename not in singles:
                    singles[relation.entity.tablename] = {}
                sub_model_pk = relation.name + '__' + relation.entity.mapper(relation.entity.pk).column()
                sub_model_pk_value = getattr(row, sub_model_pk)
                if sub_model_pk_value is not None and sub_model_pk_value not in singles[relation.entity.tablename]:
                    singles[relation.entity.tablename][sub_model_pk_value] = relation.entity.mapper(row, prefix).model()
                    #singles[relation.entity.tablename][sub_model_pk_value] = relation.entity.mapper(row, prefix).row_to_model()

                # Get sub_model from singles cache
                if sub_model_pk_value is not None:
                    sub_model = singles[relation.entity.tablename][sub_model_pk_value]
                    #sub_model = relation.entity.mapper(row, prefix).model(False)  # No cache version

                    # Add this converted sub_model to the walked down parent model
                    setattr(model, fieldname, sub_model)

                # Mark relation as complete so I can delete it from the relations Dict later
                completed_relations[relation.name] = 1

            # All *One relations have been converted and merged.
            # Add this fully converted (including nested *One relations) model to the Dict of models
            models[rel_name][pk_value] = root_model
            i += 1

        # Delete all completed relations from our relations deepcopy. We will not need them again
        for completed_relation in completed_relations.keys():
            del relations[completed_relation]

    # Fill in all *One relations for all secondary results first.
    # As each relation is merged it will be removed from our local relations deepcopy.
    # All relations left will be those on the main results data.
    for rel_name, rel_data in secondary.items():
        fill_one_relations(rel_name, rel_data)
    #fill_one_relations(secondary['comments'], 'comments')

    # Fill in all *One relations for the primary results.
    fill_one_relations('primary', primary)

    # All relations left should be *Many, either for the primary results
    # or for any of the secondary results
    self.log.nl().header('Leftover Relations are *Many')
    self.log.dump(relations)

    # All records in the models Dict are *Many plus the main primary dataset.
    # All remaining relations are the *Many, which should match the models Dict keys.
    # Looping the *Many relations in REVERSE gives us the deepest relations first, which is critical
    self.log.nl().header('Combining Recursive *Many Models')
    relation: Relation
    for relation in reversed(relations.values()):
        # Relation name parts
        relation_parts = relation.name.split('__')
        field = relation_parts[-1]

        # If models does not contain this relation, skip the merge
        if relation.name not in models: continue

        # Get parent and child Dict of models
        children_name = relation.name
        children = models[relation.name]
        parents_name = 'primary'
        if len(relation_parts) > 1:
            parents_name = '__'.join(relation_parts[:-1])
        if parents_name in models:
            # Parent is a *Many so grab from models
            parents = models[parents_name]
        else:
            # Parent is a *One, so grab from the singles cache
            parents = singles[query.relations.get(parents_name).entity.tablename]
        self.log.item('Combining child: ' + children_name + ' into parent: ' + parents_name)

        # Determine if child *Many results should be displayed as a Dict or List
        dict_key = getvalue(relation, 'dict_key')
        dict_value = getvalue(relation, 'dict_value')
        list_value = getvalue(relation, 'list_value')

        # Loop parents so we can at least set each child to an empty [] or {} instead of None.
        # We always want [] instead of None for empty children
        for parent in parents.values():
            # Set empty [] or {}
            if dict_key:
                setattr(parent, field, {})
            else:
                setattr(parent, field, [])

        # Merge in Many-To-Many by using the original RowProxy result which contains
        # the pivot table's joining column (left_key)
        if type(relation) == BelongsToMany or type(relation) == MorphToMany:
            left_key = relation.name + '__' + relation.left_key
            right_key = relation.name + '__' + relation.entity.mapper(relation.entity.pk).column()

            # QUESTION: what is the difference between children.values() and secondary?
            # Look at the ELSE below, which loops for child in children.values(),
            # but this *Many branch uses secondary?
            # Can I combine all relations into one large loop?
            # Because I am doing identical work in the dict_key stuff.
            # ALSO all of this dict_key code may not work anyway.
            # I bet the API will not know how to handle input and complain?
            # I may have to handle it specially in the ModelRouter

            # Loop raw RowProxy to find proper pivot keys
            for row in secondary[relation.name]:
                left_id = getattr(row, left_key)
                right_id = getattr(row, right_key)

                # Get parent value, the value of the main table
                parent = parents[left_id]

                # Get child value, the value of the many record
                child = children[right_id]

                # Set None field to empty List
                if getattr(parent, field) is None:
                    setattr(parent, field, [])

                # Add each *Many model as a Dict
                if dict_key:
                    if dict_value:
                        if type(dict_value) == list:
                            value = {key: getattr(child, key) for key in dict_value}
                        else:
                            value = getattr(child, dict_value)
                    else:
                        value = child.dict()
                    getattr(parent, field)[getattr(child, dict_key)] = value

                # Add each *Many model as a List of a single value
                elif list_value:
                    getattr(parent, field).append(getattr(child, list_value))

                # Add each *Many as a List of the actual Models
                else:
                    # Append the child model to the list
                    getattr(parent, field).append(
                        # We must deep copy the record because we dedup the *Many
                        # but they could be used multiple times.
                        # ?? Hmm, maybe not. It is good that if you change one it changes them all.
                        # But all the *One will NOT be like this I don't believe, have to test
                        #deepcopy(child[right_id])
                        child
                    )
        else:
            for child in children.values():
                parent_pk_value = getattr(child, relation.foreign_key)
                if parent_pk_value not in parents: continue
                parent = parents[parent_pk_value]
                field = relation_parts[-1]

                # # Set None field to empty list
                # if getattr(parent, field) is None:
                #     setattr(parent, field, [])

                # DUPLICATING work here, see above for nearly the exact same thing.
                # Need to optimize this code

                # Add each *Many model as a Dict
                if dict_key:
                    if dict_value:
                        if type(dict_value) == list:
                            # Dict value is a list. Create a dictionary from the list's keys
                            value = {key: getattr(child, key) for key in dict_value}
                        else:
                            # Dict value is a string, use just that field's value
                            value = getattr(child, dict_value)
                    else:
                        # No dict_value set, but there is a dict_key, so we want a dict. Use the entire record as a dict
                        value = child.dict()
                    getattr(parent, field)[getattr(child, dict_key)] = value
                    #setattr(parent, field, 'x')

                # Add each *Many model as a List of a single value
                elif list_value:
                    getattr(parent, field).append(getattr(child, list_value))

                # Add each *Many as a List of the actual Models
                else:
                    getattr(parent, field).append(child)

    self.log.nl().header('Singles Cache')
    self.log.dump(singles)
    self.log.nl().header('Secondary *Many Models')
    self.log.dump(models)

    # These models are already a Dict keyed by PK.
    # Return the existing primary models if the user wanted keyby on the pk
    if query.keyed_by == self.entity.pk:
        return models['primary']

    # Key results by another column
    if query.keyed_by:
        keyed_entities = {}
        for entity in models['primary'].values():
            keyed_entities[getattr(entity, query.keyed_by)] = entity
        # Return Dictionary of Entities
        return keyed_entities

    # No keyby, convert primary models to a List
    return [x for x in models['primary'].values()]
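# Illustrative standalone sketch of how a *Many child is attached to its parent in the
# merge loops above, depending on dict_key / dict_value / list_value. Children are plain
# dicts here; the sample field names are invented for illustration.
def attach_child(parent_field, child: dict, dict_key=None, dict_value=None, list_value=None):
    if dict_key:
        # Child collection is shaped as a Dict keyed by dict_key
        if isinstance(dict_value, list):
            value = {key: child[key] for key in dict_value}   # subset of fields
        elif dict_value:
            value = child[dict_value]                         # single field value
        else:
            value = child                                     # whole record
        parent_field[child[dict_key]] = value
    elif list_value:
        parent_field.append(child[list_value])                # List of a single value
    else:
        parent_field.append(child)                            # List of whole models

# dict_key='key' and dict_value='value' turn key/value rows into a plain dict
attributes = {}
attach_child(attributes, {'key': 'color', 'value': 'red'}, dict_key='key', dict_value='value')
assert attributes == {'color': 'red'}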