def get_current_user():
    '''
    Return the ORM row for the currently logged-in user.

    :return: USER orm
    '''
    import dayu_database
    import table

    db_session = dayu_database.get_session()
    user_query = db_session.query(table.USER)
    return user_query.filter(table.USER.name == current_user_name()).one()
def get_db_config(db_config_name):
    '''
    Look up the DB_CONFIG orm for a given config name.

    :param db_config_name: config name entered by the user, e.g. ``db.movie``
    :return: DB_CONFIG orm
    '''
    import dayu_database
    from table import DB_CONFIG

    query = dayu_database.get_session().query(DB_CONFIG)
    return query.filter(DB_CONFIG.name == db_config_name).one()
def get_storage_config(storage_config_name):
    '''
    Look up the STORAGE orm for a given config name.

    :param storage_config_name: config name entered by the user,
                                e.g. ``storage.movie``
    :return: STORAGE orm
    '''
    import dayu_database
    from table import STORAGE

    query = dayu_database.get_session().query(STORAGE)
    return query.filter(STORAGE.name == storage_config_name).one()
def validate_depth(self, key, value):
    '''
    Use sqlalchemy's validation hook to resolve ``meaning`` from the depth.

    The depth value itself is never modified here; the hook is only used as
    a trigger to parse the meaning out of the db_config. Users should never
    set ``depth`` or ``meaning`` manually!

    :param key: name of the validated attribute (always ``depth``)
    :param value: the incoming depth (int)
    :return: the depth value, unchanged (int)
    '''
    if self.meaning is not None:
        return value

    import dayu_database
    import table

    # Read the DB_CONFIG orm; its content decides which meaning corresponds
    # to the current depth.
    session = dayu_database.get_session()
    assert session is not None
    # NOTE(review): the original wrapped this query in a try/except that only
    # re-raised the exception; querying directly preserves the full traceback.
    config_orm = session.query(table.DB_CONFIG) \
        .filter(table.DB_CONFIG.name == self.db_config_name) \
        .one()

    mean = config_orm.config.get(str(value))
    if len(mean['content']) > 1:
        # Multiple candidate meanings at this depth: match the db path
        # against each regex in ``db_pattern`` to pick one.
        branch_depth = mean['db_pattern']
        parents = getattr(self, 'hierarchy', None)
        db_path_string = '/' + '/'.join(str(x.name) for x in parents[1:])
        for pattern, candidate in branch_depth.items():
            if re.match('^{0}$'.format(pattern), db_path_string):
                self.meaning = candidate
                break
        else:
            raise Exception('no match meaning with depth!')
        # sanity check: an "end" meaning must live in the file table
        assert mean['is_end'][self.meaning] is (self.__tablename__ == 'file')
    else:
        self.meaning = mean['content'][0]

    # Trick: when the meaning is TYPE / TYPE_GROUP, mirror the name into the
    # corresponding *_name column.
    if self.meaning == 'TYPE':
        self.type_name = self.name
    if self.meaning == 'TYPE_GROUP':
        self.type_group_name = self.name
    return value
def validate_type_name(self, key, value):
    '''
    Validate that ``type_name`` refers to an existing TYPE row.

    :param key: name of the validated attribute (always ``type_name``)
    :param value: the incoming type name, or None
    :return: value, unchanged
    :raises Exception: when no TYPE row with the given name exists
    '''
    if value is None:
        return value

    import dayu_database
    from util import get_class

    session = dayu_database.get_session()
    try:
        type_table = get_class('type')
        # .one() raises when there is no (or more than one) matching row;
        # the result itself is not needed, only the existence check.
        session.query(type_table).filter(type_table.name == value).one()
    except Exception:
        # session.rollback()
        raise Exception('no TYPE named: {}'.format(value))
    return value
def get_configs(self, disk_type='publish'):
    '''
    Internal parsing helper.

    Returns a rough first-pass analysis of the path:

    * db_config_orm - the DB_CONFIG orm describing the database layout
    * storage_orm - the matching STORAGE orm
    * project_orm - the project orm (a FOLDER orm at depth 1)
    * root - the root path string
    * component - list of all path components starting at the project

    :param disk_type: storage area of the path, one of publish / work / cache
    :return: tuple (db_config_orm, storage_orm, project_orm, root, components),
             or five Nones when the path cannot be resolved
    '''
    import dayu_database
    from table import STORAGE
    import util

    session = dayu_database.get_session()
    component = self.split('/')
    # normalize windows drive letters like "D:" to lower case
    if component and ':' in component[0]:
        component[0] = component[0].lower()

    mark = component.index(disk_type) if disk_type in component else None
    # BUGFIX: the original tested ``if mark:`` which wrongly rejected a
    # match at index 0 — compare against None explicitly.
    if mark is None:
        return None, None, None, None, None

    root = '/'.join(component[:mark + 1])
    storage_orm = None
    for candidate in session.query(STORAGE):
        if root in candidate.config[disk_type].values():
            storage_orm = candidate
            break
    else:
        # raise Exception('no root matched in storage_config')
        return None, None, None, None, None

    project_name = component[mark + 1]
    project_orm = util.get_root_folder()[project_name]
    if not project_orm:
        # raise Exception('no matching project')
        return None, None, None, None, None
    db_config_orm = project_orm.db_config

    return db_config_orm, storage_orm, project_orm, root, component[mark + 1:]
def get_root_folder():
    '''
    Return the root FOLDER orm of the whole database, creating it on demand.

    :return: FOLDER orm
    '''
    import dayu_database
    import table
    import config

    session = dayu_database.get_session()
    try:
        return session.query(table.FOLDER).filter(
                table.FOLDER.name == config.DAYU_DB_ROOT_FOLDER_NAME).one()
    except Exception:
        # No root row yet (first run) — create and persist it.
        root = table.FOLDER(name=config.DAYU_DB_ROOT_FOLDER_NAME)
        session.add(root)
        session.commit()
        return root
def platform(self, platform=None):
    '''
    Convenience helper converting a pipeline path between platforms.

    If the path is a legal pipeline file, translate it to the file path used
    on the requested operating system; otherwise the path is returned as-is.

    :param platform: string, one of win32 / darwin / linux2
                     (windows, mac, linux respectively)
    :return: DiskPath object
    '''
    if platform is None:
        import sys
        platform = sys.platform

    import dayu_database
    from dayu_database.table import STORAGE

    session = dayu_database.get_session()
    component = self.split('/')

    # locate the storage-area marker in the path (checked in fixed order)
    area = None
    mark = None
    for candidate in ('publish', 'work', 'cache'):
        if candidate in component:
            area = candidate
            mark = component.index(candidate)
            break

    if mark is None:
        return self

    root_path = '/'.join(component[:mark + 1])
    for storage in session.query(STORAGE):
        for mapped_root in storage.config[area].values():
            if mapped_root != root_path:
                continue
            target = storage.config[area].get(platform, None)
            if not target:
                return self
            return DayuPath(target + '/' + '/'.join(component[mark + 1:]))
    return self
def get_vfx_onset(orm):
    '''
    Yield onset metadata FILE orms that look related to the given orm.

    A metadata file is considered related when one of its vfx_clue (or
    cam_clue) entries is the wildcard 'ALL'/'*', or appears as a substring
    of orm.name.

    :param orm: FOLDER or FILE orm
    :return: generator of FILE orms
    '''
    import dayu_database
    import table

    def _clue_hits(clues):
        # wildcard entries match everything; otherwise substring match on name
        return any(c in ('ALL', '*') or c in orm.name for c in clues)

    session = dayu_database.get_session()
    metadata_query = session.query(table.FILE) \
        .filter(table.FILE.meaning == 'METADATA') \
        .filter(table.FILE.top == orm.top)
    for meta_file in metadata_query:
        if _clue_hits(meta_file.vfx_clue) or _clue_hits(meta_file.cam_clue):
            yield meta_file
def push(self, overwrite=False):
    '''
    Push every json preset that was read into the database.

    :param overwrite: when False only new configs are created; when True
                      existing configs are force-updated as well.
    :return: None
    '''
    import dayu_database

    session = dayu_database.get_session()
    table_class = util.get_class(
            'storage') if self.prefix == 'storage' else util.get_class(
            self.prefix + '_config')
    for key in self.__class__.all_configs:
        try:
            old_orm = session.query(table_class).filter(
                    table_class.name == key).one()
            if overwrite:
                old_orm.extra_data = self.__class__.all_configs[key]
        except Exception:
            # no existing row with this name — create it (get-or-create)
            new_orm = table_class(name=key,
                                  extra_data=self.__class__.all_configs[key])
            session.add(new_orm)
    session.commit()
    session.close()
def create_project(name, template_or_project=None, custom_storage=None):
    '''
    Create a new project FOLDER together with its db / storage / pipeline
    configs, plus the default asset / sequence / metadata sub-folders.

    :param name: project name
    :param template_or_project: either a template name string (the configs
           are then pulled from ConfigTemplateManager) or an existing
           project FOLDER orm whose configs are copied
    :param custom_storage: optional dict overriding the storage config
    :return: the newly created project FOLDER orm (not committed)
    '''
    import dayu_database.born
    import table

    session = dayu_database.get_session()
    project_orm = table.FOLDER(name=name, parent=get_root_folder())
    project_orm.db_config_name = 'db.{}'.format(name)
    project_orm.storage_config_name = 'storage.{}'.format(name)
    project_orm.pipeline_config_name = 'pipeline.{}'.format(name)

    if isinstance(template_or_project, basestring):
        # template given by name: pull the configs from the template manager
        # (removed a stray py2 debug ``print`` that was here)
        template_manager = dayu_database.born.ConfigTemplateManager()
        sub_template_name = '.'.join(template_or_project.split('.')[1:])
        db_orm = table.DB_CONFIG(
                name=project_orm.db_config_name,
                extra_data=template_manager.db_config_manager.all_configs.get(
                        'db.' + sub_template_name))
        storage_orm = table.STORAGE(
                name=project_orm.storage_config_name,
                extra_data=template_manager.storage_config_manager.all_configs.get(
                        'storage.' + sub_template_name))
        pipeline_orm = table.PIPELINE_CONFIG(
                name=project_orm.pipeline_config_name,
                extra_data=template_manager.pipeline_config_manager.all_configs.get(
                        'pipeline.' + sub_template_name))
    else:
        # existing project given: copy its config contents
        db_orm = table.DB_CONFIG(
                name=project_orm.db_config_name,
                extra_data=template_or_project.db_config.config)
        storage_orm = table.STORAGE(
                name=project_orm.storage_config_name,
                extra_data=template_or_project.storage.config)
        pipeline_orm = table.PIPELINE_CONFIG(
                name=project_orm.pipeline_config_name,
                extra_data=template_or_project.pipeline_config.config)

    if custom_storage:
        storage_orm.extra_data = custom_storage

    session.add_all([project_orm, db_orm, storage_orm, pipeline_orm])
    project_orm.db_config = db_orm
    project_orm.storage = storage_orm
    project_orm.pipeline_config = pipeline_orm

    # default top-level folders; constructing a FOLDER with parent=... links
    # it into the hierarchy as a side effect
    asset_seq_grp = table.FOLDER(name='asset', label='Asset',
                                 parent=project_orm)
    seq_seq_grp = table.FOLDER(name='sequence', label='Sequence',
                               parent=project_orm)
    metadata_seq_grp = table.FOLDER(name='metadata', label='Metadata',
                                    parent=project_orm)

    asset_seq_dict = {
        'chr': 'Character',
        'efx': 'Effects',
        'env': 'Environment',
        'mpt': 'Matte Painting',
        'prp': 'Prop',
        'std': 'Standard',
        'asb': 'Assembly',
        'cpt': 'Concept',
        'veh': 'Vehicle'
    }
    metadata_seq_dict = {
        'lds': 'Lens Data',
        'env': 'Environment',
        'ref': 'Reference',
        'report': 'Report',
        'scan': 'Scanning',
        'hdri': 'HDRI',
        'misc': 'Misc',
        'cpt': 'Concept',
        'art': 'Art'
    }
    for short_name in asset_seq_dict:
        table.FOLDER(name=short_name,
                     label=asset_seq_dict.get(short_name, short_name),
                     parent=asset_seq_grp)
    for short_name in metadata_seq_dict:
        # BUGFIX: the original looked labels up in asset_seq_dict here
        # (copy-paste error) — metadata folders take their labels from
        # metadata_seq_dict.
        table.FOLDER(name=short_name,
                     label=metadata_seq_dict.get(short_name, short_name),
                     parent=metadata_seq_grp)
    return project_orm
def insert_folder(mapper, connection, target):
    '''
    Validate a FOLDER orm right before it is inserted into the database
    (sqlalchemy before-insert listener).

    :param mapper: sqlalchemy mapper
    :param connection: sqlalchemy connection
    :param target: the FOLDER orm being inserted
    :return: None
    '''
    import dayu_database as db
    import util

    # records flagged as "deleted" are not validated
    if target.active is False:
        return
    # the root folder itself is not validated
    if target.name == config.DAYU_DB_ROOT_FOLDER_NAME:
        return

    # a parent is mandatory
    assert target.parent_id is not None
    # first-level folders (usually projects) must carry explicit
    # db / storage / pipeline config names
    assert not (target.parent.name == config.DAYU_DB_ROOT_FOLDER_NAME and target.db_config_name is None)
    assert not (target.parent.name == config.DAYU_DB_ROOT_FOLDER_NAME and target.storage_config_name is None)
    assert not (target.parent.name == config.DAYU_DB_ROOT_FOLDER_NAME and target.pipeline_config_name is None)

    # deeper levels inherit the parent's config names
    if target.db_config_name is None:
        target.db_config_name = target.parent.db_config_name
    if target.storage_config_name is None:
        target.storage_config_name = target.parent.storage_config_name
    if target.pipeline_config_name is None:
        target.pipeline_config_name = target.parent.pipeline_config_name
    # inherit parent's type_name / type_group_name
    if target.type_name is None:
        target.type_name = target.parent.type_name
    if target.type_group_name is None:
        target.type_group_name = target.parent.type_group_name

    # depth + 1 triggers the @validate('depth') hook in db.mixin.DepthMixin
    target.depth = target.parent.depth + 1
    if target.depth == 2:
        target.top_id = target.parent_id
        target.top = target.parent
    else:
        target.top_id = target.parent.top_id
        target.top = target.parent.top

    # Read the db_config and expand the user-supplied short name into the
    # full name (e.g. for a shot, name=0010 becomes pl_0010).
    session = db.get_session()
    assert session is not None
    config_orm = util.get_db_config(target.db_config_name)
    parents = list(getattr(target, 'hierarchy', None))
    depth_config = config_orm.config[str(target.depth)]
    selected_names = (x.name for index, x in enumerate(parents)
                      if index in depth_config['to_name_param'][target.meaning])
    target.name = depth_config['to_name'][target.meaning].format(*selected_names)

    # Make sure no sibling carries the same name.
    # FIX: the original used ``try: assert ... except: raise`` — asserts are
    # stripped under -O and the bare except swallowed unrelated errors;
    # raise explicitly instead.
    duplicated = next((x for x in target.parent.children
                       if x.name == target.name and x.id != target.id), None)
    if duplicated is not None:
        raise Exception(target.name)

    # default label to name so GUIs always have something to display
    if target.label is None:
        target.label = target.name

    # shotgun listeners need to patch cloud_id / cloud_table, so the signal
    # must be emitted here, before the commit
    import message
    message.pub(('event', 'db', 'folder', 'commit', 'before'), mapper, connection, target)
def items(self):
    '''
    Query the database according to the saved search description.

    The search options are stored in extra_data, for example:
    {'target_table': 'folder',
     'filters': {'and': [{'col': 'name', 'op': 'like', 'value': '%0010%', 'do': True},
                         {'or': [{'col': 'top.name', 'op': 'eq', 'value': 'ss', 'do': True},
                                 {'col': 'created_by.name', 'op': 'in', 'value': 'yangzhuo,andyguo', 'do': True}]}]}}

    ``target_table`` names the table to query; ``filters`` describes the
    search conditions, which come in two flavours:

    * logic nodes, e.g. and / or / not
    * filter nodes

    A logic node is a dict whose key is the logic type and whose value is
    always a list. A filter node is a dict describing one condition with
    three keys:

    * col: the column to search; chained lookups are separated by ".",
      e.g. "top.name"
    * op: the operation to apply, e.g. in, eq, not_in
    * value: the user-supplied data for the operation; multiple values are
      comma separated, e.g. "a,b,c,d"

    :return: sql query object; callers must list() it themselves to get
             the actual rows
    '''
    import dayu_database as db
    import util
    import filter_parse
    # resolve the ORM class to query
    model_class = util.get_class(self.extra_data.get('target_table', 'folder'))
    # the base query object every filter is applied onto
    sql_expr = db.get_session().query(model_class)

    def _build_filter(model_class, col_name_list):
        '''
        Build the chain of query callables for one filter condition.
        (users should never call this themselves)

        :param model_class: ORM class
        :param col_name_list: list of string, the split "col" of the condition
        :return: list
        '''
        current_table = model_class
        relationship_filters = []
        # a "." separated col name means several chained (joined) lookups
        for col_name in col_name_list:
            # only real sqlalchemy attributes of the ORM class may take part
            # in SQL generation — this excludes plain Python attributes
            sql_attr = inspect(current_table).attrs.get(col_name, None)
            # the InstrumentedAttribute object, essentially FOLDER.xxx
            # (inspect / ColumnProperty / RelationshipProperty are assumed to
            # come from module-level sqlalchemy imports — not visible here)
            col_attr = getattr(current_table, col_name, None)
            # a column is a plain table attribute — no sub-query needed
            if sql_attr.__class__ == ColumnProperty:
                relationship_filters.append(col_attr)
            # a relationship needs a sub-query
            elif sql_attr.__class__ == RelationshipProperty:
                # pick any() vs has() depending on one-to-many / many-to-one
                if sql_attr.uselist:
                    col_attr = getattr(col_attr, 'any', None)
                else:
                    col_attr = getattr(col_attr, 'has', None)
                # the sub-query may cross tables, so subsequent lookups use
                # the related class as the new current table
                current_table = sql_attr.mapper.class_
                relationship_filters.append(col_attr)
            else:
                raise Exception('no such a name in ORM')
        return relationship_filters

    def traverse_filter(raw_filter):
        '''
        Walk the whole search dict.

        :param raw_filter: dict, must be a logic node, e.g. {and: [...]}
        :return: an expression usable inside .filter()
        '''
        for key, value in raw_filter.items():
            # resolve the logic combinator (and_/or_/not_)
            logic = filter_parse.LOGIC_SWITCH[key]
            # collects the conditions inside this logic node
            param = []
            for sub in value:
                # a node without "col" is a nested logic node — recurse
                if sub.get('col', None) is None:
                    param.append(traverse_filter(sub))
                else:
                    # a regular filter condition
                    do = sub.get('do')
                    # skip conditions the user did not check
                    if not do:
                        continue
                    col_name = sub.get('col')
                    op = sub.get('op')
                    data_type = sub.get('type')
                    # resolve keywords inside value: strings pass through,
                    # dicts are dispatched to their key-specific handler
                    exp_value = filter_parse.resolve_expression(sub.get('value'))
                    # convert the string per the sqlalchemy data type so
                    # comparisons work correctly (e.g. DATETIME)
                    exp_value = filter_parse.resolve_type(data_type, exp_value)
                    # the query callables, ordered by lookup order
                    attr_list = _build_filter(model_class, col_name.split('.'))
                    # trick: sqlalchemy has no not_in-style ops, so split
                    # into the plain op now and negate the result later
                    if 'not' in op:
                        op = op.replace('not', '').strip('_')
                    # an "in" op requires the value to be a list
                    if op == 'in':
                        attr_list[-1] = attr_list[-1].in_(exp_value.split(','))
                    # other ops: probe the possible method spellings
                    else:
                        attr = next((x.format(op) for x in ['{}', '{}_', '__{}__'] if hasattr(attr_list[-1], x.format(op))), None)
                        if attr is None:
                            raise Exception('not a legal op')
                        if exp_value == 'null':
                            exp_value = None
                        attr_list[-1] = getattr(attr_list[-1], attr)(exp_value)
                    # reverse reduce: always apply list[n](list[n+1]) until done,
                    # yielding e.g. FOLDER.top.has(FOLDER.created.has(USER.name.in_([...])))
                    single_sql = attr_list.pop()
                    while attr_list:
                        single_sql = attr_list.pop()(single_sql)
                    # a "not" op negates the whole expression, e.g. not_(...)
                    if 'not' in sub.get('op'):
                        single_sql = filter_parse.LOGIC_SWITCH['not'](single_sql)
                    # add to this logic node's conditions
                    param.append(single_sql)
            # return the combined logic expression
            return logic(*param)

    # the actual call; returns a sql query object that can be iterated with
    # a for loop — otherwise callers must list() it themselves
    if self.extra_data.get('filters', None):
        filter_func = traverse_filter(self.extra_data['filters'])
        return (x for x in sql_expr.filter(filter_func).filter(model_class.active == True))
    else:
        return []
def insert_file(mapper, connection, target):
    '''
    Validate a FILE orm right before it is written to the database, using
    sqlalchemy's listener mechanism.

    :param mapper: sqlalchemy mapper
    :param connection: sqlalchemy connection
    :param target: the FILE orm being inserted
    :return: None
    '''
    import dayu_database as db
    import util
    # records flagged as "deleted" skip validation
    if target.active is False:
        return
    # a parent is mandatory
    assert target.parent_id is not None
    # inherit parent's db_config_name
    if target.db_config_name is None:
        target.db_config_name = target.parent.db_config_name
    # inherit parent's storage_config_name
    if target.storage_config_name is None:
        target.storage_config_name = target.parent.storage_config_name
    # deeper levels inherit parent's pipeline_config_name
    if target.pipeline_config_name is None:
        target.pipeline_config_name = target.parent.pipeline_config_name
    # inherit parent's type_name
    if target.type_name is None:
        target.type_name = target.parent.type_name
    # inherit parent's type_group_name
    if target.type_group_name is None:
        target.type_group_name = target.parent.type_group_name
    # parent depth + 1 triggers mixin.DepthMixin's @validate('depth') hook
    target.depth = target.parent.depth + 1
    # set the top attribute
    if target.depth == 2:
        target.top_id = target.parent_id
        target.top = target.parent
    else:
        target.top_id = target.parent.top_id
        target.top = target.parent.top
    # A FILE without an explicit name is most likely a VERSION / DAILIES:
    # auto-increment the version name based on the files already present
    # in the parent folder.
    session = db.get_session()
    assert session is not None
    config_orm = util.get_db_config(target.db_config_name)
    if target.name is None:
        try:
            # FILE and version_regex are assumed to be module-level names
            # (not visible in this chunk)
            contents = target.parent.sub_files.filter(FILE.id != target.id)[-1]
            match = version_regex.match(str(contents.name))
            if match:
                version_num = match.groups()[0]
                # keep the zero-padding width of the previous version
                target.name = 'v%0{}d'.format(len(version_num)) % (int(version_num) + 1)
                # if target.old_file_id is None:
                #     target.old_file_id = contents.id
        except:
            # no previous version found — fall back to the configured
            # initial version for this type group (default v0001)
            cas_info = db.util.get_cascading_info(target, 'cascading_info')['all_info']
            target.name = cas_info.get('init_{}_version'.format(target.type_group_name), 'v0001')
    # compose the full name according to the db_config
    parents = list(getattr(target, 'hierarchy', None))
    depth_config = config_orm.config[str(target.depth)]
    selected_names = (x.name for index, x in enumerate(parents)
                      if index in depth_config['to_name_param'][target.meaning])
    target.name = depth_config['to_name'][target.meaning].format(*selected_names)
    # guarantee no sibling carries the same name
    assert next((x for x in target.parent.children
                 if x.name == target.name and x.id != target.id), None) is None
    # default label to name so GUIs always have something to display
    if target.label is None:
        target.label = target.name
    # shotgun listeners need to patch cloud_id / cloud_table, so the signal
    # must be emitted here, before the commit
    import message
    message.pub(('event', 'db', 'file', 'commit', 'before'), mapper, connection, target)
def create(self, *list_of_names):
    '''
    Create new orms successively, database-path style.

    Two restrictions:

    * projects cannot be created this way, because the project level needs
      a user-supplied db_config_name and storage_config_name
    * auto-incrementing VERSION files cannot be created this way

    :param list_of_names: list of string — note these are all "short names"
    :param parents:
    :return: a DBPath object for the newly created path
    '''
    import table
    import util
    import dayu_database
    session = dayu_database.get_session()
    current_orm = self.orm()
    new_path_list = []
    # an iterable result means the path does not resolve to a single orm
    if isinstance(current_orm, collections.Iterable):
        raise Exception('DBPath not represent a ORM!')
    config_orm = util.get_db_config(current_orm.db_config_name)
    for index, component in enumerate(list_of_names):
        # reuse an existing child with this short name, if any
        sub_orm = next(
                (x for x in current_orm.children if x.name == component),
                None)
        if sub_orm is None:
            # need to create: look up the config for the next depth level
            depth_config = config_orm.config.get(
                    str(current_orm.depth + 1), None)
            if depth_config:
                current_new_path = self + '/' + '/'.join(
                        list_of_names[:index + 1])
                # match the prospective path against the db_pattern regexes
                # to determine its meaning
                for _key, _value in depth_config['db_pattern'].items():
                    if re.match('^{0}$'.format(_key), current_new_path):
                        meaning = _value
                        # "end" meanings become FILE rows, others FOLDER rows
                        if depth_config['is_end'][meaning]:
                            new_orm = table.FILE(name=component,
                                                 parent=current_orm)
                        else:
                            new_orm = table.FOLDER(name=component,
                                                   parent=current_orm)
                        try:
                            session.add(new_orm)
                            # flush runs the insert listeners / validation now
                            session.flush()
                            sub_orm = new_orm
                            new_path_list.append(sub_orm.name)
                            break
                        except Exception as e:
                            session.rollback()
                            # a FILE collision is a hard error
                            if isinstance(new_orm, table.FILE):
                                raise
                            # FOLDER collision: fall back to the existing
                            # sibling named in the error (py2 e.message)
                            sub_orm = next((x for x in current_orm.children
                                            if x.name == e.message), None)
                            new_path_list.append(sub_orm.name)
                            break
                else:
                    raise Exception(
                            'no match meaning with depth!, {}'.format(
                                    current_new_path))
            else:
                raise Exception('no db_config found!')
        else:
            try:
                # descend using the existing child's db_config
                config_orm = util.get_db_config(sub_orm.db_config_name)
                new_path_list.append(component)
            except Exception as e:
                raise e
        # walk down one level for the next component
        current_orm = sub_orm
    return self.child(*new_path_list)