def find_output_names(self, output_name=None, output_type=None, task=None,
                      filter_=None, verbose=1):
    """Find output names within this step root.

    Args:
        output_name (str): filter by output name
        output_type (str): filter by output type
        task (str): filter by task
        filter_ (str): apply filter to paths
        verbose (int): print process data

    Returns:
        (TTOutputNameBase list): output names list
    """
    _results = self.read_output_names(verbose=verbose)

    # Apply path filter first
    if filter_:
        _results = apply_filter(
            _results, filter_, key=operator.attrgetter('path'))

    # Then match each attr that was requested
    for _attr, _val in [
            ('output_name', output_name),
            ('output_type', output_type),
            ('task', task)]:
        if _val:
            _results = [_result for _result in _results
                        if getattr(_result, _attr) == _val]

    return _results
def test_passes_filter(self):
    """Check basic include/exclude/require filter tokens."""
    assert passes_filter('blah', '-ag', verbose=1)
    assert passes_filter('blah', 'ah')
    assert not passes_filter('blah', 'ag')
    assert passes_filter('test maya', 'test blah')
    assert not passes_filter('test maya', 'test +blah')

    # Test key
    class _Named(object):
        def __init__(self, name):
            self.name = name

    _fruit_a = _Named('apple')
    _fruit_b = _Named('banana')
    _name_key = operator.attrgetter('name')
    assert passes_filter(_fruit_a, 'apple', key=_name_key)
    _fruits = [_fruit_a, _fruit_b]
    assert apply_filter(_fruits, 'apple', key=_name_key) == [_fruit_a]

    # Test case sensitive
    assert passes_filter('aaa', 'AAA')
    assert not passes_filter('aaa', 'AAA', case_sensitive=True)

    # Test quotes
    assert passes_filter('this is text', '"This is"')
def find_outputs(self, filter_=None):
    """Find outputs within this version dir.

    Args:
        filter_ (str): filter by path

    Returns:
        (TTOutput list): list of outputs
    """
    _outputs = self._read_outputs()
    if not filter_:
        return _outputs
    return apply_filter(_outputs, filter_, key=operator.attrgetter('path'))
def find_step_roots(self, class_=None, filter_=None):
    """Find steps in this shot.

    Args:
        class_ (TTStepRoot): override step root class
        filter_ (str): filter the list of steps

    Returns:
        (TTStepRoot list): list of steps
    """
    _steps = self._read_step_roots(class_=class_)
    _path_key = operator.attrgetter('path')
    return apply_filter(_steps, filter_, key=_path_key)
def _redraw__Asset(self):
    """Redraw the Asset list widget.

    Repopulates the Asset list with asset roots matching the currently
    selected asset type, optionally narrowed by the AssetFilter text,
    then triggers the Asset callback to update downstream widgets.
    """
    # Current selection in the AssetType list (None if nothing selected)
    _type = get_single(self.ui.AssetType.selected_text(), catch=True)
    _assets = [_asset for _asset in self._asset_roots
               if _asset.sg_asset_type == _type]
    _filter = self.ui.AssetFilter.text()
    if _filter:
        _assets = apply_filter(
            _assets, _filter, key=operator.attrgetter('asset'))
    # Block signals while repopulating so clear/add/setCurrentRow don't
    # each fire the selection callback - it is invoked once manually below
    self.ui.Asset.blockSignals(True)
    self.ui.Asset.clear()
    for _asset in _assets:
        # Each item carries its asset object as data for later retrieval
        _item = qt.HListWidgetItem(_asset.asset, data=_asset)
        self.ui.Asset.addItem(_item)
    self.ui.Asset.setCurrentRow(0)
    self.ui.Asset.blockSignals(False)
    self._callback__Asset()
def find_names(self, class_=None, filter_=None, output_name=None, task=None,
               verbose=0):
    """Find output names in this type dir.

    Args:
        class_ (class): override output name class
        filter_ (str): filter by path
        output_name (str): filter by output name
        task (str): filter by task
        verbose (int): print process data

    Returns:
        (TTOutputName list): list of output names
    """
    _results = self._read_names(class_=class_)
    lprint(' - FOUND {:d} NAMES'.format(len(_results)), verbose=verbose)

    # Path filter
    if filter_:
        lprint(' - APPLYING FILTER', len(_results), verbose=verbose)
        _results = apply_filter(
            _results, filter_, key=operator.attrgetter('path'))

    # Exact output name match
    if output_name is not None:
        lprint(' - APPLYING NAME FILTER', len(_results), verbose=verbose)
        _results = [_result for _result in _results
                    if _result.output_name == output_name]

    # Exact task match (with per-item trace)
    if task is not None:
        lprint(' - APPLYING TASK FILTER', len(_results), verbose=verbose)
        _task_matches = []
        for _result in _results:
            if _result.task == task:
                lprint(' - ACCEPTED', _result, verbose=verbose)
                _task_matches.append(_result)
            else:
                lprint(' - REJECTED', _result, verbose=verbose)
        _results = _task_matches

    lprint(' - FOUND {:d} MATCHING NAMES'.format(len(_results)),
           verbose=verbose)
    return _results
def find_connected(self, type_=None, depth=1, filter_=None, source=True,
                   destination=True, verbose=0):
    """Recursively traverse connected nodes in graph.

    Args:
        type_ (str): filter by node type
        depth (int): traversal depth
        filter_ (str): filter by node name
        source (bool): traverse upstream nodes
        destination (bool): traverse downstream nodes
        verbose (int): print process data

    Returns:
        (HFnDependencyNode list): list of connected nodes
    """
    # Local import - presumably avoids a circular import at module load
    from maya_psyhive import open_maya as hom

    # Find connections
    _conns = set()
    # Set dedupes direct connections; list_connections may return None
    _this_conns = set(self.list_connections(
        source=source, destination=destination, shapes=True) or [])
    for _conn in _this_conns:
        _conn = hom.HFnDependencyNode(_conn)
        _conns.add(_conn)
        # NOTE(review): verbose hard-coded to 0 here (debug trace left
        # disabled) - the indent scales with remaining depth
        lprint(' '*(5-depth), 'ADDING', _conn, verbose=0)
        # Recurse one level shallower until depth is exhausted
        if depth > 0:
            _conns |= set(_conn.find_connected(
                depth=depth-1, source=source, destination=destination,
                verbose=verbose))
    _conns = sorted(_conns)

    # Apply filters
    if type_:
        _conns = [
            _conn for _conn in _conns if _conn.object_type() == type_]
    if filter_:
        _conns = apply_filter(_conns, filter_, key=str)

    return _conns
def find_project(name, verbose=0):
    """Find a project matching the given name.

    Args:
        name (str): project name to search for
        verbose (int): print process data

    Returns:
        (Project): matching project
    """
    _projs = find_projects()

    # Prefer an exact name match
    _exact = get_single(
        [_proj for _proj in _projs if _proj.name == name],
        catch=True, verbose=verbose)
    if _exact:
        return _exact

    # Otherwise fall back to treating the name as a filter string
    _filtered = apply_filter(_projs, name, key=operator.attrgetter('name'))
    return get_single(_filtered, verbose=verbose + 1)
def find_versions(self, class_=None, version=None, filter_=None):
    """Find versions of this output name.

    Args:
        class_ (class): override output version class
        version (int): filter by version
        filter_ (str): filter file path

    Returns:
        (TTOutputVersion list): list of versions
    """
    _results = self._read_versions(class_=class_)

    # Apply version filter - 'latest' keeps only the last entry
    if version == 'latest' and _results:
        _results = [_results[-1]]
    elif version is not None:
        _results = [_result for _result in _results
                    if _result.version == version]

    # Apply path filter
    if filter_:
        _results = apply_filter(
            _results, filter_, key=operator.attrgetter('path'))

    return _results
def reload_libs(mod_names=None, sort=None, execute=True, filter_=None,
                close_interfaces=True, catch=False, check_root=None,
                delete=False, verbose=1):
    """Reload libraries.

    Args:
        mod_names (str list): override list of modules to reload
        sort (fn): module reload sort function
        execute (bool): execute the reload (otherwise just print the
            sorted list)
        filter_ (str): filter the list of modules
        close_interfaces (bool): close interfaces before refresh
        catch (bool): no error on fail to reload
        check_root (str): compare module locations to this root - this is
            used to check if a location has been successfully changed
        delete (bool): delete and reimport modules on reload (to flush vars)
        verbose (int): print process data

    Returns:
        (bool): whether all module were successfully reloaded and their
            paths updated the root (if applicable)
    """
    if close_interfaces:
        qt.close_all_interfaces()

    # Get list of mod names to sort - copy any passed list so the sort
    # below doesn't mutate the caller's list in place
    if not mod_names:
        _mod_names = apply_filter(sys.modules.keys(), 'hv_test psyhive')
    else:
        _mod_names = list(mod_names)
    _sort = sort or get_mod_sort(order=_RELOAD_ORDER)
    if filter_:
        _mod_names = apply_filter(_mod_names, filter_)
    _mod_names.sort(key=_sort)

    # Reload the modules
    _count = 0
    _start = time.time()
    _fails = 0
    for _mod_name in _mod_names:
        # Use get() - reloading (especially with delete=True) can remove
        # entries from sys.modules while we iterate, so a plain subscript
        # lookup could raise KeyError
        _mod = sys.modules.get(_mod_name)
        if not _mod:
            continue
        _reload_mod(mod=_mod, mod_name=_mod_name, execute=execute,
                    sort=_sort, delete=delete, verbose=verbose, catch=catch)
        _count += 1
        # Count modules whose file did not move under the expected root
        if check_root and not abs_path(_mod.__file__).startswith(
                abs_path(check_root)):
            _fails += 1

    # Print summary
    _msg = 'Reloaded {:d} libs in {:.02f}s'.format(
        _count, time.time() - _start)
    if check_root:
        _msg += ' ({:d} fails)'.format(_fails)
    dprint(_msg, verbose=verbose)

    return not _fails
def test_apply_filter(self):
    """Check a null filter leaves the list unchanged."""
    _items = ['a', 'b']
    assert apply_filter(_items, None) == _items
def find_action_works(type_=None, task_filter=None, day_filter=None,
                      max_age=None, after=None, task=None, root=None,
                      filter_=None, version=None, fbx_filter=None,
                      ma_filter=None, name=None, desc=None, force=False,
                      progress=True):
    """Find action work files in frasier project.

    Args:
        type_ (str): filter by type (eg. Vignette, Disposition)
        task_filter (str): apply filter to work task attribute
        day_filter (str): filter by day (in %y%m%d format)
        max_age (float): reject any work files older than is many secs
        after (str): return works on or after this day (in %y%m%d format)
        task (str): filter by exact task name
        root (TTRoot): filter by root
        filter_ (str): apply filter to work file path
        version (int): filter by version (v001 are always ingested files)
        fbx_filter (str): apply filter export fbx path
        ma_filter (str): filter by vendor ma path
        name (str): filter by exact name
        desc (str): filter by exact desc
        force (bool): force reread actions from disk
        progress (bool): show progress bar on read

    Returns:
        (FrasierWork list): list of work files
    """
    _works = _read_action_works(force=force, progress=progress)

    # Filter strings
    if filter_:
        _works = apply_filter(
            _works, filter_, key=operator.attrgetter('path'))
    if task_filter:
        _works = apply_filter(
            _works, task_filter, key=operator.attrgetter('task'))
    if fbx_filter:
        _works = [_work for _work in _works
                  if passes_filter(_work.get_export_fbx().path, fbx_filter)]
    if ma_filter:
        _works = [_work for _work in _works
                  if _work.get_mtime() and
                  passes_filter(_work.get_vendor_file(), ma_filter)]

    # Exact attr matches
    for _attr, _val in [
            ('task', task), ('type_', type_), ('name', name),
            ('desc', desc), ('version', version)]:
        if _val:
            _works = [_work for _work in _works
                      if getattr(_work, _attr) == _val]
    if root:
        _works = [_work for _work in _works if _work.get_root() == root]

    # Date matching
    if max_age is not None:
        _works = [_work for _work in _works if _work.get_age() < max_age]
    if day_filter:
        _works = [_work for _work in _works
                  if _work.get_mtime() and
                  _work.get_mtime_fmt('%y%m%d') == day_filter]
    if after:
        _cutoff = get_time_f(time.strptime(after, '%y%m%d'))
        _works = [_work for _work in _works
                  if _work.get_mtime() and _work.get_mtime() >= _cutoff]

    return _works