def _fetch_existing_data(self):
    """Load the Tasks tagged in the exporter's workspace and their existing
    publishes, then emit ``loaded_publishes(tasks, publishes)``.

    On any failure the task combo box is cleared and replaced with a single
    "Loading Error!" item, and the exception is re-raised for the caller.
    """
    try:
        sgfs = SGFS()
        # Every entity tagged in the workspace must be a Task.
        tasks = sgfs.entities_from_path(self._exporter.workspace)
        if not tasks:
            raise ValueError('No entities in workspace %r' % self._exporter.workspace)
        if any(x['type'] != 'Task' for x in tasks):
            raise ValueError('Non-Task entity in workspace %r' % self._exporter.workspace)
        publishes = sgfs.session.find(
            'PublishEvent',
            [
                # Tuple concatenation builds an "in" filter listing every
                # task id: ('sg_link.Task.id', 'in', id1, id2, ...).
                ('sg_link.Task.id', 'in') + tuple(x['id'] for x in tasks),
                ('sg_type', 'is', self._exporter.publish_type),
                ('sg_version', 'greater_than', 0),  # Skipped failures.
            ], [
                'code',
                'sg_version'
            ]
        )
    except Exception as e:
        # Broad catch is deliberate: surface the error in the UI first,
        # then re-raise so the failure is still visible to the caller.
        self._task_combo.clear()
        self._task_combo.addItem('Loading Error! %s' % e, {})
        raise
    else:
        self.loaded_publishes.emit(tasks, publishes)
def check_paths(paths, only_published=True):
    """Resolve the publish status of each of the given paths.

    :param paths: iterable of filesystem paths to check.
    :param only_published: skip paths that have no PublishEvent tagged on them.
    :return: list of ``ReferenceStatus`` tuples with the publish used, the
        latest sibling publish, and whether the used one is the latest.
    """
    sgfs = SGFS()
    res = []
    for path in paths:
        publishes = sgfs.entities_from_path(path, 'PublishEvent')
        if only_published and not publishes:
            continue
        publish = publishes[0] if publishes else None
        if publish:
            # Fetch all publishes in the same stream (same link/code/type).
            # BUG FIX: 'sg_version' was not in the requested fields even
            # though it is used for sorting below; request it explicitly.
            siblings = sgfs.session.find('PublishEvent', [
                ('sg_link', 'is', publish['sg_link']),
                ('code', 'is', publish['code']),
                ('sg_type', 'is', publish['sg_type']),
            ], ['sg_path', 'sg_version'])
            siblings.sort(key=lambda x: x['sg_version'])
            # The list is already sorted by version; the last one is the
            # latest (the previous max() over the same key was redundant).
            latest = siblings[-1]
        else:
            siblings = []
            latest = None
        res.append(ReferenceStatus(
            path=path,
            used=publish,
            latest=latest,
            is_latest=publish is latest if publish else False,
            all=siblings,
        ))
    return res
def _export(self, kwargs):
    """Run the publish for the current export settings.

    Performs the safety check, resolves the target Task (combo selection,
    falling back to the Task tagged in the exporter's workspace), creates
    the publish, optionally promotes it for review, and optionally creates
    a TimeLog. Returns the publisher.

    :param kwargs: export options forwarded to ``safety_check``,
        ``publish`` (as ``export_kwargs``) and ``fields_for_review_version``.
    :raises PublishSafetyError: if the safety check fails.
    :raises ValueError: if no Task is selected and none is tagged in the
        exporter's workspace.
    """
    if not self.safety_check(**kwargs):
        raise PublishSafetyError()

    task_data = self._task_combo.currentData()
    task = task_data.get('task')
    if not task:
        # Fall back to the Task tagged in the workspace.
        sgfs = SGFS()
        tasks = sgfs.entities_from_path(self._exporter.workspace, 'Task')
        if not tasks:
            raise ValueError('Could not find SGFS tagged entities')
        task = tasks[0]

    # The selected "stream" may carry a parent publish to version up from.
    stream_data = self._name_combo.currentData()
    parent = stream_data.get('publish')

    publisher = self._exporter.publish(
        task,
        name=self.name(),
        description=self.description(),
        version=self.version(),
        parent=parent,
        thumbnail_path=self.thumbnail_path(),
        frames_path=self.frames_path(),
        movie_path=self.movie_path(),
        export_kwargs=kwargs,
    )

    if self._promote_checkbox.isChecked():
        # progress.setLabelText('Creating Version for Review...')
        promotion_fields = self._exporter.fields_for_review_version(
            publisher, **kwargs)
        print "PROMOTE", promotion_fields
        publisher.promote_for_review(**promotion_fields)

    # Create the timelog.
    minutes = self._timelog_spinbox.value()
    if minutes:
        # progress.setLabelText('Logging time...')
        publisher.sgfs.session.create(
            'TimeLog', {
                'project': publisher.entity.project(),
                'entity': publisher.link,
                'user': publisher.sgfs.session.guess_user(),
                'duration': minutes,  # presumably minutes — TODO confirm TimeLog units
                'description': '%s_v%04d' % (publisher.name, publisher.version),
                'date': datetime.datetime.utcnow().date(),
            })

    # progress.hide()
    return publisher
def assert_workspace():
    """Ensure the current scene is saved under a Shotgun Task, then point
    the Maya workspace at that task's directory."""
    current_scene = cmds.file(q=True, sceneName=True)
    if not current_scene:
        raise ValueError("Scene is not saved.")
    sgfs_api = SGFS()
    task_entities = sgfs_api.entities_from_path(current_scene, ['Task'])
    if not task_entities:
        raise ValueError("Scene is not in a task.")
    workspace_path(sgfs_api.path_for_entity(task_entities[0]))
def _export(self, kwargs): if not self.safety_check(**kwargs): raise PublishSafetyError() task_data = self._task_combo.currentData() task = task_data.get('task') if not task: sgfs = SGFS() tasks = sgfs.entities_from_path(self._exporter.workspace, 'Task') if not tasks: raise ValueError('Could not find SGFS tagged entities') task = tasks[0] stream_data = self._name_combo.currentData() parent = stream_data.get('publish') publisher = self._exporter.publish(task, name=self.name(), description=self.description(), version=self.version(), parent=parent, thumbnail_path=self.thumbnail_path(), frames_path=self.frames_path(), movie_path=self.movie_path(), export_kwargs=kwargs, ) if self._promote_checkbox.isChecked(): # progress.setLabelText('Creating Version for Review...') promotion_fields = self._exporter.fields_for_review_version(publisher, **kwargs) print "PROMOTE", promotion_fields publisher.promote_for_review(**promotion_fields) # Create the timelog. minutes = self._timelog_spinbox.value() if minutes: # progress.setLabelText('Logging time...') publisher.sgfs.session.create('TimeLog', { 'project': publisher.entity.project(), 'entity': publisher.link, 'user': publisher.sgfs.session.guess_user(), 'duration': minutes, 'description': '%s_v%04d' % (publisher.name, publisher.version), 'date': datetime.datetime.utcnow().date(), }) # progress.hide() return publisher
def open_workspace():
    """Open the current workspace directory in the OS file browser.

    If the workspace is tagged with a Task or PublishEvent, opens that
    entity's directory instead.
    """
    path = workspace_path()
    sgfs = SGFS()
    # BUG FIX: workspace_path() was called a second time here; reuse the
    # value already bound above so both lookups agree.
    entities = sgfs.entities_from_path(path, ['Task', 'PublishEvent'])
    if entities:
        path = sgfs.path_for_entity(entities[0]) or path
    # 'open' on macOS, 'xdg-open' elsewhere (assumes a Linux desktop).
    opener = 'open' if sys.platform == 'darwin' else 'xdg-open'
    call([opener, path])
def __init__(self, exporter):
    """Build the widget, deriving a default basename from the exporter's
    filename hint with entity/step prefixes and version tokens stripped.

    :param exporter: the exporter whose ``filename_hint`` and ``workspace``
        drive the default name.
    """
    super(Widget, self).__init__()

    self._exporter = exporter
    self._existing_streams = set()

    # Start from the filename hint, without directory or extension.
    basename = os.path.basename(exporter.filename_hint)
    basename = os.path.splitext(basename)[0]
    # Normalize to word characters, then drop revision/version tokens
    # like "_v0001" or "_r0002".
    basename = re.sub(r'[^\w-]+', '_', basename)
    basename = re.sub(r'_*[rv]\d+', '', basename)
    basename = _strip_seps(basename)

    # TODO: Strip entity_name and step_name with SGFS scene_name functions.
    if self._exporter.workspace:
        sgfs = SGFS()
        tasks = sgfs.entities_from_path(self._exporter.workspace, ['Task'])
        if tasks:
            task = tasks[0]
            task_name, entity, step = task.fetch(('content', 'entity', 'step'))
            # Strip leading entity code and step short-name, if present.
            if entity:
                basename = _strip_prefix(basename, entity.fetch('code'))
            if step:
                basename = _strip_prefix(basename, step.fetch('short_name'))
            # Default to something reasonable.
            basename = (
                basename or
                task_name or
                (step.get('short_name') if step else None) or
                (entity.get('code') if entity else None) or
                ''
            )

    self._basename = basename

    self._setup_ui()

    # First screenshot.
    self.take_full_screenshot()
def check_paths(paths, only_published=True):
    """Resolve the publish status of each of the given paths.

    :param paths: iterable of filesystem paths to check.
    :param only_published: skip paths that have no PublishEvent tagged on them.
    :return: list of ``ReferenceStatus`` tuples with the publish used, the
        latest sibling publish, and whether the used one is the latest.
    """
    sgfs = SGFS()
    res = []
    for path in paths:
        publishes = sgfs.entities_from_path(path, 'PublishEvent')
        if only_published and not publishes:
            continue
        publish = publishes[0] if publishes else None
        if publish:
            # Fetch all publishes in the same stream (same link/code/type).
            # BUG FIX: 'sg_version' was not in the requested fields even
            # though it is used for sorting below; request it explicitly.
            siblings = sgfs.session.find('PublishEvent', [
                ('sg_link', 'is', publish['sg_link']),
                ('code', 'is', publish['code']),
                ('sg_type', 'is', publish['sg_type']),
            ], ['sg_path', 'sg_version'])
            siblings.sort(key=lambda x: x['sg_version'])
            # The list is already sorted by version; the last one is the
            # latest (the previous max() over the same key was redundant).
            latest = siblings[-1]
        else:
            siblings = []
            latest = None
        res.append(ReferenceStatus(
            path=path,
            used=publish,
            latest=latest,
            is_latest=publish is latest if publish else False,
            all=siblings,
        ))
    return res
def submit(self):
    """Submit the configured render, either printing the Render commands
    or creating a farmsoup job group (returned when driver == 'farmsoup').

    When ``location_method == 'publish'``, a maya_render PublishEvent is
    created first and its directory becomes the output directory.
    """
    scene_path = cmds.file(q=True, sceneName=True)
    scene_name = os.path.splitext(os.path.basename(scene_path))[0]

    if self.location_method == 'publish':
        sgfs = SGFS()
        tasks = sgfs.entities_from_path(scene_path, ['Task'])
        if not tasks:
            raise ValueError("Scene is not saved under a Shotgun Task.")
        task = tasks[0]
        # TODO: Set a status.
        # TODO: Pull code, link, description, etc, from user.
        # TODO: Add metadata about the layers rendered.
        with sgpublish.Publisher(
            link=task,
            type='maya_render',
            name='Render',
            lock_permissions=False,
        ) as publisher:
            self.output_directory = publisher.directory

    maya_version = cmds.about(version=True)
    is_farmsoup = self.driver == 'farmsoup'

    if is_farmsoup:
        client = farmsoup.client.Client()
        group = client.group(name=self.name, )

    # Every job needs the matching Maya install and license.
    base_resv = {
        'maya{}.install'.format(maya_version): 1,
        'maya{}.license'.format(maya_version): 1,
    }

    # One job per enabled (camera, layer) pair.
    for camera, include_camera in sorted(self.cameras.items()):
        if not include_camera:
            continue
        for layer, include_layer in sorted(self.layers.items()):
            if not include_layer:
                continue

            renderer = self.renderers.get(layer) or self.renderers['masterLayer']

            args = [
                'Render',
                '-V', maya_version,
            ]
            reservations = base_resv.copy()

            if self.reserve_renderer and renderer not in (
                # Don't bother reserving the built-in ones.
                'mayaSoftware',
                'mayaHardware2',
            ):
                # These look like "arnold" and "redshift".
                reservations['maya{}-{}.install'.format(maya_version, renderer)] = 1
                reservations['{}.install'.format(renderer)] = 1
                reservations['{}.license'.format(renderer)] = 1

            if renderer == 'redshift':
                args.extend((
                    '-r', 'redshift',
                    # This must not be escaped!
                    '-gpu', '{$FARMSOUP_RESERVED_GPUS_TOKENS}'))
                reservations['cpus'] = 1
                reservations['gpus'] = 1
            else:
                args.extend((
                    # Redshift doesn't understand -fnc.
                    # Nothing really depends on this, so it isn't a big deal.
                    '-fnc', 'name.#.ext',
                ))

            args.extend((
                # $F/$F_end are expanded per-chunk by farmsoup.
                '-s', '$F' if is_farmsoup else str(self.start_frame),
                '-e', '$F_end' if is_farmsoup else str(self.end_frame),
                '-x', str(int(self.width)),
                '-y', str(int(self.height)),
                '-pad', '4',
            ))
            if self.skip_existing:
                args.extend(('-skipExistingFrames', 'true'))

            # We need to ask Maya to do the templating for us, because
            # otherwise it will decide that because there are multiple
            # render layers that there will be a naming collision, and so
            # it automatically adds directories for us.
            template_name = self.filename_pattern.format(
                scene=scene_name,
                layer='<RenderLayer>',
                camera='<Camera>',
            )
            display_name = self.filename_pattern.format(
                scene=scene_name,
                layer=layer,
                camera=camera,
            ).replace(':', '_')

            args.extend((
                '-cam', camera,
                '-rl', layer,
                # We're only escaping the ones that we need to, because
                # Redshift relies upon the envvars to pick GPUs at
                # render time.
                '-rd', sh_quote(self.output_directory),
                '-im', sh_quote(template_name),
                sh_quote(scene_path)))
            command = ' '.join(args)

            if is_farmsoup:
                job = group.job(
                    name=display_name,
                    reservations=reservations,
                ).setup_as_subprocess(command, shell=True)
                # One task per frame chunk.
                job.expand_via_range('F={}-{}/{}'.format(
                    self.start_frame, self.end_frame, self.frame_chunk))
            else:
                print ' '.join(args)

    # TODO: Add a job to set the Shotgun status on each once they are done.

    if is_farmsoup:
        client.submit(group)
        return group
class SceneName(object):
    """Parse and construct Maya scene paths of the form
    ``<workspace>/<directory>/<entity>,<step>,<detail>,v####[,r####]<ext>``.

    State is assembled from (in order) a ``workspace`` path, a ``filename``
    path, and explicit keyword overrides. ``warning``/``error`` callbacks
    may be supplied; if ``error`` is ``False`` it falls back to ``warning``.
    """

    def __init__(self, **kwargs):

        # Reasonable defaults.
        self.detail = ''
        self.entity_name = ''
        self.entity_type = None
        self.extension = ''
        self.revision = 1
        self.step_name = kwargs.get('step_name')
        self.sub_directory = ''
        self.directory = 'scenes'
        self.version = 0

        # Separator used when joining basename parts.
        self.sep = ','
        self._all_seps_class = '[%s]' % re.escape('-_,.')
        # Matches runs of separators at either end of a string.
        self._strip_seps_re = re.compile(r'(^%s+)|(%s+$)' % (self._all_seps_class, self._all_seps_class))

        self._sgfs = SGFS()

        # Callbacks.
        self.warning = kwargs.pop('warning', self.warning)
        self.error = kwargs.pop('error', self.error)
        if self.error is False:
            self.error = self.warning

        self._step_names = []

        # Parse given paths.
        self.workspace = kwargs.pop('workspace', None)
        if self.workspace is not None:
            self._parse_workspace(self.workspace)
        self.filename = kwargs.pop('filename', None)
        if self.filename is not None:
            self._parse_filename(self.filename)

        # Set kwargs (explicit values override anything parsed above).
        self.detail = kwargs.pop('detail', self.detail)
        self.entity_name = kwargs.pop('entity_name', self.entity_name)
        self.entity_type = kwargs.pop('entity_type', self.entity_type)
        self.extension = kwargs.pop('extension', self.extension)
        self.revision = int(kwargs.pop('revision', self.revision))
        self.step_name = kwargs.pop('step_name', self.step_name)
        # "scenes_name" one is for backwards compatibility.
        self.directory = kwargs.pop('directory', kwargs.pop('scenes_name', self.directory))
        self.sub_directory = kwargs.pop('sub_directory', self.sub_directory)
        self.version = int(kwargs.pop('version', self.version))

        if kwargs:
            raise TypeError(('%s recieved too many kwargs: ' % self.__class__.__name__) + ', '.join(kwargs))

    def __repr__(self):
        return '<%s at 0x%x>' % (self.__class__.__name__, id(self))

    def __str__(self):
        return self.get_path()

    def warning(self, message):
        """Default warning callback; prints the message."""
        print '# Warning:', message

    def error(self, message):
        """Default error callback; raises ValueError."""
        raise ValueError(message)

    def _strip_seps(self, x):
        """Strip leading/trailing separator characters (-_,.) from *x*."""
        return self._strip_seps_re.sub('', x)

    def _split_workspace(self, workspace):
        """Split *workspace* into (task, task_workspace, remaining_relpath).

        Returns None if no Task is tagged and the error callback does not
        raise — callers must tolerate that.
        """
        tasks = self._sgfs.entities_from_path(workspace, ['Task'])
        if not tasks:
            self.error('No Tasks in current workspace')
            return  # Incase error is not an exception.
        if len(tasks) > 1:
            warning_parts = ['%s Tasks in current workspace; picking first of:' % len(tasks)]
            for task in tasks:
                warning_parts.append(str(task))
            self.warning('\n'.join(warning_parts))
        task = tasks[0]

        try:
            task_workspace = self._sgfs.path_from_template(task, 'maya_workspace')
        except ValueError as e:
            # No template configured; fall back to "<task_dir>/maya".
            self.warning('No maya_workspace template: %s' % e)
            task_workspace = os.path.join(self._sgfs.path_for_entity(task), 'maya')

        remaining = os.path.relpath(workspace, task_workspace)
        if remaining == os.path.curdir:
            remaining = ''
        return task, task_workspace, remaining

    def _parse_workspace(self, workspace, warn_on_remaining=True):
        """Populate entity/step/workspace fields from a workspace path."""
        task, task_workspace, remaining = self._split_workspace(workspace)

        if remaining.startswith(os.path.pardir):
            self.error('Entity not in workspace; SGFS seems broken! %s not in %s' % (task, workspace))
            return
        if remaining and warn_on_remaining:
            self.warning('workspace may be too specific; %r remains' % remaining)

        entity = task.fetch('entity')
        self.entity_type = entity['type']
        self.entity_name = entity.name
        self.step_name = task.fetch('step.Step.short_name')
        self.workspace = task_workspace
        # Invalidate any previously discovered step names.
        self._step_names = []

    def _parse_filename(self, filename):
        """Extract version, revision, directory and detail from a filename."""

        # Make the filename relative to the workspace, re-splitting the
        # workspace if the file lies outside of it.
        if os.path.isabs(filename):
            rel_filename = os.path.relpath(filename, self.workspace)
            if rel_filename.startswith('.'):
                self.warning('file not in workspace; %r not in %r' % (filename, self.workspace))
                _, _, rel_filename = self._split_workspace(filename)
        else:
            rel_filename = filename

        # Extension
        filename, self.extension = os.path.splitext(rel_filename)
        directory = os.path.dirname(filename)
        filename = os.path.basename(filename)

        # Versions and revisions come out of the basename, and then the dirname
        m = re.search(r'v(\d+)', filename) or re.search(r'v(\d+)', directory)
        if m:
            self.version = int(m.group(1))
        else:
            self.warning('Could not match version.')
        m = re.search(r'r(\d+)', filename) or re.search(r'r(\d+)', directory)
        if m:
            self.revision = int(m.group(1))
        else:
            self.revision = 0

        # Completely strip versioning out of the basename.
        filename = re.sub(r'[_]?[rv]\d+[_/]?', '', filename)
        filename = self._strip_seps(filename)

        # Assign (sub)directory around versioning.
        directory_parts = re.split(r'v\d+(?:/revisions?)?(?:/|$)', directory)
        if len(directory_parts) > 1:
            self.directory, self.sub_directory = directory_parts
        else:
            self.directory = directory

        # Strip entity name.
        if self.entity_name and filename.lower().startswith(self.entity_name.lower()):
            filename = filename[len(self.entity_name):]
            filename = self._strip_seps(filename)
        else:
            self.warning('Could not find shot/asset name prefix.')

        # Strip step name.
        if self.step_name and filename.lower().startswith(self.step_name.lower()):
            filename = filename[len(self.step_name):]
            filename = self._strip_seps(filename)
        else:
            self.warning('Could not find task/step prefix.')

        # Whatever survives the stripping is the "detail".
        self.detail = filename

    def get_step_names(self):
        """Return the sorted list of step names, discovered by scanning the
        workspace's grandparent directory for SGFS-tagged children."""
        if self._step_names:
            return self._step_names

        step_dir = os.path.dirname(os.path.dirname(self.workspace))
        try:
            for name in os.listdir(step_dir):
                # XXX: Hardcoded SGFS tag name?!
                if os.path.exists(os.path.join(step_dir, name, '.sgfs.yml')):
                    self._step_names.append(name)
        except OSError:
            # Directory missing/unreadable; treat as no discovered steps.
            pass

        # Make sure we have a step name.
        if self.step_name is None:
            if not self._step_names:
                self.error('Could not identify pipeline step.')
                self._step_names = ['']
            self.step_name = self._step_names[0]

        # Make sure the step name is in step_names.
        self._step_names.append(self.step_name)
        self._step_names = sorted(set(self._step_names), key=lambda x: x.lower())

        return self._step_names

    def get_basename(self):
        """Join entity, step, detail, version and revision into a basename."""
        parts = [
            self.entity_name,
            self.step_name,
            self.detail,
            'v%04d' % self.version,
            'r%04d' % self.revision if self.revision else None,
        ]
        parts = [x for x in parts if x]
        parts = [re.sub(r'[^\w-]+', '_', x) for x in parts]
        parts = [self._strip_seps(x) for x in parts]
        basename = self.sep.join(parts)
        return basename + self.extension

    def get_directory(self):
        """Return the full directory, with per-version/revision folders for
        Asset scene directories."""
        path = os.path.join(self.workspace, self.directory)
        # Add '/v0001/revisions' if in an Asset and this is a maya scene.
        # Because the artists said so. That's why.
        if self.entity_type == 'Asset' and self.directory.startswith('scenes'):
            path = os.path.join(path, 'v' + '%04d' % self.version)
            if self.revision:
                path = os.path.join(path, 'revisions')
        path = os.path.join(path, self.sub_directory)
        return path

    def get_path(self):
        return os.path.join(self.get_directory(), self.get_basename())
def check_ref_namespaces():
    """Make every scene reference's namespace match the Shotgun-defined
    ``sg_default_reference_namespace`` of its Asset, renaming as needed.

    :raises ValueError: if a reference has no Asset, or the namespaces
        could not all be fixed after several passes.
    """
    # Description : Python script runs through all the references within the maya scenes.
    #               Checks they match the Shotgun defined Maya namespace, and renames
    #               the incorrect namespaces.

    sgfs = SGFS()

    scene_references = cmds.file(query=True, reference=True)  # finds all the reference paths in the scene
    scene_references.sort(reverse=True)  # sorts them in reverse

    paths_and_assets = []
    all_assets = []

    # Collect all the assets so we can fetch them all at once.
    for path in scene_references:
        # info to query the maya namespace from the shotgun webpage
        assets = sgfs.entities_from_path(path, ['Asset'])
        if not assets:
            raise ValueError("No Asset entities for {}".format(path))
        asset = assets[0]
        paths_and_assets.append((path, asset))
        all_assets.append(asset)

    # Fetch them all in one call.
    sgfs.session.fetch(all_assets, ['sg_default_reference_namespace'])

    # Now that we have loaded all the asset namespaces, calculate what the
    # correct namespaces are.
    correct_namespaces = []  # (path, correct_namespace) tuples.
    for path, asset in paths_and_assets:

        # split is to find the duplicate number, e.g. "path{2}" -> "2"
        duplicate_number = path.split("{")
        duplicate_number = duplicate_number[-1].split("}")

        # if statement is to separate the first reference from the duplicates, because the first namespace will
        # respect the maya namespace totally the duplicates will have a suffix "_#"
        if path == duplicate_number[0]:
            # query shotgun defined namespace
            correct_namespace = asset['sg_default_reference_namespace']
        else:
            # query shotgun defined namespace + "_#"
            correct_namespace = asset['sg_default_reference_namespace'] + "_" + duplicate_number[0]

        correct_namespaces.append((path, correct_namespace))

    # Make a few passes at changing namespaces until they are all fixed.
    # This is to deal with situations in which two references have each other's
    # namespace. Maya will let us attempt to set duplicate namespaces, but will
    # silently change it on us. So we just ask nicely a few times.
    for round_i in xrange(10):

        num_fixed = 0
        for path, correct_namespace in correct_namespaces:

            # query curent namespace
            current_namespace = cmds.file(path, query=1, namespace=True)

            # renames namespace if it is incorrect
            if current_namespace != correct_namespace:
                print ' {} should be {} for {}'.format(current_namespace, correct_namespace, path)
                cmds.file(path, edit=1, namespace=correct_namespace)
                num_fixed += 1

                # Check again (just for debugging).
                new_namespace = cmds.file(path, query=1, namespace=True)
                if new_namespace != correct_namespace:
                    print ' Missed! Now', new_namespace

        print "Changed {} in round {}.".format(num_fixed, round_i)

        # Everything is fixed; bail!
        if not num_fixed:
            break

    else:
        # for/else: only reached if we never broke out of the loop.
        raise ValueError("Could not fix all references after many attempts.")