def test_assemble_case_sensitive():
    '''Assemble collections respecting casing.'''
    items = [
        'head_v1.001.ext',
        'head_v1.002.ext',
        'HEAD_v1.003.ext',
        'HEAD_v1.004.ext'
    ]
    collections, _ = clique.assemble(items, case_sensitive=True)

    # Differently-cased heads must end up in separate collections.
    expected = [
        clique.Collection('head_v1.', '.ext', 3, indexes={1, 2}),
        clique.Collection('HEAD_v1.', '.ext', 3, indexes={3, 4})
    ]
    assert collections == expected
def make_sequence_collection(path, otio_range, metadata):
    """Make collection from path, otio range and otio metadata.

    Args:
        path (str): path to image sequence containing a ``%``-style
            frame placeholder (e.g. ``%04d``)
        otio_range (otio._ot._ot.TimeRange): range to be used
        metadata (dict): data where the ``padding`` value can be found

    Returns:
        tuple or None: ``(dir_path, collection)`` where ``dir_path`` (str)
            is the sequence directory and ``collection`` the clique
            Collection, or ``None`` when *path* has no frame placeholder.
    """
    # Only sequence paths (with a % frame token) can be assembled.
    if "%" not in path:
        return None
    file_name = os.path.basename(path)
    dir_path = os.path.dirname(path)
    # Everything before the frame token is the head; extension is the tail.
    head = file_name.split("%")[0]
    tail = os.path.splitext(file_name)[-1]
    first, last = otio_range_to_frame_range(otio_range)
    collection = clique.Collection(
        head=head, tail=tail, padding=metadata["padding"])
    # `update` accepts any iterable; pass the range directly instead of
    # materializing an intermediate list. Range end is exclusive, so +1
    # keeps the last frame included.
    collection.indexes.update(range(first, last + 1))
    return dir_path, collection
def test_assemble_with_custom_pattern():
    '''Assemble with custom pattern.'''
    collections, _ = clique.assemble(
        ['head_v1.001.ext', 'head_v1.002.ext'],
        # Raw string: '\.' in a plain string is an invalid escape sequence
        # (DeprecationWarning, SyntaxError in future Python versions).
        patterns=[re.compile(r'\.{0}\.ext$'.format(clique.DIGITS_PATTERN))]
    )
    expected = [clique.Collection('head_v1.', '.ext', 3, indexes=set([1, 2]))]
    assert collections == expected
def test_assemble_case_insensitive():
    '''Assemble collections ignoring casing.'''
    collections, _ = clique.assemble(
        ['head_v1.001.ext', 'HEAD_v1.002.ext', 'head_v1.003.ext'],
        case_sensitive=False
    )

    # All three items fold into one collection despite mixed-case heads.
    expected = [
        clique.Collection('head_v1.', '.ext', 3, indexes={1, 2, 3})
    ]
    assert collections == expected
def test_assemble_minimum_items_filter():
    '''Filter assembled collections by minimum number of items.'''
    items = [
        'head_v1.001.ext',
        'head_v1.002.ext',
        'head_v1.003.ext',
        'head_v2.001.ext',
        'head_v2.002.ext'
    ]

    # minimum_items=1 keeps every possible grouping, including the
    # version-number collections.
    collections, _ = clique.assemble(items, minimum_items=1)
    expected = [
        clique.Collection('head_v', '.001.ext', 0, indexes={1, 2}),
        clique.Collection('head_v', '.002.ext', 0, indexes={1, 2}),
        clique.Collection('head_v', '.003.ext', 0, indexes={1}),
        clique.Collection('head_v1.', '.ext', 3, indexes={1, 2, 3}),
        clique.Collection('head_v2.', '.ext', 3, indexes={1, 2})
    ]
    assert sorted(collections) == sorted(expected)

    # minimum_items=3 keeps only the three-frame sequence.
    collections, _ = clique.assemble(items, minimum_items=3)
    expected = [
        clique.Collection('head_v1.', '.ext', 3, indexes={1, 2, 3})
    ]
    assert collections == expected

    # minimum_items=5 filters everything out.
    collections, _ = clique.assemble(items, minimum_items=5)
    assert collections == []
def process(self, context):
    """Create a playblast (mov) instance for every non-default camera.

    Iterates all camera shapes in the Maya scene, skips the built-in
    viewport cameras, and creates one pyblish instance per remaining
    camera transform. Also ensures the transform carries a boolean
    ``publish`` attribute and attaches a single-frame clique collection
    pointing at the expected playblast output path.
    """
    # Built-in Maya viewport cameras that must never be published.
    default_cameras = ["persp", "top", "front", "side"]
    for camera in pymel.core.ls(type="camera"):
        transform = camera.getTransform()

        # Skip default cameras
        if transform.name() in default_cameras:
            continue

        # Remove illegal disk characters
        # (":" appears in namespaced node names and is invalid on disk)
        name = transform.name().replace(":", "_")

        # Movie instance
        instance = context.create_instance(name=name)
        instance.add(camera)
        instance.data["families"] = ["local", "mov", "playblast"]
        instance.data["family"] = "mov"
        label = "{0} - {1} - {2}".format(name, "playblast", "local")
        instance.data["label"] = label

        # Adding/Checking publish attribute
        # Default is off; the artist opts in via the node attribute.
        instance.data["publish"] = False
        if hasattr(transform, "publish"):
            attr = pymel.core.Attribute(transform.name() + ".publish")
            instance.data["publish"] = attr.get()
        else:
            # Create the attribute so it shows up in the channel box
            # for future runs.
            pymel.core.addAttr(
                transform,
                longName="publish",
                defaultValue=False,
                attributeType="bool"
            )
            attr = pymel.core.Attribute(transform.name() + ".publish")
            pymel.core.setAttr(attr, channelBox=True)

        # Generate collection
        # Output path: <scene dir>/workspace/<scene name>_<camera>.<frame>.mov
        filename = os.path.splitext(
            os.path.basename(context.data["currentFile"]))[0]
        path = os.path.join(
            os.path.dirname(context.data["currentFile"]),
            "workspace",
            filename
        )
        head = "{0}_{1}.".format(path, name)
        tail = ".mov"
        collection = clique.Collection(head=head, padding=4, tail=tail)
        # Seed the collection with the timeline start frame only.
        frame_start = int(
            pymel.core.playbackOptions(query=True, minTime=True))
        collection.add(head + str(frame_start).zfill(4) + tail)
        instance.data["collection"] = collection
def get_version_collections(self, collection, version):
    """Return all collections of previous collection versions."""
    previous = []
    for index in range(1, int(version)):
        # Build a "vNNN" token padded to the same width as the
        # current version string and substitute it into the head.
        token = "v" + str(index).zfill(len(version))
        new_head = collection.head.replace("v" + version, token)

        older = clique.Collection(
            head=new_head.replace("\\", "/"),
            padding=collection.padding,
            tail=collection.tail
        )
        # Carry over the instance metadata stored on the collection.
        older.name = collection.name
        older.label = collection.label
        older.family = collection.family
        older.version = index
        older.nodes = collection.nodes
        previous.append(older)
    return previous
def _create_representation(self, start, duration):
    """Creating representation data.

    Builds a review representation dict from the frames collected in
    ``self.used_frames``, re-deriving the frame range from the actual
    collection indexes.

    Args:
        start (int): start frame
        duration (int): duration frames

    Returns:
        dict: representation data
    """
    end = start + duration

    # create default representation data
    representation_data = {
        "frameStart": start,
        "frameEnd": end,
        "stagingDir": self.staging_dir,
        "tags": ["review", "ftrackreview", "delete"]
    }

    collection = clique.Collection(
        self.temp_file_head,
        tail=self.output_ext,
        padding=self.padding,
        indexes=set(self.used_frames)
    )
    # Range is re-derived from the collection, overriding the defaults
    # above (used_frames may not match start/duration exactly).
    start = min(collection.indexes)
    end = max(collection.indexes)

    # list(collection) is the idiomatic (and direct) way to materialize
    # the file names; a comprehension was a needless copy loop.
    files = list(collection)
    ext = collection.format("{tail}")
    representation_data.update({
        "name": ext[1:],   # extension without the leading dot
        "ext": ext[1:],
        "files": files,
        "frameStart": start,
        "frameEnd": end,
    })
    return representation_data
def process(self, context):
    """Collect output instances for older versions of published collections.

    Scans existing instances for supported families, clones their
    collections with instance metadata attached, discovers previous
    version collections on disk, and creates one "output" instance per
    populated, unique collection.
    """
    # Gather all valid collections
    valid_families = ["img", "cache", "scene", "mov", "camera", "geometry"]
    invalid_families = ["read"]
    collections = []
    # Context itself is iterable of instances; merge with any stored list.
    instances = context + context.data.get("instances", [])
    for instance in instances:
        families = instance.data.get("families", [])
        families += [instance.data["family"]]

        # Any overlap with the blacklist disqualifies the instance.
        invalid_family = list(set(families) & set(invalid_families))
        if invalid_family:
            continue

        family_type = list(set(families) & set(valid_families))
        if not family_type:
            continue

        instance_collection = instance.data.get("collection", None)
        if instance_collection:
            # Clone so the original instance collection stays untouched.
            collection = clique.Collection(
                head=instance_collection.head,
                padding=instance_collection.padding,
                tail=instance_collection.tail)

            # Store instance data on collection for later usage
            collection.name = instance.data["name"]
            collection.label = instance.data["label"]
            collection.family = family_type[0]
            collection.version = context.data["version"]
            collection.nodes = instance[:]

            # Get older version collections
            version = self.get_version(
                os.path.basename(collection.format()), "v")
            if version:
                collections.extend(
                    self.get_version_collections(collection, version[1]))

        # Single file outputs
        output_path = instance.data.get("output_path", "")
        if output_path:
            # Get older version collections
            version = self.get_version(os.path.basename(output_path), "v")
            if version:
                self.single_file_instances(
                    instance, version[1], family_type, context)

    files = self.scan_collections_files(collections)

    # Generate instances from collections
    populated_collections = []
    for collection in collections:
        collection = self.populate_collection(collection, files)

        # Skip collections with no files found on disk.
        if not list(collection):
            continue

        # Ensure collections are unique
        if collection in populated_collections:
            continue
        else:
            populated_collections.append(collection)

        instance = context.create_instance(name=collection.name)
        # Label: "<prefix>- <basename> [<frame ranges>]"
        label = collection.label.split("-")[0] + "- "
        fmt = "{head}{padding}{tail}"
        label += os.path.basename(collection.format(fmt))
        label += collection.format(" [{ranges}]")
        instance.data["label"] = label
        instance.data["families"] = [collection.family]
        instance.data["family"] = "output"
        instance.data["publish"] = False
        instance.data["collection"] = collection
        instance.data["version"] = collection.version
        for node in collection.nodes:
            instance.add(node)
def process(self, context):
    """Create task instances for supported Hiero export tasks.

    Walks every "trackItem" instance in the context and, for each of its
    export tasks, maps the Hiero exporter class to an asset type
    (img/mov/scene/audio), then creates a "trackItem.task" instance
    carrying either a clique collection (image sequences) or a plain
    output path.
    """
    # Local imports: this runs inside Hiero, keep plugin import cheap.
    import os
    import re
    import hiero.exporters as he
    import clique

    for parent in context:
        if "trackItem" != parent.data["family"]:
            continue

        for task in parent.data["tasks"]:
            asset_type = None

            # Map exporter class -> asset type. Checks are sequential
            # and non-exclusive by design; later matches overwrite.
            hiero_cls = he.FnSymLinkExporter.SymLinkExporter
            if isinstance(task, hiero_cls):
                asset_type = "img"
                # Symlinked movie containers are "mov", not "img".
                movie_formats = [".mov", ".R3D"]
                ext = os.path.splitext(task.resolvedExportPath())[1]
                if ext in movie_formats:
                    asset_type = "mov"

            hiero_cls = he.FnTranscodeExporter.TranscodeExporter
            if isinstance(task, hiero_cls):
                asset_type = "img"
                if task.resolvedExportPath().endswith(".mov"):
                    asset_type = "mov"

            hiero_cls = he.FnNukeShotExporter.NukeShotExporter
            if isinstance(task, hiero_cls):
                asset_type = "scene"

            hiero_cls = he.FnAudioExportTask.AudioExportTask
            if isinstance(task, hiero_cls):
                asset_type = "audio"

            # Skip all non supported export types
            if not asset_type:
                continue

            resolved_path = task.resolvedExportPath()

            # Formatting the basename to not include frame padding or
            # extension.
            name = os.path.splitext(os.path.basename(resolved_path))[0]
            name = name.replace(".", "")
            name = name.replace("#", "")
            name = re.sub(r"%.*d", "", name)

            instance = context.create_instance(name=name, parent=parent)
            instance.data["task"] = task
            instance.data["item"] = parent.data["item"]
            instance.data["family"] = "trackItem.task"
            instance.data["families"] = [asset_type, "local", "task"]
            label = "{0} ({1}) - {2} - local".format(
                name, parent, asset_type)
            instance.data["label"] = label
            instance.data["handles"] = parent.data["handles"]

            # Add collection or output
            if asset_type == "img":
                collection = None
                # "#" style padding: count the hashes for padding width.
                if "#" in resolved_path:
                    head = resolved_path.split("#")[0]
                    padding = resolved_path.count("#")
                    tail = resolved_path.split("#")[-1]
                    collection = clique.Collection(
                        head=head, padding=padding, tail=tail)
                # printf style padding ("%04d") is parsed by clique.
                if "%" in resolved_path:
                    collection = clique.parse(
                        resolved_path, pattern="{head}{padding}{tail}")
                instance.data["collection"] = collection
            else:
                instance.data["output_path"] = resolved_path
def process(self, instance):
    """Integrate a "master" (latest) version for a published instance.

    Validates the master anatomy template, filters ignored
    representations, resolves the source version entity, backs up the
    current master publish dir, copies/hardlinks the new files into
    place, and writes the new master version + representation documents
    to the database in one bulk write. On any failure the backed-up
    master directory is restored.
    """
    self.log.debug(
        "--- Integration of Master version for subset `{}` begins.".format(
            instance.data.get("subset", str(instance))
        )
    )
    published_repres = instance.data.get("published_representations")
    if not published_repres:
        self.log.debug(
            "*** There are not published representations on the instance."
        )
        return

    project_name = api.Session["AVALON_PROJECT"]

    # TODO raise error if master not set?
    anatomy = instance.context.data["anatomy"]
    if "master" not in anatomy.templates:
        self.log.warning("!!! Anatomy does not have set `master` key!")
        return

    if "path" not in anatomy.templates["master"]:
        self.log.warning((
            "!!! There is not set `path` template in `master` anatomy"
            " for project \"{}\"."
        ).format(project_name))
        return

    master_template = anatomy.templates["master"]["path"]
    self.log.debug("`Master` template check was successful. `{}`".format(
        master_template
    ))

    master_publish_dir = self.get_publish_dir(instance)

    src_version_entity = instance.data.get("versionEntity")

    # Drop representations whose (lowercased) name is configured as ignored.
    filtered_repre_ids = []
    for repre_id, repre_info in published_repres.items():
        repre = repre_info["representation"]
        if repre["name"].lower() in self.ignored_representation_names:
            self.log.debug(
                "Filtering representation with name: `{}`".format(
                    repre["name"].lower()
                )
            )
            filtered_repre_ids.append(repre_id)

    for repre_id in filtered_repre_ids:
        published_repres.pop(repre_id, None)

    if not published_repres:
        self.log.debug(
            "*** All published representations were filtered by name."
        )
        return

    if src_version_entity is None:
        self.log.debug((
            "Published version entity was not sent in representation data."
            " Querying entity from database."
        ))
        src_version_entity = (
            self.version_from_representations(published_repres)
        )

    if not src_version_entity:
        self.log.warning((
            "!!! Can't find origin version in database."
            " Skipping Master version publish."
        ))
        return

    # Collect every destination path produced by the publish
    # (both copies and hardlinks), normalized and de-duplicated.
    all_copied_files = []
    transfers = instance.data.get("transfers", list())
    for _src, dst in transfers:
        dst = os.path.normpath(dst)
        if dst not in all_copied_files:
            all_copied_files.append(dst)

    hardlinks = instance.data.get("hardlinks", list())
    for _src, dst in hardlinks:
        dst = os.path.normpath(dst)
        if dst not in all_copied_files:
            all_copied_files.append(dst)

    # All file paths belonging to published representations.
    all_repre_file_paths = []
    for repre_info in published_repres.values():
        published_files = repre_info.get("published_files") or []
        for file_path in published_files:
            file_path = os.path.normpath(file_path)
            if file_path not in all_repre_file_paths:
                all_repre_file_paths.append(file_path)

    # TODO this is not best practice of getting resources for publish
    # WARNING due to this we must remove all files from master publish dir
    instance_publish_dir = os.path.normpath(
        instance.data["publishDir"]
    )
    # Extra (non-representation) files under publishDir that must also
    # be mirrored into the master dir (resources etc.).
    other_file_paths_mapping = []
    for file_path in all_copied_files:
        # Check if it is from publishDir
        if not file_path.startswith(instance_publish_dir):
            continue

        if file_path in all_repre_file_paths:
            continue

        dst_filepath = file_path.replace(
            instance_publish_dir, master_publish_dir
        )
        other_file_paths_mapping.append((file_path, dst_filepath))

    # Current version
    old_version, old_repres = (
        self.current_master_ents(src_version_entity)
    )

    old_repres_by_name = {
        repre["name"].lower(): repre for repre in old_repres
    }

    # Reuse the existing master version id so the document is replaced
    # rather than duplicated.
    if old_version:
        new_version_id = old_version["_id"]
    else:
        new_version_id = io.ObjectId()

    new_master_version = {
        "_id": new_version_id,
        "version_id": src_version_entity["_id"],
        "parent": src_version_entity["parent"],
        "type": "master_version",
        "schema": "pype:master_version-1.0"
    }
    schema.validate(new_master_version)

    # Don't make changes in database until everything is O.K.
    bulk_writes = []

    if old_version:
        self.log.debug("Replacing old master version.")
        bulk_writes.append(
            ReplaceOne(
                {"_id": new_master_version["_id"]},
                new_master_version
            )
        )
    else:
        self.log.debug("Creating first master version.")
        bulk_writes.append(
            InsertOne(new_master_version)
        )

    # Separate old representations into `to replace` and `to delete`
    old_repres_to_replace = {}
    old_repres_to_delete = {}
    for repre_info in published_repres.values():
        repre = repre_info["representation"]
        repre_name_low = repre["name"].lower()
        if repre_name_low in old_repres_by_name:
            old_repres_to_replace[repre_name_low] = (
                old_repres_by_name.pop(repre_name_low)
            )

    if old_repres_by_name:
        old_repres_to_delete = old_repres_by_name

    archived_repres = list(io.find({
        # Check what is type of archived representation
        # NOTE(review): "archived_repsentation" looks like a typo for
        # "archived_representation" (the insert below uses the full
        # spelling) — confirm against the documents actually stored.
        "type": "archived_repsentation",
        "parent": new_version_id
    }))
    archived_repres_by_name = {}
    for repre in archived_repres:
        repre_name_low = repre["name"].lower()
        archived_repres_by_name[repre_name_low] = repre

    # Move the current master dir aside as a backup so it can be
    # restored if integration fails below.
    backup_master_publish_dir = None
    if os.path.exists(master_publish_dir):
        backup_master_publish_dir = master_publish_dir + ".BACKUP"
        max_idx = 10
        idx = 0
        _backup_master_publish_dir = backup_master_publish_dir
        while os.path.exists(_backup_master_publish_dir):
            self.log.debug((
                "Backup folder already exists."
                " Trying to remove \"{}\""
            ).format(_backup_master_publish_dir))
            try:
                shutil.rmtree(_backup_master_publish_dir)
                backup_master_publish_dir = _backup_master_publish_dir
                break
            except Exception:
                self.log.info((
                    "Could not remove previous backup folder."
                    " Trying to add index to folder name"
                ))
                # Probe ".BACKUP0", ".BACKUP1", ... up to max_idx.
                _backup_master_publish_dir = (
                    backup_master_publish_dir + str(idx)
                )
                if not os.path.exists(_backup_master_publish_dir):
                    backup_master_publish_dir = _backup_master_publish_dir
                    break

                if idx > max_idx:
                    raise AssertionError((
                        "Backup folders are fully occupied to max index \"{}\""
                    ).format(max_idx))
                    break

                idx += 1

        self.log.debug("Backup folder path is \"{}\"".format(
            backup_master_publish_dir
        ))
        try:
            os.rename(master_publish_dir, backup_master_publish_dir)
        except PermissionError:
            raise AssertionError((
                "Could not create master version because it is not"
                " possible to replace current master files."
            ))

    try:
        src_to_dst_file_paths = []
        for repre_info in published_repres.values():

            # Skip if new repre does not have published repre files
            published_files = repre_info["published_files"]
            if len(published_files) == 0:
                continue

            # Prepare anatomy data
            anatomy_data = repre_info["anatomy_data"]
            # Master has no version in its path template.
            anatomy_data.pop("version", None)

            # Get filled path to repre context
            anatomy_filled = anatomy.format(anatomy_data)
            template_filled = anatomy_filled["master"]["path"]

            repre_data = {
                "path": str(template_filled),
                "template": master_template
            }
            repre_context = template_filled.used_values
            for key in self.db_representation_context_keys:
                if (
                    key in repre_context
                    or key not in anatomy_data
                ):
                    continue

                repre_context[key] = anatomy_data[key]

            # Prepare new repre
            repre = copy.deepcopy(repre_info["representation"])
            repre["parent"] = new_master_version["_id"]
            repre["context"] = repre_context
            repre["data"] = repre_data
            repre.pop("_id", None)

            schema.validate(repre)

            repre_name_low = repre["name"].lower()
            # Replace current representation
            if repre_name_low in old_repres_to_replace:
                old_repre = old_repres_to_replace.pop(repre_name_low)
                repre["_id"] = old_repre["_id"]
                bulk_writes.append(
                    ReplaceOne(
                        {"_id": old_repre["_id"]},
                        repre
                    )
                )

            # Unarchive representation
            elif repre_name_low in archived_repres_by_name:
                archived_repre = archived_repres_by_name.pop(
                    repre_name_low
                )
                old_id = archived_repre["old_id"]
                repre["_id"] = old_id
                bulk_writes.append(
                    ReplaceOne(
                        {"old_id": old_id},
                        repre
                    )
                )

            # Create representation
            else:
                repre["_id"] = io.ObjectId()
                bulk_writes.append(
                    InsertOne(repre)
                )

            # Prepare paths of source and destination files
            if len(published_files) == 1:
                src_to_dst_file_paths.append(
                    (published_files[0], template_filled)
                )
                continue

            # Multiple files must form exactly one clean frame sequence.
            collections, remainders = clique.assemble(published_files)
            if remainders or not collections or len(collections) > 1:
                raise Exception((
                    "Integrity error. Files of published representation "
                    "is combination of frame collections and single files."
                    "Collections: `{}` Single files: `{}`"
                ).format(str(collections), str(remainders)))

            src_col = collections[0]

            # Get head and tail for collection
            # A sentinel is injected as the frame token so the filled
            # template can be split into head/tail around it.
            frame_splitter = "_-_FRAME_SPLIT_-_"
            anatomy_data["frame"] = frame_splitter
            _anatomy_filled = anatomy.format(anatomy_data)
            _template_filled = _anatomy_filled["master"]["path"]
            head, tail = _template_filled.split(frame_splitter)
            padding = int(
                anatomy.templates["render"].get(
                    "frame_padding",
                    anatomy.templates["render"].get("padding")
                )
            )

            dst_col = clique.Collection(
                head=head, padding=padding, tail=tail
            )
            dst_col.indexes.clear()
            dst_col.indexes.update(src_col.indexes)
            # src and dst collections share indexes, so zip pairs each
            # source frame with its destination path.
            for src_file, dst_file in zip(src_col, dst_col):
                src_to_dst_file_paths.append(
                    (src_file, dst_file)
                )

        self.path_checks = []

        # Copy(hardlink) paths of source and destination files
        # TODO should we *only* create hardlinks?
        # TODO should we keep files for deletion until this is successful?
        for src_path, dst_path in src_to_dst_file_paths:
            self.copy_file(src_path, dst_path)

        for src_path, dst_path in other_file_paths_mapping:
            self.copy_file(src_path, dst_path)

        # Archive not replaced old representations
        for repre_name_low, repre in old_repres_to_delete.items():
            # Replace archived representation (This is backup)
            # - should not happen to have both repre and archived repre
            if repre_name_low in archived_repres_by_name:
                archived_repre = archived_repres_by_name.pop(
                    repre_name_low
                )
                repre["old_id"] = repre["_id"]
                repre["_id"] = archived_repre["_id"]
                repre["type"] = archived_repre["type"]
                bulk_writes.append(
                    ReplaceOne(
                        {"_id": archived_repre["_id"]},
                        repre
                    )
                )

            else:
                repre["old_id"] = repre["_id"]
                repre["_id"] = io.ObjectId()
                repre["type"] = "archived_representation"
                bulk_writes.append(
                    InsertOne(repre)
                )

        if bulk_writes:
            io._database[io.Session["AVALON_PROJECT"]].bulk_write(
                bulk_writes
            )

        # Remove backuped previous master
        if (
            backup_master_publish_dir is not None
            and os.path.exists(backup_master_publish_dir)
        ):
            shutil.rmtree(backup_master_publish_dir)

    except Exception:
        # Restore the previous master dir before re-raising.
        if (
            backup_master_publish_dir is not None
            and os.path.exists(backup_master_publish_dir)
        ):
            os.rename(backup_master_publish_dir, master_publish_dir)
        self.log.error((
            "!!! Creating of Master version failed."
            " Previous master version maybe lost some data!"
        ))
        raise

    self.log.debug((
        "--- Master version integration for subset `{}`"
        " seems to be successful."
    ).format(
        instance.data.get("subset", str(instance))
    ))
def process(self, instance):
    """Encode the instance's frame collection to a mov via ffmpeg.

    Skips remote instances, missing ffmpeg, empty collections, and
    single-frame collections. On success registers the resulting movie
    as an ftrack component on the instance.
    """
    # Remote instances are processed elsewhere.
    if "remote" in instance.data.get("families", []):
        return

    if not self.check_executable("ffmpeg"):
        msg = "Skipping movie extraction because \"ffmpeg\" wasn't found."
        self.log.info(msg)
        return

    collection = instance.data.get("collection", [])

    if not list(collection):
        msg = "Skipping \"{0}\" because no frames was found."
        self.log.info(msg.format(instance.data["name"]))
        return

    # A movie needs at least two frames.
    if len(list(collection)) == 1:
        msg = "Skipping \"{0}\" because only a single frame was found."
        self.log.info(msg.format(instance.data["name"]))
        return

    # Output is "<head>0001.mov" next to the source frames.
    output_file = collection.format("{head}0001.mov")
    args = [
        "ffmpeg", "-y",
        "-gamma", "2.2",
        "-framerate", "25",
        # Start reading at the collection's first frame index.
        "-start_number", str(list(collection.indexes)[0]),
        "-i", collection.format("{head}{padding}{tail}"),
        "-q:v", "0",
        "-pix_fmt", "yuv420p",
        # Force even dimensions (yuv420p requirement) and fix the
        # color matrix for HD delivery.
        "-vf",
        "scale=trunc(iw/2)*2:trunc(ih/2)*2,colormatrix=bt601:bt709",
        "-timecode", "00:00:00:01",
        output_file
    ]

    self.log.debug("Executing args: {0}".format(args))

    # Can't use subprocess.check_output, cause Houdini doesn't like that.
    p = subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        stdin=subprocess.PIPE
    )

    output = p.communicate()[0]

    if p.returncode != 0:
        raise ValueError(output)

    self.log.debug(output)

    # Wrap the single movie file in a collection for ftrack.
    output_collection = clique.Collection(
        head=collection.format("{head}"),
        padding=4,
        tail=".mov"
    )
    output_collection.add(output_file)

    components = instance.data.get("ftrackComponentsList", [])
    components.append({
        "assettype_data": {"short": "mov"},
        "assetversion_data": {
            "version": instance.context.data["version"]
        },
        "component_data": {
            "name": instance.data.get(
                "component_name", instance.data["name"]
            ),
        },
        "component_path": output_collection.format(),
        "component_overwrite": True,
    })
    instance.data["ftrackComponentsList"] = components
def process(self, instance):
    """Collect version data and a representation for an OTIO clip.

    Derives handle sizes from the trimmed media ranges, stores them in
    ``versionData``, then builds a representation either from a file
    sequence (new ``ImageSequenceReference`` schema or legacy metadata
    padding) or from a single media file.

    Fixes over the previous revision:
    - ``is_sequence`` is initialized to ``None`` so the single-file
      branch no longer raises ``NameError`` when neither sequence check
      matches.
    - the single-file branch logged ``path``, which is undefined in that
      branch (``NameError``); it now logs the media file name.
    """
    if not instance.data.get("representations"):
        instance.data["representations"] = list()
    version_data = dict()

    # get basic variables
    otio_clip = instance.data["otioClip"]
    frame_start = instance.data["frameStart"]
    frame_end = instance.data["frameEnd"]

    # generate range in parent
    otio_src_range = otio_clip.source_range
    otio_avalable_range = otio_clip.available_range()
    trimmed_media_range = pype.lib.trim_media_range(
        otio_avalable_range, otio_src_range)

    # calculate with handles
    otio_src_range_handles = pype.lib.otio_range_with_handles(
        otio_src_range, instance)
    trimmed_media_range_h = pype.lib.trim_media_range(
        otio_avalable_range, otio_src_range_handles)

    # frame start and end from media
    s_frame_start, s_frame_end = pype.lib.otio_range_to_frame_range(
        trimmed_media_range)
    a_frame_start, a_frame_end = pype.lib.otio_range_to_frame_range(
        otio_avalable_range)
    a_frame_start_h, a_frame_end_h = pype.lib.otio_range_to_frame_range(
        trimmed_media_range_h)

    # fix frame_start and frame_end frame to be in range of media
    if a_frame_start_h < a_frame_start:
        a_frame_start_h = a_frame_start
    if a_frame_end_h > a_frame_end:
        a_frame_end_h = a_frame_end

    # count the difference for frame_start and frame_end
    diff_start = s_frame_start - a_frame_start_h
    diff_end = a_frame_end_h - s_frame_end

    # add to version data start and end range data
    # for loader plugins to be correctly displayed and loaded
    version_data.update({
        "frameStart": frame_start,
        "frameEnd": frame_end,
        "handleStart": diff_start,
        "handleEnd": diff_end,
        "fps": otio_avalable_range.start_time.rate
    })

    # change frame_start and frame_end values
    # for representation to be correctly renumbered in integrate_new
    frame_start -= diff_start
    frame_end += diff_end

    media_ref = otio_clip.media_reference
    metadata = media_ref.metadata

    # FIX: initialize so the `if is_sequence` check below cannot raise
    # NameError when the media is a single file.
    is_sequence = None

    # check in two way if it is sequence
    if hasattr(otio.schema, "ImageSequenceReference"):
        # for OpenTimelineIO 0.13 and newer
        if isinstance(media_ref, otio.schema.ImageSequenceReference):
            is_sequence = True
    else:
        # for OpenTimelineIO 0.12 and older
        if metadata.get("padding"):
            is_sequence = True

    self.log.info(
        "frame_start-frame_end: {}-{}".format(frame_start, frame_end))

    if is_sequence:
        # file sequence way
        if hasattr(media_ref, "target_url_base"):
            self.staging_dir = media_ref.target_url_base
            head = media_ref.name_prefix
            tail = media_ref.name_suffix
            collection = clique.Collection(
                head=head,
                tail=tail,
                padding=media_ref.frame_zero_padding
            )
            collection.indexes.update(
                range(a_frame_start_h, a_frame_end_h + 1))
            self.log.debug(collection)
            repre = self._create_representation(
                frame_start, frame_end, collection=collection)
        else:
            # in case it is file sequence but not new OTIO schema
            # `ImageSequenceReference`
            path = media_ref.target_url
            collection_data = pype.lib.make_sequence_collection(
                path, trimmed_media_range, metadata)
            self.staging_dir, collection = collection_data
            self.log.debug(collection)
            repre = self._create_representation(
                frame_start, frame_end, collection=collection)
    else:
        # single file way
        dirname, filename = os.path.split(media_ref.target_url)
        self.staging_dir = dirname
        # FIX: previously logged `path`, which is undefined here.
        self.log.debug(filename)
        repre = self._create_representation(
            frame_start, frame_end, file=filename)

    if repre:
        instance.data["versionData"] = version_data
        self.log.debug(">>>>>>>> version data {}".format(version_data))
        # add representation to instance data
        instance.data["representations"].append(repre)
        self.log.debug(">>>>>>>> {}".format(repre))
def process(self, instance):
    """Render review representations from the instance's OTIO review track.

    Iterates the review clips, re-frames the first/last (or only) clip
    with the instance's handles, renders each clip, gap, or legacy
    sequence segment, tracks the frames used, and appends one
    representation per processed segment.
    """
    # TODO: convert resulting image sequence to mp4
    # TODO: add audio output to the mp4 if audio in review is on.

    # get otio clip and other time info from instance clip
    # TODO: what if handles are different in `versionData`?
    handle_start = instance.data["handleStart"]
    handle_end = instance.data["handleEnd"]
    otio_review_clips = instance.data["otioReviewClips"]

    # add plugin wide attributes
    self.representation_files = list()
    self.used_frames = list()
    # Workfile start shifted back by the start handle.
    self.workfile_start = int(instance.data.get(
        "workfileFrameStart", 1001)) - handle_start
    self.padding = len(str(self.workfile_start))
    self.used_frames.append(self.workfile_start)
    self.to_width = instance.data.get(
        "resolutionWidth") or self.to_width
    self.to_height = instance.data.get(
        "resolutionHeight") or self.to_height

    # skip instance if no reviewable data available
    # (a single non-Clip item means the track holds only a gap)
    if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \
            and (len(otio_review_clips) == 1):
        self.log.warning(
            "Instance `{}` has nothing to process".format(instance))
        return
    else:
        self.staging_dir = self.staging_dir(instance)
        if not instance.data.get("representations"):
            instance.data["representations"] = list()

    # loop available clips in otio track
    for index, r_otio_cl in enumerate(otio_review_clips):
        # QUESTION: what if transition on clip?

        # get frame range values
        src_range = r_otio_cl.source_range
        start = src_range.start_time.value
        duration = src_range.duration.value
        available_range = None
        self.actual_fps = src_range.duration.rate

        # add available range only if not gap
        if isinstance(r_otio_cl, otio.schema.Clip):
            available_range = r_otio_cl.available_range()
            self.actual_fps = available_range.duration.rate

        # reframing handles conditions
        if (len(otio_review_clips) > 1) and (index == 0):
            # more clips | first clip reframing with handle
            start -= handle_start
            duration += handle_start
        elif len(otio_review_clips) > 1 \
                and (index == len(otio_review_clips) - 1):
            # more clips | last clip reframing with handle
            duration += handle_end
        elif len(otio_review_clips) == 1:
            # one clip | add both handles
            start -= handle_start
            duration += (handle_start + handle_end)

        if available_range:
            available_range = self._trim_available_range(
                available_range, start, duration, self.actual_fps)

        # process all track items of the track
        if isinstance(r_otio_cl, otio.schema.Clip):
            # process Clip
            media_ref = r_otio_cl.media_reference
            metadata = media_ref.metadata
            is_sequence = None

            # check in two way if it is sequence
            if hasattr(otio.schema, "ImageSequenceReference"):
                # for OpenTimelineIO 0.13 and newer
                if isinstance(media_ref,
                              otio.schema.ImageSequenceReference):
                    is_sequence = True
            else:
                # for OpenTimelineIO 0.12 and older
                if metadata.get("padding"):
                    is_sequence = True

            if is_sequence:
                # file sequence way
                if hasattr(media_ref, "target_url_base"):
                    dirname = media_ref.target_url_base
                    head = media_ref.name_prefix
                    tail = media_ref.name_suffix
                    first, last = \
                        openpype.lib.otio_range_to_frame_range(
                            available_range)
                    collection = clique.Collection(
                        head=head,
                        tail=tail,
                        padding=media_ref.frame_zero_padding)
                    collection.indexes.update(
                        [i for i in range(first, (last + 1))])
                    # render segment
                    self._render_seqment(
                        sequence=[dirname, collection])
                    # generate used frames
                    self._generate_used_frames(
                        len(collection.indexes))
                else:
                    # in case it is file sequence but not new OTIO
                    # schema `ImageSequenceReference`
                    path = media_ref.target_url
                    collection_data = \
                        openpype.lib.make_sequence_collection(
                            path, available_range, metadata)
                    dir_path, collection = collection_data
                    # render segment
                    self._render_seqment(
                        sequence=[dir_path, collection])
                    # generate used frames
                    self._generate_used_frames(
                        len(collection.indexes))
            else:
                # single video file way
                path = media_ref.target_url
                # render video file to sequence
                self._render_seqment(
                    video=[path, available_range])
                # generate used frames
                self._generate_used_frames(
                    available_range.duration.value)
        # QUESTION: what if nested track composition is in place?
        else:
            # at last process a Gap
            self._render_seqment(gap=duration)
            # generate used frames
            self._generate_used_frames(duration)

        # creating and registering representation
        representation = self._create_representation(start, duration)
        instance.data["representations"].append(representation)
        self.log.info(f"Adding representation: {representation}")
print scop else: scop = l1[0] print scop print("连续数字范围:{}".format(scop)) collections, remainder = clique.assemble(file_list) for collection in collections: print repr(collection) head = head_list[1] heada = '{0}.'.format(head) tail = tail_list[1] taila = '.{0}'.format(tail) collection = clique.Collection(head=heada, tail=taila, padding=4) for name in file_list: collection.add(name) print collection.is_contiguous() print collection.indexes print collection.holes() # # collection = clique.parse('/path/to/file.%04d.ext [1, 2, 5-10]') # print repr(collection) # collection = clique.parse( # '/path/to/file.%04d.ext [1-10] (2, 8)' # '{head}{padding}{tail} [{range}] ({holes})' # ) # print repr(collection)
def test_assemble():
    '''Assemble collections from arbitrary items.'''
    items = [
        'file.ext',
        'single.1.ext',
        '1', '3',
        '001', '003',
        'head.001.tail', 'head.002.tail',
        'head.1.tail', 'head.2.tail',
        'head.001.tail3', 'head.002.tail3',
        'head_v01.tail', 'head_v02.tail',
        'head_v1.tail', 'head_v2.tail',
        'head1_010_v1.0001.tail', 'head1_010_v1.0002.tail',
        'head1_010_v2.0001.tail', 'head1_010_v2.0002.tail',
        'head1_020_v1.0001.tail', 'head1_020_v1.0002.tail'
    ]
    collections, remainder = clique.assemble(items)

    # Every plausible grouping (padded and unpadded, frame and version
    # number based) is expected.
    expected_collections = [
        clique.Collection('', '', 0, indexes={1, 3}),
        clique.Collection('', '', 3, indexes={1, 3}),
        clique.Collection('head.', '.tail', 3, indexes={1, 2}),
        clique.Collection('head.', '.tail', 0, indexes={1, 2}),
        clique.Collection('head.', '.tail3', 3, indexes={1, 2}),
        clique.Collection('head_v', '.tail', 2, indexes={1, 2}),
        clique.Collection('head_v', '.tail', 0, indexes={1, 2}),
        clique.Collection('head1_010_v1.', '.tail', 4, indexes={1, 2}),
        clique.Collection('head1_010_v2.', '.tail', 4, indexes={1, 2}),
        clique.Collection('head1_020_v1.', '.tail', 4, indexes={1, 2}),
        clique.Collection('head1_010_v', '.0001.tail', 0, indexes={1, 2}),
        clique.Collection('head1_010_v', '.0002.tail', 0, indexes={1, 2}),
        clique.Collection('head1_', '_v1.0001.tail', 3, indexes={10, 20}),
        clique.Collection('head1_', '_v1.0002.tail', 3, indexes={10, 20})
    ]
    assert sorted(collections) == sorted(expected_collections)

    # Items that never formed a collection are returned as remainder.
    assert sorted(remainder) == sorted(['file.ext', 'single.1.ext'])
collections, _ = clique.assemble(items, minimum_items=3) expected = [ clique.Collection('head_v1.', '.ext', 3, indexes=set([1, 2, 3])) ] assert collections == expected collections, _ = clique.assemble(items, minimum_items=5) expected = [] assert collections == expected @pytest.mark.parametrize(('items', 'expected'), [ ( ['0999', '1000', '1001', '9999'], [ clique.Collection('', '', 4, indexes=set([999, 1000, 1001, 9999])) ] ), ( ['999', '0999', '1000', '1001', '9999'], [ clique.Collection('', '', 4, indexes=set([999, 1000, 1001, 9999])), clique.Collection('', '', 0, indexes=set([999, 1000, 1001, 9999])) ] ), ( ['0999', '1000', '9999', '10001'], [ clique.Collection('', '', 4, indexes=set([999, 1000, 9999])), clique.Collection('', '', 0, indexes=set([1000, 9999, 10001])) ]
def process(self, instance):
    """Build and write a per-shot Nuke comp script for a Hiero track item.

    Creates a Nuke script containing a Root node (sized to the clip's
    format), a primary Read of the item's media, optional Retime handling
    for reversed/retimed clips, a versionless EXR Write node, and extra
    Read/Retime pairs for every other track item overlapping the same
    timeline range.  The script is written to a ``workspace`` folder next
    to the current project file, and a single-item clique collection for
    it is stored on ``instance.data["collection"]``.
    """
    item = instance[0]
    file_path = item.source().mediaSource().fileinfos()[0].filename()
    fps = item.sequence().framerate().toFloat()

    # Get handles from a "handles" family tag on the item, if present.
    handles = 0
    if "handles" in instance.data["families"]:
        for tag in instance[0].tags():
            data = tag.metadata().dict()
            if "handles" == data.get("tag.family", ""):
                handles = int(data["tag.value"])

    # Get reverse, retime, first and last frame.
    # Negative playback speed means the clip plays backwards; any speed
    # other than 1.0 means a retime is needed.
    reverse = False
    if item.playbackSpeed() < 0:
        reverse = True

    retime = False
    if item.playbackSpeed() != 1.0:
        retime = True

    # Source frame range (1-based), padded by handles; the *_offset values
    # are the script-local range starting at frame 1.
    first_frame = int(item.sourceIn() + 1) - handles
    first_frame_offset = 1
    last_frame = int(item.sourceOut() + 1) + handles
    last_frame_offset = last_frame - first_frame + 1
    if reverse:
        # Reversed clips swap in/out; note handles are not applied here.
        first_frame = int(item.sourceOut() + 1)
        first_frame_offset = 1
        last_frame = int(item.sourceIn() + 1)
        last_frame_offset = last_frame - first_frame + 1

    # Get resolution from the parent sequence's format.
    width = item.parent().parent().format().width()
    height = item.parent().parent().format().height()

    # Creating shot nuke script
    nukeWriter = hiero.core.nuke.ScriptWriter()

    # Root node; for retimes the script length is rescaled by the
    # playback speed.
    root_node = hiero.core.nuke.RootNode(first_frame_offset,
                                         last_frame_offset,
                                         fps=fps)
    if retime:
        last_frame = abs(
            int(round(last_frame_offset / item.playbackSpeed())))
        root_node = hiero.core.nuke.RootNode(first_frame_offset,
                                             last_frame,
                                             fps=fps)
    fmt = item.parent().parent().format()
    root_node.setKnob("format", "{0} {1}".format(fmt.width(), fmt.height()))
    nukeWriter.addNode(root_node)

    # Primary read node.  frame_mode 2 with a "frame" offset shifts the
    # media so the clip starts at script frame 1 — NOTE(review): based on
    # Nuke's Read "start at" frame mode; confirm against Nuke docs.
    read_node = hiero.core.nuke.ReadNode(file_path,
                                         width=width,
                                         height=height,
                                         firstFrame=first_frame,
                                         lastFrame=last_frame + 1)
    read_node.setKnob("frame_mode", 2)
    read_node.setKnob("frame", str(first_frame - 1))
    nukeWriter.addNode(read_node)
    last_node = read_node

    if reverse or retime:
        last_frame = last_frame_offset
        if retime:
            last_frame = abs(
                int(round(last_frame_offset / item.playbackSpeed())))
        retime_node = hiero.core.nuke.RetimeNode(first_frame_offset,
                                                 last_frame_offset,
                                                 first_frame_offset,
                                                 last_frame,
                                                 reverse=reverse)
        retime_node.setKnob("shutter", 0)
        retime_node.setInputNode(0, read_node)
        nukeWriter.addNode(retime_node)
        last_node = retime_node

    # Create write node, rendering EXRs next to the current project file.
    write_path = os.path.join(
        os.path.dirname(instance.context.data["currentFile"]),
        "workspace",
        item.parent().parent().name(),
        item.parent().name(),
        item.name() + ".%04d.exr")
    # Widen the frame padding beyond 4 digits if the frame numbers need it.
    frame_padding = len(str(last_frame))
    if frame_padding < 4:
        frame_padding = 4
    padding_string = "%{0}d".format(str(frame_padding).zfill(2))
    write_path = write_path.replace("%04d", padding_string)
    write_node = hiero.core.nuke.WriteNode(write_path, inputNode=last_node)
    write_node.setKnob("file_type", "exr")
    write_node.setKnob("metadata", "all metadata")
    write_node.setName(instance.data["name"])
    nukeWriter.addNode(write_node)

    # Secondary read nodes: every other track item overlapping this
    # item's timeline range gets its own Read + Retime pair (unconnected
    # to the write stream).
    seq = item.parent().parent()
    time_in = item.timelineIn()
    time_out = item.timelineOut()

    items = []
    for count in range(time_in, time_out):
        items.extend(seq.trackItemsAt(count))
    # Deduplicate and drop the primary item itself.
    items = list(set(items))
    items.remove(item)

    last_frame = abs(int(round(last_frame_offset / item.playbackSpeed())))
    for i in items:
        src = i.source().mediaSource().fileinfos()[0].filename()
        in_frame = i.mapTimelineToSource(time_in) + 1 - handles
        out_frame = i.mapTimelineToSource(time_out) + 1 + handles
        read_node = hiero.core.nuke.ReadNode(src,
                                             width=width,
                                             height=height,
                                             firstFrame=in_frame,
                                             lastFrame=out_frame)
        nukeWriter.addNode(read_node)
        retime_node = hiero.core.nuke.RetimeNode(in_frame,
                                                 out_frame,
                                                 first_frame_offset,
                                                 last_frame)
        retime_node.setKnob("shutter", 0)
        retime_node.setInputNode(0, read_node)
        nukeWriter.addNode(retime_node)

    # Destination path of the generated .nk script.
    file_path = os.path.join(
        os.path.dirname(instance.context.data["currentFile"]),
        "workspace",
        item.parent().parent().name(),
        item.parent().name(),
        item.name() + ".0001.nk")

    # Create directories
    if not os.path.exists(os.path.dirname(file_path)):
        os.makedirs(os.path.dirname(file_path))

    # Create nuke script
    nukeWriter.writeToDisk(file_path)
    self.log.info("Writing Nuke script to: \"%s\"" % file_path)

    # Wrap the script path in a single-item collection for downstream
    # plugins that expect instance.data["collection"].
    collection = clique.Collection(head=file_path.replace("0001.nk", ""),
                                   padding=4,
                                   tail=".nk")
    collection.add(file_path)
    instance.data["collection"] = collection
def transcode(self, instance, write_path, tag_type):
    """Transcode a Hiero track item by generating and executing a Nuke script.

    Args:
        instance: pyblish instance whose first item is the track item.
        write_path (str): output path containing a ``%04d`` frame token.
        tag_type (str): output flavour — "jpeg", "h264" or "h264_half".

    Returns:
        clique.Collection: collection of the rendered output file(s),
        with the requested ``tag_type`` attached as ``collection.tag_type``.

    Raises:
        ValueError: if any expected output file is missing after the render.
    """
    import os
    import time

    import hiero
    import clique

    item = instance[0]

    # Get handles from a "handles" family tag on the item, if present.
    handles = 0
    if "handles" in instance.data["families"]:
        for tag in instance[0].tags():
            data = tag.metadata().dict()
            if "handles" == data.get("tag.family", ""):
                handles = int(data["tag.value"])

    # Get reverse, retime, first and last frame.
    reverse = False
    if item.playbackSpeed() < 0:
        reverse = True

    first_frame = int(item.sourceIn() + 1) - handles
    first_frame_offset = 1
    last_frame = int(item.sourceOut() + 1) + handles
    last_frame_offset = last_frame - first_frame + 1
    if reverse:
        first_frame = int(item.sourceOut() + 1)
        first_frame_offset = 1
        last_frame = int(item.sourceIn() + 1)
        last_frame_offset = last_frame - first_frame + 1

    # Creating transcode Nuke script
    nukeWriter = hiero.core.nuke.ScriptWriter()
    root_node = hiero.core.nuke.RootNode(first_frame_offset,
                                         last_frame_offset)
    nukeWriter.addNode(root_node)
    item.addToNukeScript(script=nukeWriter,
                         firstFrame=first_frame_offset,
                         includeRetimes=True,
                         retimeMethod="Frame",
                         startHandle=handles,
                         endHandle=handles)

    # Widen frame padding beyond 4 digits when frame numbers need it.
    frame_padding = len(str(last_frame))
    if frame_padding < 4:
        frame_padding = 4
    padding_string = "%{0}d".format(str(frame_padding).zfill(2))
    write_path = write_path.replace("%04d", padding_string)
    write_node = hiero.core.nuke.WriteNode(write_path)
    if tag_type == "jpeg":
        write_node.setKnob("file_type", "jpeg")
        write_node.setKnob("_jpeg_quality", "1")
    if tag_type in ["h264", "h264_half"]:
        write_node.setKnob("file_type", "mov")
        write_node.setKnob("meta_codec", "avc1")
        write_node.setKnob("mov32_fps",
                           item.parent().parent().framerate().toFloat())
    if tag_type.endswith("_half"):
        # Half-resolution variants go through a 0.5 Reformat first.
        reformat_node = hiero.core.nuke.ReformatNode(scale=0.5,
                                                     to_type="scale")
        nukeWriter.addNode(reformat_node)
    nukeWriter.addNode(write_node)

    script_path = os.path.join(
        os.path.dirname(instance.context.data["currentFile"]),
        "workspace",
        "{0}_{1}.nk".format(item.name(), tag_type))
    nukeWriter.writeToDisk(script_path)

    # Execute the Nuke script and block until it finishes.
    # FIX: the log file handle was previously opened inline and never
    # closed, which leaks the handle and makes the os.remove() below fail
    # on Windows while the file is still open.
    logFileName = write_path + ".log"
    log_file = open(logFileName, "w")
    try:
        process = hiero.core.nuke.executeNukeScript(script_path, log_file)
        while process.poll() is None:
            time.sleep(0.5)
    finally:
        log_file.close()

    # Build the collection of expected output files.
    split = write_path.split(padding_string)
    collection = clique.Collection(head=split[0],
                                   tail=split[1],
                                   padding=frame_padding)
    if write_path.endswith(".mov"):
        # Movie renders produce a single file.
        collection.add(write_path % last_frame_offset)
    else:
        for count in range(first_frame_offset, last_frame_offset + 1):
            collection.add(write_path % count)

    # Validate output and clean up.
    missing_files = []
    for f in collection:
        if not os.path.exists(f):
            missing_files.append(f)
    if missing_files:
        msg = "Files were not transcoded: {0}".format(missing_files)
        raise ValueError(msg)

    os.remove(logFileName)
    os.remove(script_path)

    collection.tag_type = tag_type
    return collection
def process(self, context):
    """Collect Maya objectSets as publish instances.

    For every valid objectSet one instance is created per export format
    (mayaBinary, mayaAscii, alembic).  Membership of a set whose name
    starts with "remote" marks the instance type as "remote".  Each
    instance gets a per-format publish attribute on the set and a
    clique collection pointing at the expected output file.
    """
    # Collect sets named starting with "remote".
    remote_members = []
    for object_set in pm.ls(type="objectSet"):
        if object_set.name().lower().startswith("remote"):
            remote_members.extend(object_set.members())

    # Loop-invariant tables and paths, hoisted out of the loops below.
    invalid_sets = [
        "lightEditorRoot", "defaultLightSet", "defaultObjectSet"
    ]
    extensions = {
        "mayaAscii": "ma", "mayaBinary": "mb", "alembic": "abc"
    }
    family_mappings = {
        "mayaAscii": "scene", "mayaBinary": "scene", "alembic": "cache"
    }
    filename = os.path.splitext(
        os.path.basename(context.data["currentFile"]))[0]
    path = os.path.join(
        os.path.dirname(context.data["currentFile"]), "workspace", filename)

    for object_set in pm.ls(type="objectSet"):
        # pm.ls(type=...) can also return subtypes; keep exact matches only.
        if object_set.nodeType() != "objectSet":
            continue

        if not self.validate_set(object_set):
            continue

        # Exclude specific sets
        if object_set.name() in invalid_sets:
            continue

        # Checking instance type.
        instance_type = "local"
        if object_set in remote_members:
            instance_type = "remote"

        # Add an instance per format supported.
        for fmt in ["mayaBinary", "mayaAscii", "alembic"]:
            # Remove illegal disk characters
            name = object_set.name().replace(":", "_")
            instance = context.create_instance(name=name)
            instance.add(object_set)
            families = [fmt, family_mappings[fmt], instance_type]
            instance.data["families"] = families
            instance.data["family"] = family_mappings[fmt]
            label = "{0} - {1} - {2}".format(name, fmt, instance_type)
            instance.data["label"] = label

            # Adding/Checking publish attribute; one bool attribute per
            # format on the set drives whether the instance publishes.
            instance.data["publish"] = False
            if hasattr(object_set, fmt):
                attr = pm.Attribute(object_set.name() + "." + fmt)
                instance.data["publish"] = attr.get()
            else:
                pm.addAttr(object_set,
                           longName=fmt,
                           defaultValue=False,
                           attributeType="bool")
                attr = pm.Attribute(object_set.name() + "." + fmt)
                pm.setAttr(attr, channelBox=True)

            # Generate collection with the expected output file at the
            # playback start frame.
            head = "{0}_{1}.".format(path, name)
            tail = "." + extensions[fmt]
            collection = clique.Collection(head=head, padding=4, tail=tail)
            frame_start = int(pm.playbackOptions(query=True, minTime=True))
            collection.add(head + str(frame_start).zfill(4) + tail)
            instance.data["collection"] = collection
def process(self, instance):
    """Render a baked JPEG review of the instance's frame collection in Nuke.

    Copies the source frames to a temp directory, builds a temporary node
    chain (Read -> Reformat -> baked viewer process -> input process ->
    Write), renders it, stores the result on
    ``instance.data["review_collection"]`` and cleans everything up,
    restoring the user's node selection.
    """
    import os
    import tempfile
    import shutil

    import clique
    import nuke

    # Store selection so it can be restored after the temporary nodes
    # are deleted.
    selection = [i for i in nuke.allNodes() if i["selected"].getValue()]

    # Deselect all nodes to prevent external connections
    [i["selected"].setValue(False) for i in nuke.allNodes()]

    temporary_nodes = []

    # Frame range comes from the collection's indexes.
    first_frame = min(instance.data["collection"].indexes)
    last_frame = max(instance.data["collection"].indexes)

    # Work on a temp copy of the frames so the render never touches the
    # originals.
    temp_dir = tempfile.mkdtemp()
    for f in instance.data["collection"]:
        shutil.copy(f, os.path.join(temp_dir, os.path.basename(f)))

    node = previous_node = nuke.createNode("Read")
    node["file"].setValue(
        os.path.join(
            temp_dir,
            os.path.basename(instance.data["collection"].format(
                "{head}{padding}{tail}"))).replace("\\", "/"))
    node["first"].setValue(first_frame)
    node["origfirst"].setValue(first_frame)
    node["last"].setValue(last_frame)
    node["origlast"].setValue(last_frame)

    # Match the Read's colorspace to the source node's enum value.
    index = instance[0]["colorspace"].getValue()
    node["colorspace"].setValue(node["colorspace"].enumName(int(index)))
    temporary_nodes.append(node)

    # Reformat for pixelaspect ratio — NOTE(review): knob values 2/5 are
    # Nuke enum indices (type="scale", resize="distort" presumably);
    # confirm against the Reformat node reference.
    node = previous_node = nuke.createNode("Reformat")
    node["type"].setValue(2)
    nuke.selectedNode()["scale"].setValue(
        [1, 1.0 / instance[0].pixelAspect()])
    node["resize"].setValue(5)
    temporary_nodes.append(node)

    # Baked viewer node: replicate the active viewer process as a real
    # node so its look is baked into the review.
    viewer_process_node = nuke.ViewerProcess.node()
    dag_node = None
    if viewer_process_node:
        dag_node = nuke.createNode(viewer_process_node.Class())
        dag_node.setInput(0, previous_node)
        previous_node = dag_node
        temporary_nodes.append(dag_node)
        # Copy viewer process values (skip identity/placement knobs).
        excludedKnobs = ["name", "xpos", "ypos"]
        for item in viewer_process_node.knobs().keys():
            if item not in excludedKnobs and item in dag_node.knobs():
                x1 = viewer_process_node[item]
                x2 = dag_node[item]
                x2.fromScript(x1.toScript(False))
    else:
        self.log.warning("No viewer node found.")

    # Also bake the viewer's input process node, if one is active.
    viewer_nodes = nuke.allNodes(filter="Viewer")
    if viewer_nodes:
        viewer_node = nuke.allNodes(filter="Viewer")[0]
        input_process_node = nuke.toNode(
            viewer_node["input_process_node"].value())
        if viewer_node["input_process"].value() and input_process_node:
            input_process_node = self.duplicate_node(
                nuke.toNode(viewer_node["input_process_node"].value()))
            input_process_node.setInput(0, previous_node)
            previous_node = input_process_node
            temporary_nodes.append(input_process_node)

    # Write node rendering "<head>_review.%04d.jpeg" next to the source.
    write_node = nuke.createNode("Write")
    head = instance.data["collection"].format("{head}_review.")
    if instance.data["collection"].format("{head}").endswith("."):
        # Avoid a double dot when the head already ends with one.
        head = instance.data["collection"].format("{head}")[:-1]
        head += "_review."
    review_collection = clique.Collection(head=head,
                                          padding=4,
                                          tail=".jpeg")
    review_collection.indexes.update(instance.data["collection"].indexes)
    write_node["file"].setValue(
        review_collection.format("{head}{padding}{tail}").replace(
            "\\", "/"))
    write_node["file_type"].setValue("jpeg")
    write_node["raw"].setValue(1)
    write_node["_jpeg_quality"].setValue(1)
    write_node.setInput(0, previous_node)
    temporary_nodes.append(write_node)
    instance.data["review_collection"] = review_collection

    # Render frames
    nuke.execute(write_node.name(), int(first_frame), int(last_frame))

    # Clean up temporary nodes and the frame copies.
    for node in temporary_nodes:
        nuke.delete(node)
    shutil.rmtree(temp_dir)

    # Restore selection
    [i["selected"].setValue(False) for i in nuke.allNodes()]
    [i["selected"].setValue(True) for i in selection]
def process(self, instance):
    """Extract per-shot review media (mov, jpegs, wav) with ffmpeg.

    Cuts the shot's range out of the editorial mov, then derives a JPEG
    sequence and a WAV track from that cut, appending one representation
    per output to ``instance.data["representations"]``.
    """
    staging_dir = self.staging_dir(instance)
    self.log.info("Outputting shot to {}".format(staging_dir))
    editorial_path = instance.context.data["editorialPath"]
    basename = os.path.splitext(os.path.basename(editorial_path))[0]

    # Generate mov file: seek/duration are frame counts converted to
    # seconds via the asset fps.
    fps = pype.lib.get_asset()["data"]["fps"]
    input_path = os.path.join(
        os.path.dirname(editorial_path), basename + ".mov")
    shot_mov = os.path.join(staging_dir, instance.data["name"] + ".mov")
    ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
    args = [
        ffmpeg_path,
        "-ss", str(instance.data["frameStart"] / fps),
        "-i", input_path,
        "-t", str(
            (instance.data["frameEnd"] -
             instance.data["frameStart"] + 1) / fps),
        "-crf", "18",
        "-pix_fmt", "yuv420p",
        shot_mov
    ]
    self.log.info(f"Processing: {args}")
    output = pype.lib._subprocess(args)
    self.log.info(output)
    instance.data["representations"].append({
        "name": "mov",
        "ext": "mov",
        "files": os.path.basename(shot_mov),
        "stagingDir": staging_dir,
        "frameStart": instance.data["frameStart"],
        "frameEnd": instance.data["frameEnd"],
        "fps": fps,
        "thumbnail": True,
        "tags": ["review", "ftrackreview"]
    })

    # Generate jpegs from the freshly cut mov.
    shot_jpegs = os.path.join(
        staging_dir, instance.data["name"] + ".%04d.jpeg")
    args = [ffmpeg_path, "-i", shot_mov, shot_jpegs]
    self.log.info(f"Processing: {args}")
    output = pype.lib._subprocess(args)
    self.log.info(output)

    # Collect the rendered jpegs from disk into a clique collection.
    collection = clique.Collection(head=instance.data["name"] + ".",
                                   tail='.jpeg',
                                   padding=4)
    for f in os.listdir(staging_dir):
        if collection.match(f):
            collection.add(f)
    instance.data["representations"].append({
        "name": "jpeg",
        "ext": "jpeg",
        "files": list(collection),
        "stagingDir": staging_dir
    })

    # Generate wav file (audio extracted from the cut mov).
    shot_wav = os.path.join(staging_dir, instance.data["name"] + ".wav")
    args = [ffmpeg_path, "-i", shot_mov, shot_wav]
    self.log.info(f"Processing: {args}")
    output = pype.lib._subprocess(args)
    self.log.info(output)
    instance.data["representations"].append({
        "name": "wav",
        "ext": "wav",
        "files": os.path.basename(shot_wav),
        "stagingDir": staging_dir
    })

    # Required for extract_review plugin (L222 onwards).
    instance.data["fps"] = fps
def process(self, instance):
    """Collect the review representation for a Hiero instance.

    Resolves the reviewable clip, computes its source frame range
    (adjusted by the main clip's timeline offsets and handles), gathers
    the media files (single movie or existing sequence frames) and
    appends a "review" representation to
    ``instance.data["representations"]``, finally creating a thumbnail.
    """
    tags = ["review", "ftrackreview"]

    # get reviewable item from `review` instance.data attribute
    self.main_clip = instance.data.get("item")
    self.rw_clip = self.get_review_item(instance)

    # let user know there is missing review clip and convert instance
    # back as not reviewable
    assert self.rw_clip, "Missing reviewable clip for '{}'".format(
        self.main_clip.name())

    # add to representations
    if not instance.data.get("representations"):
        instance.data["representations"] = list()

    # get review media main info
    rw_source = self.rw_clip.source().mediaSource()
    rw_source_duration = int(rw_source.duration())
    self.rw_source_path = rw_source.firstpath()
    rw_source_file_info = rw_source.fileinfos().pop()

    # define if review media is sequence
    is_sequence = bool(not rw_source.singleFile())
    self.log.debug("is_sequence: {}".format(is_sequence))

    # get handles
    handle_start = instance.data["handleStart"]
    handle_end = instance.data["handleEnd"]

    # review timeline and source frame ranges
    rw_clip_in = int(self.rw_clip.timelineIn())
    rw_clip_out = int(self.rw_clip.timelineOut())

    self.rw_clip_source_in = int(self.rw_clip.sourceIn())
    self.rw_clip_source_out = int(self.rw_clip.sourceOut())

    rw_source_first = int(rw_source_file_info.startFrame())

    # calculate delivery source_in and source_out
    # main_clip_timeline_in - review_item_timeline_in + 1
    main_clip_in = self.main_clip.timelineIn()
    main_clip_out = self.main_clip.timelineOut()

    source_in_diff = main_clip_in - rw_clip_in
    source_out_diff = main_clip_out - rw_clip_out

    if source_in_diff:
        self.rw_clip_source_in += source_in_diff
    if source_out_diff:
        self.rw_clip_source_out += source_out_diff

    # review clip durations
    rw_clip_duration = (
        self.rw_clip_source_out - self.rw_clip_source_in) + 1
    rw_clip_duration_h = rw_clip_duration + (
        handle_start + handle_end)

    # add created data to review item data
    instance.data["reviewItemData"] = {
        "mediaDuration": rw_source_duration
    }

    file_dir = os.path.dirname(self.rw_source_path)
    file = os.path.basename(self.rw_source_path)
    ext = os.path.splitext(file)[-1]

    # detect if sequence
    if not is_sequence:
        # is video file
        files = file
    else:
        files = list()
        spliter, padding = get_sequence_pattern_and_padding(file)
        self.log.debug("_ spliter, padding: {}, {}".format(
            spliter, padding))
        base_name = file.split(spliter)[0]

        # define collection and calculate frame range
        collection = clique.Collection(
            base_name,
            ext,
            padding,
            set(
                range(
                    int(rw_source_first + int(
                        self.rw_clip_source_in - handle_start)),
                    int(rw_source_first + int(
                        self.rw_clip_source_out + handle_end) + 1))))
        self.log.debug("_ collection: {}".format(collection))

        real_files = os.listdir(file_dir)
        self.log.debug("_ real_files: {}".format(real_files))

        # collect frames to repre files list; frames missing on disk are
        # simply skipped.
        for item in collection:
            if item not in real_files:
                self.log.debug("_ item: {}".format(item))
                continue
            files.append(item)

        # add prep tag
        tags.extend(["prep", "delete"])

    # change label
    instance.data["label"] = "{0} - ({1})".format(
        instance.data["label"], ext)

    self.log.debug("Instance review: {}".format(instance.data["name"]))

    # adding representation for review mov
    representation = {
        "files": files,
        "stagingDir": file_dir,
        "frameStart": rw_source_first + self.rw_clip_source_in,
        "frameEnd": rw_source_first + self.rw_clip_source_out,
        "frameStartFtrack": int(
            self.rw_clip_source_in - handle_start),
        "frameEndFtrack": int(self.rw_clip_source_out + handle_end),
        "step": 1,
        "fps": instance.data["fps"],
        "name": "review",
        "tags": tags,
        "ext": ext[1:]
    }

    if rw_source_duration > rw_clip_duration_h:
        self.log.debug("Media duration higher: {}".format(
            (rw_source_duration - rw_clip_duration_h)))
        representation.update({
            "frameStart": rw_source_first + int(
                self.rw_clip_source_in - handle_start),
            "frameEnd": rw_source_first + int(
                self.rw_clip_source_out + handle_end),
            "tags": ["_cut-bigger", "prep", "delete"]
        })
    elif rw_source_duration < rw_clip_duration_h:
        # FIX: this branch previously logged "Media duration higher",
        # copy-pasted from the branch above, even though it handles the
        # opposite (shorter media) case.
        self.log.debug("Media duration lower: {}".format(
            (rw_source_duration - rw_clip_duration_h)))
        representation.update({
            "frameStart": rw_source_first + int(
                self.rw_clip_source_in - handle_start),
            "frameEnd": rw_source_first + int(
                self.rw_clip_source_out + handle_end),
            "tags": ["prep", "delete"]
        })

    instance.data["representations"].append(representation)

    self.create_thumbnail(instance)

    self.log.debug("Added representations: {}".format(
        instance.data["representations"]))
def process(self, context):
    """Collect previously published outputs from on-disk JSON manifests.

    Walks the current (directory) context for ``*.json`` files, reads
    instance data from them, and for every versioned collection creates
    one "output" instance per existing version found on disk.
    """
    import os
    import json

    import clique

    current_file = context.data("currentFile")

    # Skip if current file is not a directory
    if not os.path.isdir(current_file):
        return

    # Traverse directory and collect collections from json files.
    instances = []
    for root, dirs, files in os.walk(current_file):
        for f in files:
            if f.endswith(".json"):
                with open(os.path.join(root, f)) as json_data:
                    for data in json.load(json_data):
                        instances.append(data)

    # Validate instance based on supported families.
    valid_families = ["img", "cache", "scene", "mov"]
    valid_data = []
    for data in instances:
        families = data.get("families", []) + [data["family"]]
        family_type = list(set(families) & set(valid_families))
        if family_type:
            valid_data.append(data)

    # Create existing output instance.
    scanned_dirs = []
    files = []
    collections = []
    for data in valid_data:
        if "collection" not in data.keys():
            continue

        if data["collection"] is None:
            continue

        instance_collection = clique.parse(data["collection"])

        try:
            version = self.version_get(
                os.path.basename(instance_collection.format()), "v")[1]
        except Exception:
            # Ignore any output that is not versioned.
            # FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt.
            continue

        # Getting collections of all previous versions and current version
        for count in range(1, int(version) + 1):
            # Generate collection for this version number.
            version_string = "v" + str(count).zfill(len(version))
            head = instance_collection.head.replace(
                "v" + version, version_string)
            collection = clique.Collection(
                head=head.replace("\\", "/"),
                padding=instance_collection.padding,
                tail=instance_collection.tail)
            collection.version = count

            # Scan collection directory once; results are cached in
            # `files`/`scanned_dirs` across iterations.
            scan_dir = os.path.dirname(collection.head)
            if scan_dir not in scanned_dirs and os.path.exists(scan_dir):
                for f in os.listdir(scan_dir):
                    file_path = os.path.join(scan_dir, f)
                    files.append(file_path.replace("\\", "/"))
                scanned_dirs.append(scan_dir)

            # Match files to collection and add
            for f in files:
                if collection.match(f):
                    collection.add(f)

            # Skip if no files were found in the collection
            if not list(collection):
                continue

            # Skip existing collections
            if collection in collections:
                continue

            instance = context.create_instance(name=data["name"])
            version = self.version_get(
                os.path.basename(collection.format()), "v")[1]
            basename = os.path.basename(collection.format())
            instance.data["label"] = "{0} - {1}".format(
                data["name"], basename)
            families = data["families"] + [data["family"]]
            family = list(set(valid_families) & set(families))[0]
            instance.data["family"] = family
            instance.data["families"] = ["output"]
            instance.data["collection"] = collection
            instance.data["version"] = int(version)
            instance.data["publish"] = False

            collections.append(collection)
def process(self, instance):
    """Render the instance's render layer and collect the output files.

    Maya 2017+ renders in-session via ``RenderSequence`` with all other
    layers disabled; older versions shell out to the command-line
    ``Render`` binary.  Afterwards the rendered files are matched into a
    fresh collection, falling back to the ``workspace/tmp`` folder Maya
    sometimes renders into.
    """
    # Use RenderSequence if in 2017+.
    # FIX: cmds.about(version=True) returns a *string* (e.g. "2018"),
    # so the old `>= 2017` comparison was a str/int comparison that is
    # always True on Python 2 (dead else-branch) and a TypeError on
    # Python 3. Parse the leading year instead.
    if int(cmds.about(version=True)[:4]) >= 2017:
        # Store current layer for later
        current_layer = pm.nodetypes.RenderLayer.currentLayer()

        # Disable all render layers apart from the one that needs to
        # render, remembering their previous state.
        layers_state = []
        for layer in pm.ls(type="renderLayer"):
            layers_state.append(
                (layer.renderable, layer.renderable.get()))
            if layer == instance[0]:
                layer.renderable.set(True)
                layer.setCurrent()
            else:
                layer.renderable.set(False)

        # Setup render sequence settings. We are rendering all layers,
        # but since they are all disabled except the desired layer,
        # only that layer will be outputted.
        settings = {
            "renderSequenceRegion": 0,
            "renderSequenceAllLayers": 1,
            "renderSequenceAllCameras": 1,
            "renderSequenceAddToRenderView": 0,
            "renderSequenceAddAllLayers": 0,
            "renderSequenceAddAllCameras": 0
        }
        for key, value in settings.iteritems():
            cmds.optionVar(intValue=(key, value))

        # Execute cmds.RenderSequence()
        cmds.RenderSequence()

        # Restore layers state
        for attr, value in layers_state:
            attr.set(value)

        # Restore current layer
        current_layer.setCurrent()
    else:
        # Execute render in separate process.
        exe = os.path.dirname(sys.executable)
        render_executable = os.path.join(exe, "Render")
        layer_name = instance[0].name()
        scene_path = instance.context.data["currentFile"]
        project_directory = str(pm.system.Workspace.getPath().expand())
        args = [
            render_executable, "-rl", layer_name, scene_path,
            "-proj", project_directory
        ]
        self.log.debug("Executing: " + str(args))
        subprocess.call(args)

    # Check output files: keep only frames that actually exist on disk.
    output_collection = instance.data["collection"]
    collection = clique.Collection(head=output_collection.head,
                                   padding=output_collection.padding,
                                   tail=output_collection.tail)
    for f in instance.data["collection"]:
        if os.path.exists(f):
            collection.add(f)

    # Check tmp directory. Maya can sometimes render to the wrong folder.
    # Don't know why, and can't replicate.
    if not list(collection):
        collection = clique.Collection(
            head=output_collection.head.replace(
                "workspace", "workspace/tmp"),
            padding=output_collection.padding,
            tail=output_collection.tail)
        for f in os.listdir(os.path.dirname(collection.format())):
            f = os.path.join(
                os.path.dirname(collection.format()), f).replace(
                    "\\", "/")
            if collection.match(f):
                collection.add(f)

    instance.data["collection"] = collection
def process(self, context):
    """Collect existing published outputs for already-collected instances.

    For each instance of a supported family that carries a collection,
    scans disk for every version (v1..current) of that collection and
    creates a mirror "output" instance per version found.
    """
    # Validate instance based on support families.
    valid_families = ["img", "cache", "scene", "mov"]
    valid_instances = []
    for instance in context:
        families = instance.data.get("families", [])
        family_type = list(set(families) & set(valid_families))
        if family_type:
            valid_instances.append(instance)

    # Create existing output instance.
    # `scanned_dirs`/`files` deliberately persist across instances so
    # each directory is only listed once.
    scanned_dirs = []
    files = []
    for instance in valid_instances:
        instance_collection = instance.data.get("collection", None)
        if not instance_collection:
            continue

        # Version string parsed from the collection's file name,
        # e.g. "001" from "shot_v001".
        version = self.version_get(
            os.path.basename(instance_collection.format()), "v")[1]

        # Getting collections of all previous versions and current version
        collections = []
        for count in range(1, int(version) + 1):
            # Generate collection for this version number, preserving the
            # zero padding of the original version string.
            version_string = "v" + str(count).zfill(len(version))
            head = instance_collection.head.replace(
                "v" + version, version_string)
            collection = clique.Collection(
                head=head.replace("\\", "/"),
                padding=instance_collection.padding,
                tail=instance_collection.tail)
            collection.version = count

            # Scan collection directory (once per directory).
            scan_dir = os.path.dirname(collection.head)
            if scan_dir not in scanned_dirs and os.path.exists(scan_dir):
                for f in os.listdir(scan_dir):
                    file_path = os.path.join(scan_dir, f)
                    files.append(file_path.replace("\\", "/"))
                scanned_dirs.append(scan_dir)

            # Match files to collection and add
            for f in files:
                if collection.match(f):
                    collection.add(f)

            # Only keep versions that actually exist on disk.
            if list(collection):
                collections.append(collection)

        if collections:
            families = set(valid_families) & set(
                instance.data["families"])
            for collection in collections:
                name = instance.data["name"]
                new_instance = instance.context.create_instance(
                    name=name)
                label = instance.data["label"].split("-")[0] + "- "
                fmt = "{head}{padding}{tail}"
                label += os.path.basename(collection.format(fmt))
                label += collection.format(" [{ranges}]")
                new_instance.data["label"] = label
                new_instance.data["families"] = list(families) + [
                    "output"]
                new_instance.data["family"] = list(families)[0]
                new_instance.data["publish"] = False
                new_instance.data["collection"] = collection
                new_instance.data["version"] = collection.version

                # Mirror the source instance's nodes onto the new one.
                for node in instance:
                    new_instance.add(node)
def process_sequence(self, repre_path, anatomy, anatomy_name, anatomy_data):
    """Deliver a frame sequence to its anatomy-resolved destination.

    Args:
        repre_path: representation path containing a "#" or "%" frame
            token used to locate the source sequence on disk.
        anatomy: anatomy object used to format the delivery template.
        anatomy_name (str): name of the delivery anatomy template.
        anatomy_data (dict): data for filling the anatomy template;
            mutated here by setting its "frame" key.

    Problems (missing source file/collection) are logged and recorded in
    ``self.report_items`` instead of raising.
    """
    dir_path, file_name = os.path.split(str(repre_path))
    base_name, ext = os.path.splitext(file_name)
    file_name_items = None
    if "#" in base_name:
        file_name_items = [part for part in base_name.split("#") if part]
    elif "%" in base_name:
        file_name_items = base_name.split("%")

    if not file_name_items:
        msg = "Source file was not found"
        self.report_items[msg].append(repre_path)
        self.log.warning("{} <{}>".format(msg, repre_path))
        return

    # Find the on-disk collection matching the representation's
    # extension and base name.
    src_collections, remainder = clique.assemble(os.listdir(dir_path))
    src_collection = None
    for col in src_collections:
        if col.tail != ext:
            continue
        # skip if collection doesn't have the same basename
        if not col.head.startswith(file_name_items[0]):
            continue
        src_collection = col
        break

    if src_collection is None:
        msg = "Source collection of files was not found"
        self.report_items[msg].append(repre_path)
        self.log.warning("{} <{}>".format(msg, repre_path))
        return

    # Placeholder substituted into the template so the path can be split
    # into head/tail around the frame number.
    frame_indicator = "@####@"

    anatomy_data["frame"] = frame_indicator
    anatomy_filled = anatomy.format(anatomy_data)
    delivery_path = anatomy_filled["delivery"][anatomy_name]

    # FIX: replaced a stray debug print() with the plugin logger used
    # everywhere else in this method.
    self.log.debug("delivery_path: {}".format(delivery_path))
    delivery_folder = os.path.dirname(delivery_path)
    dst_head, dst_tail = delivery_path.split(frame_indicator)
    dst_padding = src_collection.padding
    dst_collection = clique.Collection(head=dst_head,
                                       tail=dst_tail,
                                       padding=dst_padding)

    if not os.path.exists(delivery_folder):
        os.makedirs(delivery_folder)

    # Copy every source frame to its destination counterpart.
    src_head = src_collection.head
    src_tail = src_collection.tail
    for index in src_collection.indexes:
        src_padding = src_collection.format("{padding}") % index
        src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
        src = os.path.normpath(os.path.join(dir_path, src_file_name))

        dst_padding = dst_collection.format("{padding}") % index
        dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)

        self.copy_file(src, dst)
def process(self, instance):
    """Collect the plate representation for an instance.

    Gathers the source media files (single movie, or the sequence frames
    that exist on disk within the handle-extended range), records which
    handle frames are missing, and appends a representation to
    ``instance.data["representations"]``.
    """
    # add to representations
    if not instance.data.get("representations"):
        instance.data["representations"] = list()

    self.main_clip = instance.data["item"]
    # get plate source attributes
    source_media = instance.data["sourceMedia"]
    source_path = instance.data["sourcePath"]
    source_first = instance.data["sourceFirst"]
    frame_start = instance.data["frameStart"]
    frame_end = instance.data["frameEnd"]
    handle_start = instance.data["handleStart"]
    handle_end = instance.data["handleEnd"]
    source_in = instance.data["sourceIn"]
    source_out = instance.data["sourceOut"]
    source_in_h = instance.data["sourceInH"]
    source_out_h = instance.data["sourceOutH"]

    # define if review media is sequence
    is_sequence = bool(not source_media.singleFile())
    self.log.debug("is_sequence: {}".format(is_sequence))

    file_dir = os.path.dirname(source_path)
    file = os.path.basename(source_path)
    ext = os.path.splitext(file)[-1]

    # detect if sequence
    if not is_sequence:
        # is video file: representation "files" is a single file name.
        files = file
    else:
        files = list()
        spliter, padding = self.detect_sequence(file)
        self.log.debug("_ spliter, padding: {}, {}".format(
            spliter, padding))
        base_name = file.split(spliter)[0]

        # define collection and calculate frame range
        # (handle-inclusive: sourceInH..sourceOutH).
        collection = clique.Collection(
            base_name,
            ext,
            padding,
            set(
                range(
                    int(source_first + source_in_h),
                    int(source_first + source_out_h) + 1)))
        self.log.debug("_ collection: {}".format(collection))

        real_files = os.listdir(file_dir)
        self.log.debug("_ real_files: {}".format(real_files))

        # collect frames to repre files list; frames missing on disk
        # within the handle regions are remembered on the plugin so the
        # missing handles can be reported elsewhere.
        self.handle_start_exclude = list()
        self.handle_end_exclude = list()
        for findex, item in enumerate(collection):
            if item not in real_files:
                self.log.debug("_ item: {}".format(item))
                test_index = findex + int(source_first + source_in_h)
                test_start = int(source_first + source_in)
                test_end = int(source_first + source_out)
                if (test_index < test_start):
                    self.handle_start_exclude.append(test_index)
                elif (test_index > test_end):
                    self.handle_end_exclude.append(test_index)
                continue
            files.append(item)

    # change label
    instance.data["label"] = "{0} - ({1})".format(
        instance.data["label"], ext)

    self.log.debug("Instance review: {}".format(
        instance.data["name"]))

    # adding representation for review mov
    representation = {
        "files": files,
        "stagingDir": file_dir,
        "frameStart": frame_start - handle_start,
        "frameEnd": frame_end + handle_end,
        "name": ext[1:],
        "ext": ext[1:]
    }

    instance.data["representations"].append(representation)

    self.version_data(instance)

    self.log.debug("Added representations: {}".format(
        instance.data["representations"]))

    self.log.debug("instance.data: {}".format(instance.data))