def process(self, context):
    import os
    import json
    import clique

    job = context.data["deadlineJob"]

    data = job.GetJobExtraInfoKeyValueWithDefault("PyblishInstanceData", "")
    if not data:
        return
    data = json.loads(data)

    # Remove all files that do not exist. Iterate over a copy so the
    # collection can be modified safely during iteration.
    collection = clique.parse(data["collection"])
    for f in list(collection):
        if not os.path.exists(f):
            collection.remove(f)

    # Creating instance if collection exists.
    if list(collection):
        instance = context.create_instance(name=data["name"])
        for key in data:
            instance.data[key] = data[key]

        # Prevent resubmitting same job and change the processing
        # location to local.
        del instance.data["deadlineData"]
        instance.data["families"].remove("deadline")
        instance.data["families"].append("local")

        instance.data["collection"] = collection

def process(self, instance):
    import os
    import filecmp
    import clique

    for data in instance.data.get("ftrackComponentsList", []):

        location = data.get(
            "component_location",
            instance.context.data["ftrackSession"].pick_location()
        )

        if location["name"] == "ftrack.server":
            continue

        component = data.get("component", None)
        if not component:
            continue

        # Create destination directory.
        resource_identifier = location.get_resource_identifier(component)
        if not os.path.exists(os.path.dirname(resource_identifier)):
            os.makedirs(os.path.dirname(resource_identifier))

        collection = instance.data.get("collection", None)
        if collection:
            target_collection = clique.parse(
                resource_identifier, pattern="{head}{padding}{tail}"
            )
            for f in collection:
                dst = f.replace(collection.head, target_collection.head)

                # If the files are the same, continue.
                if os.path.exists(dst) and filecmp.cmp(f, dst):
                    self.log.debug(
                        "\"{0}\" is the same as \"{1}\". "
                        "Skipping...".format(f, dst)
                    )
                    continue

                # Delete existing files if overwriting.
                if data.get("component_overwrite", False):
                    if os.path.exists(dst):
                        os.remove(dst)

                if not os.path.exists(dst):
                    self.manage_data(f, dst)

        output_path = instance.data.get("output_path", "")
        if output_path:
            # If the files are the same, continue.
            if (os.path.exists(resource_identifier) and
                    filecmp.cmp(output_path, resource_identifier)):
                self.log.debug(
                    "\"{0}\" is the same as \"{1}\". "
                    "Skipping...".format(output_path, resource_identifier)
                )
                return

            # Delete existing file if overwriting.
            if data.get("component_overwrite", False):
                if os.path.exists(resource_identifier):
                    os.remove(resource_identifier)

            if not os.path.exists(resource_identifier):
                self.manage_data(output_path, resource_identifier)

def process(self, context):
    import nuke
    import clique

    # Get remote nodes.
    remote_nodes = []
    for node in nuke.allNodes():
        if node.Class() == "BackdropNode":
            if node.name().lower().startswith("remote"):
                remote_nodes.extend(node.getNodes())
    remote_nodes = list(set(remote_nodes))

    # Creating instances per write node.
    for node in nuke.allNodes():
        if node.Class() != "Write":
            continue

        # Determine output type.
        output_type = "img"
        if node["file_type"].value() == "mov":
            output_type = "mov"

        # Determine processing location from backdrops.
        process_place = "local"
        if node in remote_nodes:
            process_place = "remote"

        # Create instance.
        instance = context.create_instance(name=node.name())
        instance.data["families"] = ["write", process_place, output_type]
        instance.data["family"] = output_type
        instance.add(node)

        label = "{0} - write - {1}"
        instance.data["label"] = label.format(node.name(), process_place)
        instance.data["publish"] = not node["disable"].getValue()

        # Get frame range.
        start_frame = int(nuke.root()["first_frame"].getValue())
        end_frame = int(nuke.root()["last_frame"].getValue())
        if node["use_limit"].getValue():
            start_frame = int(node["first"].getValue())
            end_frame = int(node["last"].getValue())

        # Add collection.
        collection = None
        try:
            path = ""
            if nuke.filename(instance[0]):
                path = nuke.filename(instance[0])
            path += " [{0}-{1}]".format(start_frame, end_frame)
            collection = clique.parse(path)
        except Exception as e:
            self.log.warning(e)

        instance.data["collection"] = collection

def process(self, context):
    import clique
    import nuke
    from pyblish import api

    instances = []

    # Creating instances per write node.
    for node in nuke.allNodes():
        if node.Class() != "Write":
            continue

        # Determine output type.
        output_type = "img"
        if node["file_type"].value() == "mov":
            output_type = "mov"

        # Create instance.
        instance = api.Instance(node.name())
        instance.data["family"] = output_type
        instance.add(node)
        instance.data["label"] = node.name()
        instance.data["publish"] = False

        # Get frame range.
        start_frame = int(nuke.root()["first_frame"].getValue())
        end_frame = int(nuke.root()["last_frame"].getValue())
        if node["use_limit"].getValue():
            start_frame = int(node["first"].getValue())
            end_frame = int(node["last"].getValue())

        # Add collection.
        collection = None
        try:
            path = ""
            if nuke.filename(node):
                path = nuke.filename(node)
            path += " [{0}-{1}]".format(start_frame, end_frame)
            collection = clique.parse(path)
        except ValueError:
            # Ignore the exception when the path does not match the
            # collection pattern.
            pass

        instance.data["collection"] = collection

        instances.append(instance)

    context.data["write_instances"] = instances

    context.data["instances"] = (
        context.data.get("instances", []) + instances
    )

def create_otio_reference(media_pool_item):
    metadata = _get_metadata_media_pool_item(media_pool_item)
    mp_clip_property = media_pool_item.GetClipProperty()
    path = mp_clip_property["File Path"]
    reformat_path = utils.get_reformated_path(path, padded=True)
    padding = utils.get_padding_from_path(path)

    if padding:
        metadata.update({
            "isSequence": True,
            "padding": padding
        })

    # Get clip properties according to type.
    fps = float(mp_clip_property["FPS"])
    if mp_clip_property["Type"] == "Video":
        frame_start = int(mp_clip_property["Start"])
        frame_duration = int(mp_clip_property["Frames"])
    else:
        audio_duration = str(mp_clip_property["Duration"])
        frame_start = 0
        frame_duration = int(
            utils.timecode_to_frames(audio_duration, fps))

    otio_ex_ref_item = None

    if padding:
        # If it is a file sequence, try to create `ImageSequenceReference`.
        # The OTIO version might not be compatible, so return nothing and
        # fall back to the old way.
        try:
            dirname, filename = os.path.split(path)
            collection = clique.parse(filename, '{head}[{ranges}]{tail}')
            padding_num = len(re.findall("(\\d+)(?=-)", filename).pop())
            otio_ex_ref_item = otio.schema.ImageSequenceReference(
                target_url_base=dirname + os.sep,
                name_prefix=collection.format("{head}"),
                name_suffix=collection.format("{tail}"),
                start_frame=frame_start,
                frame_zero_padding=padding_num,
                rate=fps,
                available_range=create_otio_time_range(
                    frame_start, frame_duration, fps))
        except AttributeError:
            pass

    if not otio_ex_ref_item:
        # In case of an old OTIO version or a video file, create
        # `ExternalReference`.
        otio_ex_ref_item = otio.schema.ExternalReference(
            target_url=reformat_path,
            available_range=create_otio_time_range(
                frame_start, frame_duration, fps))

    # Add metadata to otio item.
    add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata)

    return otio_ex_ref_item

def process(self, instance):
    import os
    import clique

    for data in instance.data.get("ftrackComponentsList", []):
        path = data["component_path"]

        if "component" in data and "workspace" in path:
            try:
                collection = clique.parse(path)
                for f in collection:
                    os.remove(f)
                    self.log.info("Deleted: \"{0}\"".format(f))
            except ValueError:
                # Assume it's a single file.
                os.remove(path)
                self.log.info("Deleted: \"{0}\"".format(path))

def get_output_file_data(output_file):
    # Shot
    if output_file.get("entity_id"):
        output_file["entity"] = get_entity(output_file["entity_id"])
        output_file["project"] = get_project(
            output_file["entity"]["project_id"]
        )
        # Sequence
        output_file["entity"]["parent"] = get_entity(
            output_file["entity"]["parent_id"]
        )
    # Asset
    elif output_file.get("asset_instance_id"):
        output_file["asset_instance"] = get_asset(
            output_file["asset_instance_id"]
        )
        output_file["project"] = get_project(
            output_file["asset_instance"]["project_id"]
        )

    if output_file.get("path") and "%" in output_file["path"]:
        # TODO: catch potential parse errors (single frame, etc.)
        collection = clique.parse(output_file["path"])
        output_file["collection_path"] = output_file["path"]
        output_file["path"] = collection.format("{head}{padding}{tail}")
        frames = list(collection.indexes)
        output_file["frame_in"], output_file["frame_out"] = (
            frames[0], frames[-1]
        )

    output_file["person"] = get_person(output_file["person_id"])
    output_file["file_status"] = get_attribute(
        all_file_status, output_file["file_status_id"]
    )
    output_file["output_type"] = get_attribute(
        all_output_types, output_file["output_type_id"]
    )
    output_file["task_type"] = get_attribute(
        all_task_types, output_file["task_type_id"]
    )

    return output_file

def test_non_matching_parse():
    '''Fail to parse non-matching value.'''
    with pytest.raises(ValueError):
        clique.parse('')

def test_parse(value, pattern, expected):
    '''Construct collection by parsing formatted string.'''
    if pattern is None:
        assert clique.parse(value) == expected
    else:
        assert clique.parse(value, pattern=pattern) == expected

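# Illustrative sketch, not part of the original test suite: shows what
# clique.parse returns for a formatted string using the default pattern
# "{head}{padding}{tail} [{ranges}]". The file name is hypothetical.
def test_parse_example():
    '''Parse a formatted string and inspect the resulting collection.'''
    import clique

    collection = clique.parse("render.%04d.exr [1-3, 7]")
    assert collection.head == "render."
    assert collection.padding == 4
    assert collection.tail == ".exr"
    assert list(collection.indexes) == [1, 2, 3, 7]
    # format() round-trips back to the formatted string.
    assert collection.format() == "render.%04d.exr [1-3, 7]"
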
def process(self, context):
    import os
    import json
    import clique

    current_file = context.data("currentFile")

    # Skip if current file is not a directory.
    if not os.path.isdir(current_file):
        return

    # Traverse directory and collect collections from json files.
    instances = []
    for root, dirs, files in os.walk(current_file):
        for f in files:
            if f.endswith(".json"):
                with open(os.path.join(root, f)) as json_data:
                    for data in json.load(json_data):
                        instances.append(data)

    # Validate instance based on supported families.
    valid_families = ["img", "cache", "scene", "mov"]
    valid_data = []
    for data in instances:
        families = data.get("families", []) + [data["family"]]
        family_type = list(set(families) & set(valid_families))
        if family_type:
            valid_data.append(data)

    # Create existing output instance.
    scanned_dirs = []
    files = []
    collections = []
    for data in valid_data:
        if "collection" not in data.keys():
            continue
        if data["collection"] is None:
            continue

        instance_collection = clique.parse(data["collection"])

        try:
            version = self.version_get(
                os.path.basename(instance_collection.format()), "v"
            )[1]
        except Exception:
            # Ignore any output that is not versioned.
            continue

        # Getting collections of all previous versions and current version.
        for count in range(1, int(version) + 1):

            # Generate collection.
            version_string = "v" + str(count).zfill(len(version))
            head = instance_collection.head.replace(
                "v" + version, version_string
            )
            collection = clique.Collection(
                head=head.replace("\\", "/"),
                padding=instance_collection.padding,
                tail=instance_collection.tail
            )
            collection.version = count

            # Scan collection directory.
            scan_dir = os.path.dirname(collection.head)
            if scan_dir not in scanned_dirs and os.path.exists(scan_dir):
                for f in os.listdir(scan_dir):
                    file_path = os.path.join(scan_dir, f)
                    files.append(file_path.replace("\\", "/"))
                scanned_dirs.append(scan_dir)

            # Match files to collection and add.
            for f in files:
                if collection.match(f):
                    collection.add(f)

            # Skip if no files were found in the collection.
            if not list(collection):
                continue

            # Skip existing collections.
            if collection in collections:
                continue

            instance = context.create_instance(name=data["name"])
            version = self.version_get(
                os.path.basename(collection.format()), "v"
            )[1]

            basename = os.path.basename(collection.format())
            instance.data["label"] = "{0} - {1}".format(
                data["name"], basename
            )

            families = data["families"] + [data["family"]]
            family = list(set(valid_families) & set(families))[0]
            instance.data["family"] = family
            instance.data["families"] = ["output"]
            instance.data["collection"] = collection
            instance.data["version"] = int(version)
            instance.data["publish"] = False

            collections.append(collection)

def create_component(session, event, component_name, assetversion, component):
    component_location = session.get("Location", event["data"]["location_id"])
    location = session.pick_location()

    component_data = {"name": component_name, "version": assetversion}

    component_path = ""
    try:
        collection = clique.parse(
            component_location.get_resource_identifier(component),
            pattern="{head}{padding}{tail}")
    except ValueError:
        # Assume it's a single file.
        component_path = component_location.get_resource_identifier(component)
    else:
        members = list(component.get("members", []))
        for member in members:
            collection.add(component_location.get_resource_identifier(member))
        component_path = collection.format()

    # Component
    component_entity = session.query(
        query("Component", component_data)).first()

    # Overwrite existing component data if requested.
    if component_entity:
        origin_location = session.query(
            "Location where name is \"ftrack.origin\"").one()

        # Removing existing members from location.
        components = list(component_entity.get("members", []))
        components += [component_entity]
        for component in components:
            for loc in component["component_locations"]:
                if location["id"] == loc["location_id"]:
                    location.remove_component(component, recursive=False)

        # Deleting existing members on component entity.
        for member in component_entity.get("members", []):
            session.delete(member)
            del member

        session.commit()

        # Reset members in memory.
        if "members" in component_entity.keys():
            component_entity["members"] = []

        # Add components to origin location.
        try:
            collection = clique.parse(component_path)
        except ValueError:
            # Assume it's a single file.
            origin_location.add_component(component_entity, component_path)
        else:
            # Create member components for sequence.
            for member_path in collection:

                size = 0
                try:
                    size = os.path.getsize(member_path)
                except OSError:
                    pass

                name = collection.match(member_path).group("index")

                member_data = {
                    "name": name,
                    "container": component_entity,
                    "size": size,
                    "file_type": os.path.splitext(member_path)[-1]
                }

                component = session.create("FileComponent", member_data)

                origin_location.add_component(
                    component, member_path, recursive=False
                )

                component_entity["members"].append(component)

        # Add components to location.
        location.add_component(
            component_entity, origin_location, recursive=True
        )

    # Create new component if none exists.
    if not component_entity:
        component = assetversion.create_component(
            component_path, data=component_data, location=location
        )

def process(self, context):
    import json
    import clique

    job = context.data("deadlineJob")

    data = job.GetJobExtraInfoKeyValueWithDefault(
        "PyblishInstanceData", ""
    )
    if not data:
        return
    data = json.loads(data)

    if "img" not in data["families"]:
        self.log.info("Could not find \"img\" in families.")
        return

    # Prevent resubmitting same job.
    del data["deadlineData"]
    data["families"].remove("deadline")

    name = data["name"]
    instance = context.create_instance(name=name)

    # Copy the instance data across before overriding families and
    # collection below.
    for key in data:
        instance.data[key] = data[key]

    instance.data["families"] = ["mov", "local", "deadline"]

    img_collection = clique.parse(data["collection"])
    collection = clique.parse(
        img_collection.format(
            "{head}{padding}.mov [" + str(job.JobFramesList[0]) + "]"
        )
    )
    instance.data["collection"] = collection.format()

    # Create FFmpeg dependent job.
    job_data = {}
    job_data["Plugin"] = "FFmpeg"
    job_data["Frames"] = "{0}-{1}".format(
        job.JobFramesList[0], job.JobFramesList[-1]
    )
    job_data["Name"] = job.Name
    job_data["UserName"] = job.UserName
    job_data["ChunkSize"] = job.JobFramesList[-1] + 1
    job_data["JobDependency0"] = job.JobId
    job_data["OutputFilename0"] = list(collection)[0]

    # Copy environment keys.
    index = 0
    if job.GetJobEnvironmentKeys():
        for key in job.GetJobEnvironmentKeys():
            value = job.GetJobEnvironmentKeyValue(key)
            key_value = "{0}={1}".format(key, value)
            job_data["EnvironmentKeyValue" + str(index)] = key_value
            index += 1

    # Setting plugin data.
    plugin_data = {}
    plugin_data["InputFile0"] = img_collection.format(
        "{head}{padding}{tail}"
    )
    plugin_data["ReplacePadding"] = False
    plugin_data["ReplacePadding0"] = False
    plugin_data["UseSameInputArgs"] = False
    plugin_data["OutputFile"] = list(collection)[0]

    start_frame = str(job.JobFramesList[0])
    inputs_args = "-gamma 2.2 -framerate 25 -start_number "
    inputs_args += start_frame
    plugin_data["InputArgs0"] = inputs_args

    if "audio" in instance.context.data:
        plugin_data["InputFile1"] = instance.context.data["audio"]

    output_args = "-q:v 0 -pix_fmt yuv420p -vf scale=trunc(iw/2)*2:"
    output_args += "trunc(ih/2)*2,colormatrix=bt601:bt709"
    output_args += " -timecode 00:00:00:01"
    plugin_data["OutputArgs"] = output_args

    # Setting data.
    data = {"job": job_data, "plugin": plugin_data}
    instance.data["deadlineData"] = data

def process(self, context):
    import nuke
    import clique

    # Storing plugin data.
    plugin_data = {'EnforceRenderOrder': True}
    plugin_data['NukeX'] = nuke.env['nukex']
    plugin_data['Version'] = nuke.NUKE_VERSION_STRING.split('v')[0]

    # Creating instances per write node.
    for node in nuke.allNodes():
        if node.Class() == 'Write' and not node['disable'].getValue():
            instance = context.create_instance(name=node.name())
            instance.data['family'] = 'render'
            instance.data['families'] = ['deadline', 'writeNode']

            output_file = node['file'].getValue()
            instance.data['startFrame'] = int(
                nuke.Root().knob('first_frame').value()
            )
            instance.data['endFrame'] = int(
                nuke.Root().knob('last_frame').value()
            )
            self.log.info(instance.data['startFrame'])

            if '%' in output_file:
                padding = int(output_file.split('%')[1][0:2])
                padding_string = '%0{0}d'.format(padding)
                tmp = '#' * padding
                output_file = output_file.replace(padding_string, tmp)
                self.log.info(output_file)

            # Populate instance with data.
            instance.data['outputFilename'] = output_file

            # Frame range.
            start_frame = int(nuke.root()['first_frame'].getValue())
            end_frame = int(nuke.root()['last_frame'].getValue())
            if node['use_limit'].getValue():
                start_frame = int(node['first'].getValue())
                end_frame = int(node['last'].getValue())

            frames = '%s-%s\n' % (start_frame, end_frame)
            instance.data['startFrame'] = start_frame
            instance.data['endFrame'] = end_frame
            instance.data['frames'] = frames

            # Add collection.
            collection = None
            try:
                path = ""
                if nuke.filename(node):
                    path = nuke.filename(node)
                path += " [{0}-{1}]".format(start_frame, end_frame)
                collection = clique.parse(path)
            except Exception as e:
                self.log.warning(e)

            instance.data["collection"] = collection

            if str(node.name()) in ['Write1', 'Write_dpx']:
                compname = 'main'
            else:
                compname = node.name()

            # Adding ftrack data to activate processing.
            instance.data['ftrackComponents'] = {compname: {}}
            self.log.debug('component name: {}'.format(compname))

            instance.add(node)

def process(self, context):
    import json
    import math
    import clique

    job = context.data("deadlineJob")

    # Create instance.
    value = job.GetJobExtraInfoKeyValueWithDefault("PyblishInstanceData", "")
    if not value:
        return

    instance_data = json.loads(value)

    # Return early if it isn't a render file.
    render_families = ["ifd"]
    if not list(set(instance_data["families"]) & set(render_families)):
        return

    instance = context.create_instance(name=instance_data["name"])
    for key in instance_data.keys():
        instance.data[key] = instance_data[key]
    instance.data["families"] = ["img", "remote", "deadline"]

    # Setting job data.
    job_data = {}
    job_data["Plugin"] = "Mantra"
    job_data["Frames"] = job.JobFrames
    job_data["Name"] = job.Name
    job_data["JobDependency0"] = job.JobId
    job_data["IsFrameDependent"] = True

    frame_count = 0
    for f in job.JobFramesList:
        frame_count += 1
    if frame_count > 5000:
        job_data["ChunkSize"] = int(math.ceil(frame_count / 5000.0))

    collection = clique.parse(instance_data["render"])
    fmt = "{head}" + "#" * collection.padding + "{tail}"
    job_data["OutputFilename0"] = collection.format(fmt)

    # Copy environment keys.
    index = 0
    if job.GetJobEnvironmentKeys():
        for key in job.GetJobEnvironmentKeys():
            value = job.GetJobEnvironmentKeyValue(key)
            key_value = "{0}={1}".format(key, value)
            job_data["EnvironmentKeyValue" + str(index)] = key_value
            index += 1

    # Setting plugin data.
    plugin_data = {}
    plugin_data["Version"] = job.GetJobPluginInfoKeyValue("Version")

    collection = clique.parse(instance_data["collection"])
    plugin_data["SceneFile"] = list(collection)[0]

    # Setting data.
    data = {"job": job_data, "plugin": plugin_data}
    instance.data["deadlineData"] = data

def process(self, instance):
    import os
    import clique

    session = instance.context.data["ftrackSession"]
    task = instance.context.data["ftrackTask"]

    info_msg = "Created new {entity_type} with data: {data}"
    info_msg += ", metadata: {metadata}."

    # Iterate over components and publish.
    for data in instance.data.get("ftrackComponentsList", []):

        # AssetType
        # Get existing entity.
        assettype_data = {"short": "upload"}
        assettype_data.update(data.get("assettype_data", {}))

        assettype_entity = session.query(
            self.query("AssetType", assettype_data)
        ).first()

        # Create a new entity if none exists.
        if not assettype_entity:
            assettype_entity = session.create("AssetType", assettype_data)
            self.log.info(
                "Created new AssetType with data: {0}".format(assettype_data)
            )

        # Asset
        # Get existing entity.
        asset_data = {
            "name": task["name"],
            "type": assettype_entity,
            "parent": task["parent"],
        }
        asset_data.update(data.get("asset_data", {}))

        asset_entity = session.query(
            self.query("Asset", asset_data)
        ).first()

        # Extracting metadata, and adding after entity creation. This is
        # due to a ftrack_api bug where you can't add metadata on creation.
        asset_metadata = asset_data.pop("metadata", {})

        # Create a new entity if none exists.
        if not asset_entity:
            asset_entity = session.create("Asset", asset_data)
            self.log.info(
                info_msg.format(
                    entity_type="Asset",
                    data=asset_data,
                    metadata=asset_metadata
                )
            )

        # Adding metadata.
        existing_asset_metadata = asset_entity["metadata"]
        existing_asset_metadata.update(asset_metadata)
        asset_entity["metadata"] = existing_asset_metadata

        # AssetVersion
        # Get existing entity.
        assetversion_data = {
            "version": 0,
            "asset": asset_entity,
            "task": task
        }
        assetversion_data.update(data.get("assetversion_data", {}))

        assetversion_entity = session.query(
            self.query("AssetVersion", assetversion_data)
        ).first()

        # Extracting metadata, and adding after entity creation. This is
        # due to a ftrack_api bug where you can't add metadata on creation.
        assetversion_metadata = assetversion_data.pop("metadata", {})

        # Create a new entity if none exists.
        if not assetversion_entity:
            assetversion_entity = session.create(
                "AssetVersion", assetversion_data
            )
            self.log.info(
                info_msg.format(
                    entity_type="AssetVersion",
                    data=assetversion_data,
                    metadata=assetversion_metadata
                )
            )

        # Adding metadata.
        existing_assetversion_metadata = assetversion_entity["metadata"]
        existing_assetversion_metadata.update(assetversion_metadata)
        assetversion_entity["metadata"] = existing_assetversion_metadata

        # Manage thumbnails.
        thumbnail_path = data.get("thumbnail_path", "")
        if thumbnail_path and os.path.isfile(thumbnail_path):
            self.log.info('Got thumbnail: {0!r}.'.format(thumbnail_path))
            thumbnail_component = assetversion_entity.create_thumbnail(
                thumbnail_path
            )
            self.log.info(thumbnail_component)
            if data.get("propagate_thumbnail", False):
                # We can use True for 1 and False for 0, or an int above 1
                # to propagate through to that number of parents:
                # 1 is the task the asset version is linked to,
                # 2 is the parent of the task,
                # 3 is the parent of the parent of the task, and so on.
                parent_count = data["propagate_thumbnail"]
                self.log.debug("Propagating thumbnail.")
                entities = []
                for item in task['link']:
                    entities.insert(0, session.get(item['type'], item['id']))
                self.log.debug("Found entities: %s" % entities)
                for entity in entities[:parent_count]:
                    if "thumbnail" in entity.keys():
                        self.log.info(
                            "Found thumbnail field on %s; "
                            "propagating." % entity
                        )
                        entity["thumbnail"] = thumbnail_component
                    else:
                        continue
                task.session.commit()
        else:
            self.log.info(
                'Thumbnail file did not exist: {0!r}.'.format(thumbnail_path)
            )

        # Have to commit the version and asset, because the location can't
        # determine the final location without them.
        session.commit()

        # Component
        # Get existing entity.
        component_data = {
            "name": "main",
            "version": assetversion_entity
        }
        component_data.update(data.get("component_data", {}))

        component_entity = session.query(
            self.query("Component", component_data)
        ).first()

        component_overwrite = data.get("component_overwrite", False)
        location = data.get("component_location", session.pick_location())

        # Overwrite existing component data if requested.
        if component_entity and component_overwrite:

            origin_location = session.query(
                "Location where name is \"ftrack.origin\""
            ).one()

            # Removing existing members from location.
            components = list(component_entity.get("members", []))
            components += [component_entity]
            for component in components:
                for loc in component["component_locations"]:
                    if location["id"] == loc["location_id"]:
                        location.remove_component(
                            component, recursive=False
                        )

            # Deleting existing members on component entity.
            for member in component_entity.get("members", []):
                session.delete(member)
                del member

            session.commit()

            # Reset members in memory.
            if "members" in component_entity.keys():
                component_entity["members"] = []

            # Add components to origin location.
            try:
                collection = clique.parse(data["component_path"])
            except ValueError:
                # Assume it's a single file.
                # Changing file type.
                name, ext = os.path.splitext(data["component_path"])
                component_entity["file_type"] = ext

                origin_location.add_component(
                    component_entity, data["component_path"]
                )
            else:
                # Changing file type.
                component_entity["file_type"] = collection.format("{tail}")

                # Create member components for sequence.
                for member_path in collection:

                    size = 0
                    try:
                        size = os.path.getsize(member_path)
                    except OSError:
                        pass

                    name = collection.match(member_path).group("index")

                    member_data = {
                        "name": name,
                        "container": component_entity,
                        "size": size,
                        "file_type": os.path.splitext(member_path)[-1]
                    }

                    component = session.create(
                        "FileComponent", member_data
                    )

                    origin_location.add_component(
                        component, member_path, recursive=False
                    )

                    component_entity["members"].append(component)

            # Add components to location.
            location.add_component(
                component_entity, origin_location, recursive=True
            )

            data["component"] = component_entity

            msg = "Overwriting Component with path: {0}, data: {1}, "
            msg += "location: {2}"
            self.log.info(
                msg.format(
                    data["component_path"],
                    component_data,
                    location
                )
            )

        # Extracting metadata, and adding after entity creation. This is
        # due to a ftrack_api bug where you can't add metadata on creation.
        component_metadata = component_data.pop("metadata", {})

        # Create new component if none exists.
        new_component = False
        if not component_entity:
            component_entity = assetversion_entity.create_component(
                data["component_path"],
                data=component_data,
                location=location
            )
            data["component"] = component_entity
            msg = "Created new Component with path: {0}, data: {1}"
            msg += ", metadata: {2}, location: {3}"
            self.log.info(
                msg.format(
                    data["component_path"],
                    component_data,
                    component_metadata,
                    location
                )
            )
            new_component = True

        # Adding metadata.
        existing_component_metadata = component_entity["metadata"]
        existing_component_metadata.update(component_metadata)
        component_entity["metadata"] = existing_component_metadata

        # Setting assetversion thumbnail.
        if data.get("thumbnail", False):
            assetversion_entity["thumbnail_id"] = component_entity["id"]

        # Inform user about no changes to the database.
        if (component_entity and not component_overwrite and
                not new_component):
            data["component"] = component_entity
            self.log.info(
                "Found existing component, and no request to overwrite. "
                "Nothing has been changed."
            )
        else:
            # Commit changes.
            session.commit()

def process(self, context):
    import os

    import clique
    import nuke

    # Creating instances per read node.
    for node in nuke.allNodes():
        if node.Class() != "Read":
            continue

        if not node.metadata():
            continue

        # Determine output type.
        output_type = "img"
        movie_formats = ["ari", "avi", "gif", "mov", "r3d"]
        if node.metadata()["input/filereader"] in movie_formats:
            output_type = "mov"
        scene_formats = ["psd"]
        if node.metadata()["input/filereader"] in scene_formats:
            output_type = "scene"

        # Create instance.
        instance = context.create_instance(node.name())
        instance.data["families"] = [output_type, "local", "output"]
        instance.data["family"] = "read"
        instance.add(node)

        path = nuke.filename(node)

        # Adding/Checking publish attribute.
        if "publish" not in node.knobs():
            knob = nuke.Boolean_Knob("publish", "Publish")
            knob.setValue(False)
            node.addKnob(knob)

        # Compare against selection.
        selection = instance.context.data.get("selection", [])
        publish = bool(node["publish"].getValue())
        if selection:
            if list(set(instance) & set(selection)):
                publish = True
            else:
                publish = False
        instance.data["publish"] = publish

        # Collecting file paths.
        label = "{0} - {1}".format(node.name(), os.path.basename(path))
        if output_type == "img":
            # This could be improved because it does not account for "#"
            # being in a sequence.
            if "#" in path:
                padding = path.count("#")
                path = path.replace(
                    "#" * padding, "%{0:0>2}d".format(padding)
                )

            try:
                collection = clique.parse(path + " []")
            except ValueError as e:
                collections, remainder = clique.assemble(
                    [path],
                    minimum_items=1,
                    patterns=[clique.PATTERNS['frames']]
                )
                if collections:
                    collection = collections[0]
                else:
                    context.remove(instance)
                    self.log.warning(
                        "Collection error on \"{0}\": "
                        "{1}".format(node.name(), e)
                    )
                    continue

            for f in os.listdir(os.path.dirname(path)):
                file_path = os.path.join(os.path.dirname(path), f)
                file_path = file_path.replace("\\", "/")
                if collection.match(file_path):
                    collection.add(file_path)

            # Limit to frame range.
            first = int(node["first"].value())
            last = int(node["last"].value())
            indexes = list(collection.indexes)
            collection.indexes.clear()
            collection.indexes.update(
                set(indexes) & set([x for x in range(first, last + 1)])
            )

            instance.data["collection"] = collection

            label = "{0} - {1}".format(
                node.name(), os.path.basename(collection.format())
            )
        else:
            instance.data["output_path"] = path

        instance.data["label"] = label

        def instanceToggled(instance, value):
            # Removing and adding the knob to support NukeAssist, where
            # you can't modify the knob value directly.
            instance[0].removeKnob(instance[0]["publish"])
            knob = nuke.Boolean_Knob("publish", "Publish")
            knob.setValue(value)
            instance[0].addKnob(knob)

        instance.data["instanceToggled"] = instanceToggled

def process(self, context):
    import os
    import re

    import hiero.exporters as he
    import clique

    for parent in context:
        if "trackItem" != parent.data["family"]:
            continue

        for task in parent.data["tasks"]:
            asset_type = None

            hiero_cls = he.FnSymLinkExporter.SymLinkExporter
            if isinstance(task, hiero_cls):
                asset_type = "img"
                movie_formats = [".mov", ".R3D"]
                ext = os.path.splitext(task.resolvedExportPath())[1]
                if ext in movie_formats:
                    asset_type = "mov"

            hiero_cls = he.FnTranscodeExporter.TranscodeExporter
            if isinstance(task, hiero_cls):
                asset_type = "img"
                if task.resolvedExportPath().endswith(".mov"):
                    asset_type = "mov"

            hiero_cls = he.FnNukeShotExporter.NukeShotExporter
            if isinstance(task, hiero_cls):
                asset_type = "scene"

            hiero_cls = he.FnAudioExportTask.AudioExportTask
            if isinstance(task, hiero_cls):
                asset_type = "audio"

            # Skip all non-supported export types.
            if not asset_type:
                continue

            resolved_path = task.resolvedExportPath()

            # Formatting the basename to not include frame padding or
            # extension.
            name = os.path.splitext(os.path.basename(resolved_path))[0]
            name = name.replace(".", "")
            name = name.replace("#", "")
            name = re.sub(r"%.*d", "", name)

            instance = context.create_instance(name=name, parent=parent)
            instance.data["task"] = task
            instance.data["item"] = parent.data["item"]

            instance.data["family"] = "trackItem.task"
            instance.data["families"] = [asset_type, "local", "task"]

            label = "{1}/{0} - {2} - local".format(
                name, parent, asset_type
            )
            instance.data["label"] = label

            instance.data["handles"] = parent.data["handles"]

            # Add collection or output.
            if asset_type == "img":
                collection = None

                if "#" in resolved_path:
                    head = resolved_path.split("#")[0]
                    padding = resolved_path.count("#")
                    tail = resolved_path.split("#")[-1]
                    collection = clique.Collection(
                        head=head, padding=padding, tail=tail
                    )

                if "%" in resolved_path:
                    collection = clique.parse(
                        resolved_path, pattern="{head}{padding}{tail}"
                    )

                instance.data["collection"] = collection
            else:
                instance.data["output_path"] = resolved_path

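# Illustrative sketch, not part of the original plugin: the two branches
# above build equivalent collections from the two padding styles Hiero can
# emit in resolved export paths. The paths here are hypothetical.
def _padding_styles_example():
    import clique

    # "#"-style padding ("shot.####.exr"): build the Collection directly.
    resolved_path = "shot.####.exr"
    head = resolved_path.split("#")[0]
    padding = resolved_path.count("#")
    tail = resolved_path.split("#")[-1]
    direct = clique.Collection(head=head, padding=padding, tail=tail)

    # "%04d"-style padding: parse with a pattern that has no frame ranges.
    parsed = clique.parse("shot.%04d.exr", pattern="{head}{padding}{tail}")

    assert (direct.head, direct.padding, direct.tail) == \
        (parsed.head, parsed.padding, parsed.tail)
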
def process(self, instance):
    import os
    import sys

    import six
    import clique

    session = instance.context.data["ftrackSession"]
    if instance.data.get("ftrackTask"):
        task = instance.data["ftrackTask"]
        name = task["name"]
        parent = task["parent"]
    elif instance.data.get("ftrackEntity"):
        task = None
        name = instance.data.get("ftrackEntity")['name']
        parent = instance.data.get("ftrackEntity")
    elif instance.context.data.get("ftrackTask"):
        task = instance.context.data["ftrackTask"]
        name = task["name"]
        parent = task["parent"]
    elif instance.context.data.get("ftrackEntity"):
        task = None
        name = instance.context.data.get("ftrackEntity")['name']
        parent = instance.context.data.get("ftrackEntity")

    info_msg = "Created new {entity_type} with data: {data}"
    info_msg += ", metadata: {metadata}."

    used_asset_versions = []

    # Iterate over components and publish.
    for data in instance.data.get("ftrackComponentsList", []):

        # AssetType
        # Get existing entity.
        assettype_data = {"short": "upload"}
        assettype_data.update(data.get("assettype_data", {}))
        self.log.debug("data: {}".format(data))

        assettype_entity = session.query(
            self.query("AssetType", assettype_data)).first()

        # Create a new entity if none exists.
        if not assettype_entity:
            assettype_entity = session.create("AssetType", assettype_data)
            self.log.debug(
                "Created new AssetType with data: {}".format(assettype_data))

        # Asset
        # Get existing entity.
        asset_data = {
            "name": name,
            "type": assettype_entity,
            "parent": parent,
        }
        asset_data.update(data.get("asset_data", {}))

        asset_entity = session.query(self.query("Asset", asset_data)).first()

        self.log.info("asset entity: {}".format(asset_entity))

        # Extracting metadata, and adding after entity creation. This is
        # due to a ftrack_api bug where you can't add metadata on creation.
        asset_metadata = asset_data.pop("metadata", {})

        # Create a new entity if none exists.
        if not asset_entity:
            asset_entity = session.create("Asset", asset_data)
            self.log.debug(
                info_msg.format(
                    entity_type="Asset",
                    data=asset_data,
                    metadata=asset_metadata))
            try:
                session.commit()
            except Exception:
                tp, value, tb = sys.exc_info()
                session.rollback()
                six.reraise(tp, value, tb)

        # Adding metadata.
        existing_asset_metadata = asset_entity["metadata"]
        existing_asset_metadata.update(asset_metadata)
        asset_entity["metadata"] = existing_asset_metadata

        # AssetVersion
        # Get existing entity.
        assetversion_data = {
            "version": 0,
            "asset": asset_entity,
        }
        _assetversion_data = data.get("assetversion_data", {})
        assetversion_cust_attrs = _assetversion_data.pop(
            "custom_attributes", {})
        asset_version_comment = _assetversion_data.pop("comment", None)
        assetversion_data.update(_assetversion_data)

        assetversion_entity = session.query(
            self.query("AssetVersion", assetversion_data)).first()

        # Extracting metadata, and adding after entity creation. This is
        # due to a ftrack_api bug where you can't add metadata on creation.
        assetversion_metadata = assetversion_data.pop("metadata", {})

        if task:
            assetversion_data['task'] = task

        # Create a new entity if none exists.
        if not assetversion_entity:
            assetversion_entity = session.create(
                "AssetVersion", assetversion_data)
            self.log.debug(
                info_msg.format(
                    entity_type="AssetVersion",
                    data=assetversion_data,
                    metadata=assetversion_metadata))
            try:
                session.commit()
            except Exception:
                tp, value, tb = sys.exc_info()
                session.rollback()
                six.reraise(tp, value, tb)

        # Adding metadata.
        existing_assetversion_metadata = assetversion_entity["metadata"]
        existing_assetversion_metadata.update(assetversion_metadata)
        assetversion_entity["metadata"] = existing_assetversion_metadata

        # Add comment.
        if asset_version_comment:
            assetversion_entity["comment"] = asset_version_comment
            try:
                session.commit()
            except Exception:
                session.rollback()
                self.log.warning(
                    ("Comment could not be set for AssetVersion \"{0}\". "
                     "Can't set its value to: \"{1}\"").format(
                        assetversion_entity["id"],
                        str(asset_version_comment)))

        # Adding custom attributes.
        for attr, val in assetversion_cust_attrs.items():
            if attr in assetversion_entity["custom_attributes"]:
                try:
                    assetversion_entity["custom_attributes"][attr] = val
                    session.commit()
                    continue
                except Exception:
                    session.rollback()

            self.log.warning(
                ("Custom Attribute \"{0}\" is not available for "
                 "AssetVersion <{1}>. Can't set its value to: "
                 "\"{2}\"").format(
                    attr, assetversion_entity["id"], str(val)))

        # Have to commit the version and asset, because the location can't
        # determine the final location without them.
        try:
            session.commit()
        except Exception:
            tp, value, tb = sys.exc_info()
            session.rollback()
            six.reraise(tp, value, tb)

        # Component
        # Get existing entity.
        component_data = {"name": "main", "version": assetversion_entity}
        component_data.update(data.get("component_data", {}))

        component_entity = session.query(
            self.query("Component", component_data)).first()

        component_overwrite = data.get("component_overwrite", False)
        location = data.get("component_location", session.pick_location())

        # Overwrite existing component data if requested.
        if component_entity and component_overwrite:

            origin_location = session.query(
                "Location where name is \"ftrack.origin\"").one()

            # Removing existing members from location.
            components = list(component_entity.get("members", []))
            components += [component_entity]
            for component in components:
                for loc in component["component_locations"]:
                    if location["id"] == loc["location_id"]:
                        location.remove_component(component, recursive=False)

            # Deleting existing members on component entity.
            for member in component_entity.get("members", []):
                session.delete(member)
                del member

            try:
                session.commit()
            except Exception:
                tp, value, tb = sys.exc_info()
                session.rollback()
                six.reraise(tp, value, tb)

            # Reset members in memory.
            if "members" in component_entity.keys():
                component_entity["members"] = []

            # Add components to origin location.
            try:
                collection = clique.parse(data["component_path"])
            except ValueError:
                # Assume it's a single file.
                # Changing file type.
                name, ext = os.path.splitext(data["component_path"])
                component_entity["file_type"] = ext

                origin_location.add_component(
                    component_entity, data["component_path"])
            else:
                # Changing file type.
                component_entity["file_type"] = collection.format("{tail}")

                # Create member components for sequence.
                for member_path in collection:

                    size = 0
                    try:
                        size = os.path.getsize(member_path)
                    except OSError:
                        pass

                    name = collection.match(member_path).group("index")

                    member_data = {
                        "name": name,
                        "container": component_entity,
                        "size": size,
                        "file_type": os.path.splitext(member_path)[-1]
                    }

                    component = session.create("FileComponent", member_data)

                    origin_location.add_component(
                        component, member_path, recursive=False)

                    component_entity["members"].append(component)

            # Add components to location.
            location.add_component(
                component_entity, origin_location, recursive=True)

            data["component"] = component_entity

            msg = "Overwriting Component with path: {0}, data: {1}, "
            msg += "location: {2}"
            self.log.info(
                msg.format(data["component_path"], component_data, location))

        # Extracting metadata, and adding after entity creation. This is
        # due to a ftrack_api bug where you can't add metadata on creation.
        component_metadata = component_data.pop("metadata", {})

        # Create new component if none exists.
        new_component = False
        if not component_entity:
            component_entity = assetversion_entity.create_component(
                data["component_path"],
                data=component_data,
                location=location)
            data["component"] = component_entity
            msg = "Created new Component with path: {0}, data: {1}"
            msg += ", metadata: {2}, location: {3}"
            self.log.info(
                msg.format(
                    data["component_path"],
                    component_data,
                    component_metadata,
                    location))
            new_component = True

        # Adding metadata.
        existing_component_metadata = component_entity["metadata"]
        existing_component_metadata.update(component_metadata)
        component_entity["metadata"] = existing_component_metadata

        # Setting assetversion thumbnail.
        if data.get("thumbnail", False):
            assetversion_entity["thumbnail_id"] = component_entity["id"]

        # Inform user about no changes to the database.
        if (component_entity and not component_overwrite and
                not new_component):
            data["component"] = component_entity
            self.log.info(
                "Found existing component, and no request to overwrite. "
                "Nothing has been changed.")
        else:
            # Commit changes.
            try:
                session.commit()
            except Exception:
                tp, value, tb = sys.exc_info()
                session.rollback()
                six.reraise(tp, value, tb)

        if assetversion_entity not in used_asset_versions:
            used_asset_versions.append(assetversion_entity)

    asset_versions_key = "ftrackIntegratedAssetVersions"
    if asset_versions_key not in instance.data:
        instance.data[asset_versions_key] = []

    for asset_version in used_asset_versions:
        if asset_version not in instance.data[asset_versions_key]:
            instance.data[asset_versions_key].append(asset_version)

def modify_launch(event):
    """Modify the application launch command with potential files to open."""

    # Collect published paths.
    data = {}
    for item in event["data"].get("selection", []):

        versions = []

        if item["entityType"] == "assetversion":
            version = ftrack.AssetVersion(item["entityId"])
            if version.getAsset().getType().getShort() in ["img", "mov"]:
                versions.append(version)

        # Add latest version of "img" and "mov" type from tasks.
        if item["entityType"] == "task":
            task = ftrack.Task(item["entityId"])
            for asset in task.getAssets(assetTypes=["img", "mov"]):
                versions.append(asset.getVersions()[-1])

        for version in versions:
            for component in version.getComponents():
                component_list = data.get(component.getName(), [])
                component_list.append(component)
                data[component.getName()] = component_list

                label = "v{0} - {1} - {2}"
                label = label.format(
                    str(version.getVersion()).zfill(3),
                    version.getAsset().getType().getName(),
                    component.getName()
                )

                file_path = component.getFilesystemPath()
                if component.isSequence():
                    if component.getMembers():
                        frame = int(component.getMembers()[0].getName())
                        file_path = file_path % frame

                event["data"]["items"].append({
                    "label": label,
                    "value": file_path
                })

    # Collect workspace paths.
    session = ftrack_api.Session()
    for item in event["data"].get("selection", []):
        if item["entityType"] == "task":
            templates = ftrack_template.discover_templates()
            task_area, template = ftrack_template.format(
                {}, templates, entity=session.get("Task", item["entityId"])
            )

            # Traverse directory and collect collections from json files.
            instances = []
            for root, dirs, files in os.walk(task_area):
                for f in files:
                    if f.endswith(".json"):
                        with open(os.path.join(root, f)) as json_data:
                            for data in json.load(json_data):
                                instances.append(data)

            check_values = []
            for data in instances:
                if "collection" in data:
                    # Check all files in the collection.
                    collection = clique.parse(data["collection"])
                    for f in list(collection):
                        if not os.path.exists(f):
                            collection.remove(f)

                    if list(collection):
                        value = list(collection)[0]

                        # Check if value already exists.
                        if value in check_values:
                            continue
                        else:
                            check_values.append(value)

                        # Add workspace items.
                        event["data"]["items"].append({
                            "label": "{0} - {1}".format(
                                data["name"],
                                os.path.basename(collection.format())
                            ),
                            "value": value
                        })

    return event

def process(self, instance):
    import time
    import os

    import hiero.core.nuke as nuke
    import hiero.exporters as he

    task = instance.data["task"]

    hiero_cls = he.FnSymLinkExporter.SymLinkExporter
    if isinstance(task, hiero_cls):

        src = os.path.join(task.filepath(), task.fileName())

        # Filelink each image file.
        if "img" in instance.data["families"]:
            frame_offset = task.outputRange()[0] - task.inputRange()[0]
            input_range = (int(task.inputRange()[0]),
                           int(task.inputRange()[1]) + 1)
            for index in range(*input_range):
                dst = task.resolvedExportPath() % (index + frame_offset)
                self.filelink(src % index, dst)

        # Filelink movie file.
        if "mov" in instance.data["families"]:
            dst = task.resolvedExportPath()
            self.filelink(src, dst)

    hiero_cls = he.FnTranscodeExporter.TranscodeExporter
    if isinstance(task, hiero_cls):

        task.startTask()
        while task.taskStep():
            time.sleep(1)

        script_path = task._scriptfile
        log_path = script_path.replace(".nk", ".log")
        log_file = open(log_path, "w")
        process = nuke.executeNukeScript(script_path, log_file, True)
        self.poll(process)
        log_file.close()

        if not task._preset.properties()["keepNukeScript"]:
            os.remove(script_path)
            os.remove(log_path)

    hiero_cls = he.FnNukeShotExporter.NukeShotExporter
    if isinstance(task, hiero_cls):
        task.startTask()
        while task.taskStep():
            time.sleep(1)

    hiero_cls = he.FnAudioExportTask.AudioExportTask
    if isinstance(task, hiero_cls):
        task.startTask()
        while task.taskStep():
            time.sleep(1)

    # Fill collection with output.
    if "img" in instance.data["families"]:
        collection = instance.data["collection"]
        path = os.path.dirname(collection.format())
        for f in os.listdir(path):
            file_path = os.path.join(path, f).replace("\\", "/")
            if collection.match(file_path):
                collection.add(file_path)
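self.filelink and self.poll are referenced above but not defined in this snippet. A minimal sketch of what they could look like, assuming hardlinking with a copy fallback and a simple wait loop on the Nuke process; the class name and both behaviours are assumptions, not the shipped implementations:

import os
import shutil
import time

class HieroTaskPlugin(object):
    """Hypothetical stand-in for the plugin class above."""

    def filelink(self, src, dst):
        # Assumed behaviour: hardlink src to dst, falling back to a copy
        # when linking isn't possible (e.g. across drives), creating the
        # destination directory first.
        if not os.path.exists(os.path.dirname(dst)):
            os.makedirs(os.path.dirname(dst))
        try:
            os.link(src, dst)
        except OSError:
            shutil.copy(src, dst)

    def poll(self, process):
        # Assumed behaviour: block until the spawned Nuke process exits.
        while process.poll() is None:
            time.sleep(1)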
def process(self, context):
    import json

    import clique

    job = context.data("deadlineJob")

    data = job.GetJobExtraInfoKeyValueWithDefault("PyblishInstanceData", "")
    if not data:
        return

    data = json.loads(data)

    if "img" not in data["families"]:
        self.log.info("Could not find \"img\" in families.")
        return

    # Prevent resubmitting the same job.
    del data["deadlineData"]
    data["families"].remove("deadline")

    name = data["name"]
    instance = context.create_instance(name=name)

    # Transfer the instance data before overriding specific keys.
    for key in data:
        instance.data[key] = data[key]

    instance.data["families"] = ["mov", "local", "deadline"]

    img_collection = clique.parse(data["collection"])
    collection = clique.parse(
        img_collection.format("{head}{padding}.mov [" +
                              str(job.JobFramesList[0]) + "]"))
    instance.data["collection"] = collection.format()

    # Create FFmpeg dependent job.
    job_data = {}
    job_data["Plugin"] = "FFmpeg"
    job_data["Frames"] = "{0}-{1}".format(job.JobFramesList[0],
                                          job.JobFramesList[-1])
    job_data["Name"] = job.Name
    job_data["UserName"] = job.UserName
    # Encode the entire frame range in a single task.
    job_data["ChunkSize"] = job.JobFramesList[-1] + 1
    job_data["JobDependency0"] = job.JobId
    job_data["OutputFilename0"] = list(collection)[0]

    # Copy environment keys.
    index = 0
    if job.GetJobEnvironmentKeys():
        for key in job.GetJobEnvironmentKeys():
            value = job.GetJobEnvironmentKeyValue(key)
            env_entry = "{0}={1}".format(key, value)
            job_data["EnvironmentKeyValue" + str(index)] = env_entry
            index += 1

    # Setting plugin data.
    plugin_data = {}
    plugin_data["InputFile0"] = img_collection.format(
        "{head}{padding}{tail}")
    plugin_data["ReplacePadding"] = False
    plugin_data["ReplacePadding0"] = False
    plugin_data["UseSameInputArgs"] = False
    plugin_data["OutputFile"] = list(collection)[0]

    start_frame = str(job.JobFramesList[0])
    inputs_args = "-gamma 2.2 -framerate 25 -start_number "
    inputs_args += start_frame
    plugin_data["InputArgs0"] = inputs_args

    if "audio" in instance.context.data:
        plugin_data["InputFile1"] = instance.context.data["audio"]

    output_args = "-q:v 0 -pix_fmt yuv420p -vf scale=trunc(iw/2)*2:"
    output_args += "trunc(ih/2)*2,colormatrix=bt601:bt709"
    output_args += " -timecode 00:00:00:01"
    plugin_data["OutputArgs"] = output_args

    # Setting data.
    data = {"job": job_data, "plugin": plugin_data}
    instance.data["deadlineData"] = data
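The mov collection above is derived from the image collection by keeping its head and padding, swapping the tail for ".mov", and pinning the range to the first job frame. A small illustration of that clique round-trip with a hypothetical sequence:

import clique

img_collection = clique.parse("/renders/shot010.%04d.exr [1001-1050]")

mov_collection = clique.parse(
    img_collection.format("{head}{padding}.mov [1001]")
)
# The single member is the movie path the FFmpeg job writes to.
assert list(mov_collection)[0] == "/renders/shot010.1001.mov"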
def process(self, context):
    import os

    import clique
    import nuke

    # Creating instances per read node.
    for node in nuke.allNodes():

        if node.Class() != "Read":
            continue

        metadata = node.metadata()
        if not metadata:
            continue

        # Determine output type.
        output_type = "img"

        movie_formats = ["ari", "avi", "gif", "mov", "r3d"]
        if metadata.get("input/filereader") in movie_formats:
            output_type = "mov"

        scene_formats = ["psd"]
        if metadata.get("input/filereader") in scene_formats:
            output_type = "scene"

        # Create instance.
        instance = context.create_instance(node.name())
        instance.data["families"] = [output_type, "local", "output"]
        instance.data["family"] = "read"
        instance.add(node)

        path = nuke.filename(node)

        # Adding/Checking publish attribute.
        if "publish" not in node.knobs():
            knob = nuke.Boolean_Knob("publish", "Publish")
            knob.setValue(False)
            node.addKnob(knob)

        # Compare against selection.
        selection = instance.context.data.get("selection", [])
        publish = bool(node["publish"].getValue())
        if selection:
            if list(set(instance) & set(selection)):
                publish = True
            else:
                publish = False

        instance.data["publish"] = publish

        # Collecting file paths.
        label = "{0} - {1}".format(node.name(), os.path.basename(path))
        if output_type == "img":

            # This could be improved because it does not account for "#"
            # appearing elsewhere in the path than the frame padding.
            if "#" in path:
                padding = path.count("#")
                path = path.replace("#" * padding,
                                    "%{0:0>2}d".format(padding))

            try:
                collection = clique.parse(path + " []")
            except ValueError as e:
                collections, remainder = clique.assemble(
                    [path],
                    minimum_items=1,
                    patterns=[clique.PATTERNS["frames"]]
                )
                if collections:
                    collection = collections[0]
                else:
                    context.remove(instance)
                    self.log.warning("Collection error on \"{0}\": "
                                     "{1}".format(node.name(), e))
                    continue

            for f in os.listdir(os.path.dirname(path)):
                file_path = os.path.join(os.path.dirname(path), f)
                file_path = file_path.replace("\\", "/")
                if collection.match(file_path):
                    collection.add(file_path)

            # Limit to the node's frame range. Knob values are floats,
            # so cast before building the range.
            first = int(node["first"].value())
            last = int(node["last"].value())
            indexes = list(collection.indexes)
            collection.indexes.clear()
            collection.indexes.update(
                set(indexes) & set(range(first, last + 1)))

            instance.data["collection"] = collection

            label = "{0} - {1}".format(
                node.name(), os.path.basename(collection.format()))
        else:
            instance.data["output_path"] = path

        instance.data["label"] = label

        def instanceToggled(instance, value):
            # Removing and adding the knob to support NukeAssist, where
            # you can't modify the knob value directly.
            instance[0].removeKnob(instance[0]["publish"])
            knob = nuke.Boolean_Knob("publish", "Publish")
            knob.setValue(value)
            instance[0].addKnob(knob)

        instance.data["instanceToggled"] = instanceToggled
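Two routes build the collection above: converting Nuke-style "#" padding into the printf notation clique.parse expects, and falling back to clique.assemble when parsing fails. A quick illustration of both, using hypothetical file names:

import clique

# Route 1: hash padding becomes printf-style padding before parsing.
path = "render.####.exr"
padding = path.count("#")
path = path.replace("#" * padding, "%{0:0>2}d".format(padding))
assert path == "render.%04d.exr"
collection = clique.parse(path + " []")  # starts with an empty index set

# Route 2: infer the pattern from a concrete, zero-padded file name.
collections, remainder = clique.assemble(
    ["render.0001.exr"], minimum_items=1
)
assert collections[0].format() == "render.%04d.exr [1]"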