def test_assemble_minimum_items_filter():
    '''Filter assembled collections by minimum number of items.'''
    items = [
        'head_v1.001.ext', 'head_v1.002.ext', 'head_v1.003.ext',
        'head_v2.001.ext', 'head_v2.002.ext'
    ]

    collections, _ = clique.assemble(items, minimum_items=1)
    expected = [
        clique.Collection('head_v', '.001.ext', 0, indexes=set([1, 2])),
        clique.Collection('head_v', '.002.ext', 0, indexes=set([1, 2])),
        clique.Collection('head_v', '.003.ext', 0, indexes=set([1])),
        clique.Collection('head_v1.', '.ext', 3, indexes=set([1, 2, 3])),
        clique.Collection('head_v2.', '.ext', 3, indexes=set([1, 2]))
    ]
    assert sorted(collections) == sorted(expected)

    collections, _ = clique.assemble(items, minimum_items=3)
    expected = [
        clique.Collection('head_v1.', '.ext', 3, indexes=set([1, 2, 3]))
    ]
    assert collections == expected

    collections, _ = clique.assemble(items, minimum_items=5)
    expected = []
    assert collections == expected
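# A minimal standalone sketch (assuming only clique's documented public
# API): assemble() returns a (collections, remainder) two-tuple, and by
# default an index group needs at least two members (minimum_items=2)
# to form a collection.
import clique

collections, remainder = clique.assemble(
    ['render.0001.exr', 'render.0002.exr', 'notes.txt']
)
print(collections[0].format())  # render.%04d.exr [1-2]
print(remainder)                # ['notes.txt']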
def update(self, container, representation):
    import pymel.core as pm

    path = api.get_representation_path(representation)

    files_in_path = os.listdir(os.path.split(path)[0])
    sequence = 0
    collections, remainder = clique.assemble(files_in_path)
    if collections:
        sequence = 1

    # Update the standin
    standins = list()
    members = pm.sets(container['objectName'], query=True)
    for member in members:
        shape = member.getShape()
        if shape and shape.type() == "aiStandIn":
            standins.append(shape)

    for standin in standins:
        standin.dso.set(path)
        standin.useFrameExtension.set(sequence)

    container = pm.PyNode(container["objectName"])
    container.representation.set(str(representation["_id"]))
def process(self, context, plugin):
    import clique
    import nuke

    # Get the errored instances
    failed = []
    for result in context.data["results"]:
        if (result["error"] is not None and
                result["instance"] is not None and
                result["instance"] not in failed):
            failed.append(result["instance"])

    # Apply pyblish.logic to get the instances for the plug-in
    instances = api.instances_by_plugin(failed, plugin)

    for instance in instances:
        collection = clique.assemble(
            [nuke.filename(instance[0])],
            minimum_items=1,
            patterns=[clique.PATTERNS['frames']]
        )[0][0]

        instance[0]["file"].setValue(
            collection.format("{head}{padding}{tail}")
        )
        instance[0]["first"].setValue(list(collection.indexes)[0])
        instance[0]["last"].setValue(list(collection.indexes)[0])
        instance[0]["origfirst"].setValue(list(collection.indexes)[0])
        instance[0]["origlast"].setValue(list(collection.indexes)[0])
def dropEvent(self, event):
    '''Handle dropped file event.'''
    self._setDropZoneState()

    # TODO: Allow hook into the dropEvent.

    paths = self._processMimeData(event.mimeData())
    self.log.debug(u'Paths: {0}'.format(paths))

    # Use frames pattern instead of default digits pattern to only match
    # frame sequences.
    framesPattern = clique.PATTERNS.get('frames')
    sequences, remainders = clique.assemble(
        paths, patterns=[framesPattern]
    )
    self.log.debug(u'Sequences: {0}'.format(sequences))
    self.log.debug(u'Remainders: {0}'.format(remainders))

    for sequence in sequences:
        self.dataSelected.emit(sequence.format())

    for path in remainders:
        self.dataSelected.emit(path)

    event.accept()
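# A small standalone sketch (assuming only clique's documented public
# API) of why the frames pattern is used here: the default digits
# pattern indexes every digit run (including version numbers), while
# clique.PATTERNS['frames'] only matches a trailing ".<frame>.<ext>".
import clique

paths = ['plate_v2.0001.dpx', 'plate_v2.0002.dpx']
sequences, remainders = clique.assemble(
    paths, patterns=[clique.PATTERNS['frames']]
)
print(sequences[0].format())  # plate_v2.%04d.dpx [1-2]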
def load(self, context, name=None, namespace=None, data=None):
    collections, remainder = clique.assemble(
        os.listdir(os.path.dirname(self.fname))
    )
    files = []
    if collections:
        for f in list(collections[0]):
            files.append(
                os.path.join(
                    os.path.dirname(self.fname), f
                ).replace("\\", "/")
            )
    else:
        files.append(
            os.path.join(
                os.path.dirname(self.fname), remainder[0]
            ).replace("\\", "/")
        )

    name = context["subset"]["name"]
    name += "_{}".format(uuid.uuid4())
    read_node = harmony.send({
        "function": copy_files + import_files,
        "args": ["Top", files, name, 1]
    })["result"]

    return harmony.containerise(
        name,
        namespace,
        read_node,
        context,
        self.__class__.__name__,
        nodes=[read_node]
    )
def process(self, instance):
    self.log.debug('instance.data["files"]: {}'.format(
        instance.data['files']))

    assert instance.data.get('files'), (
        "no frames were collected, you need to render them")

    collections, remainder = clique.assemble(*instance.data['files'])
    self.log.info('collections: {}'.format(str(collections)))

    collection = collections[0]
    frame_length = instance.data["endFrame"] \
        - instance.data["startFrame"] + 1

    # `is` compares identity, not value; integers must be compared
    # with `!=` / `==`.
    if frame_length != 1:
        assert len(collections) == 1, (
            "There are multiple collections in the folder")
        assert collection.is_contiguous(), (
            "Some frames appear to be missing")
        # The remainder must be empty; `remainder is not None` was
        # always true because assemble() always returns a list.
        assert not remainder, "There are some extra files in folder"

    self.log.info('frame_length: {}'.format(frame_length))
    self.log.info('len(collection.indexes): {}'.format(
        len(collection.indexes)))

    assert len(collection.indexes) == frame_length, (
        "{} missing frames. Use repair to render all frames".format(
            __name__))

    instance.data['collection'] = collection
def _fetchChildren(self):
    '''Fetch and return new child items.'''
    children = []

    # List paths under this directory.
    paths = []
    for name in os.listdir(self.path):
        paths.append(os.path.normpath(os.path.join(self.path, name)))

    # Handle collections.
    collections, remainder = clique.assemble(
        paths, [clique.PATTERNS['frames']]
    )

    for path in remainder:
        try:
            child = ItemFactory(path)
        except ValueError:
            pass
        else:
            children.append(child)

    for collection in collections:
        children.append(Collection(collection))

    return children
def create_instance(self, context, in_data):
    subset = in_data["subset"]

    # If instance data already contain families then use it
    instance_families = in_data.get("families") or []

    # Make sure default families are in instance
    for default_family in self.default_families or []:
        if default_family not in instance_families:
            instance_families.append(default_family)

    instance = context.create_instance(subset)
    instance.data.update(
        {
            "subset": subset,
            "asset": in_data["asset"],
            "label": subset,
            "name": subset,
            "family": in_data["family"],
            # "version": in_data.get("version", 1),
            "frameStart": in_data.get("representations", [None])[0].get(
                "frameStart", None
            ),
            "frameEnd": in_data.get("representations", [None])[0].get(
                "frameEnd", None
            ),
            "families": instance_families
        }
    )
    self.log.info("collected instance: {}".format(pformat(instance.data)))
    self.log.info("parsing data: {}".format(pformat(in_data)))

    instance.data["destination_list"] = list()
    instance.data["representations"] = list()
    instance.data["source"] = "standalone publisher"

    for component in in_data["representations"]:
        component["destination"] = component["files"]
        component["stagingDir"] = component["stagingDir"]

        if isinstance(component["files"], list):
            collections, _remainder = clique.assemble(component["files"])
            self.log.debug("collecting sequence: {}".format(collections))
            instance.data["frameStart"] = int(component["frameStart"])
            instance.data["frameEnd"] = int(component["frameEnd"])
            instance.data["fps"] = int(component["fps"])

        ext = component["ext"]
        if ext.startswith("."):
            component["ext"] = ext[1:]

        if component["preview"]:
            instance.data["families"].append("review")
            component["tags"] = ["review"]
            self.log.debug("Adding review family")

        if "psd" in component["name"]:
            instance.data["source"] = component["files"]
            self.log.debug("Adding image:background_batch family")

        instance.data["representations"].append(component)
def delete_only_repre_files(self, dir_paths, file_paths):
    for dir_id, dir_path in dir_paths.items():
        dir_files = os.listdir(dir_path)
        collections, remainders = clique.assemble(dir_files)
        for file_path, seq_path in file_paths[dir_id]:
            file_path_base = os.path.split(file_path)[1]
            # Just remove file if `frame` key was not in context or
            # filled path is in remainders (single file sequence)
            if not seq_path or file_path_base in remainders:
                if not os.path.exists(file_path):
                    self.log.warning(
                        "File was not found: {}".format(file_path)
                    )
                    continue

                os.remove(file_path)
                self.log.debug("Removed file: {}".format(file_path))
                remainders.remove(file_path_base)
                continue

            seq_path_base = os.path.split(seq_path)[1]
            head, tail = seq_path_base.split(self.sequence_splitter)

            final_col = None
            for collection in collections:
                if head != collection.head or tail != collection.tail:
                    continue
                final_col = collection
                break

            if final_col is not None:
                # Fill full path to head
                final_col.head = os.path.join(dir_path, final_col.head)
                for _file_path in final_col:
                    if os.path.exists(_file_path):
                        os.remove(_file_path)
                _seq_path = final_col.format("{head}{padding}{tail}")
                self.log.debug("Removed files: {}".format(_seq_path))
                collections.remove(final_col)

            elif os.path.exists(file_path):
                os.remove(file_path)
                self.log.debug("Removed file: {}".format(file_path))

            else:
                self.log.warning(
                    "File was not found: {}".format(file_path)
                )

    # Delete as many empty parent folders as possible.
    for dir_path in dir_paths.values():
        while True:
            if not os.path.exists(dir_path):
                dir_path = os.path.dirname(dir_path)
                continue

            if len(os.listdir(dir_path)) != 0:
                break

            self.log.debug("Removed folder: {}".format(dir_path))
            os.rmdir(dir_path)
def _process_paths(self, in_paths):
    self.parent_widget.working_start()
    paths = self._get_all_paths(in_paths)
    collections, remainders = clique.assemble(paths)
    for collection in collections:
        self._process_collection(collection)
    for remainder in remainders:
        self._process_remainder(remainder)
    self.parent_widget.working_stop()
def test_assemble_with_custom_pattern():
    '''Assemble with custom pattern.'''
    # Use a raw string so the `\.` escapes reach the regex engine intact.
    collections, _ = clique.assemble(
        ['head_v1.001.ext', 'head_v1.002.ext'],
        patterns=[
            re.compile(r'\.{0}\.ext$'.format(clique.DIGITS_PATTERN))
        ]
    )
    expected = [
        clique.Collection('head_v1.', '.ext', 3, indexes=set([1, 2]))
    ]
    assert collections == expected
def process(self, instance):
    node = None
    for x in instance:
        if x.Class() == "Write":
            node = x

    self.log.debug("instance collected: {}".format(instance.data))

    first_frame = instance.data.get("frameStart", None)
    last_frame = instance.data.get("frameEnd", None)
    node_subset_name = instance.data.get("name", None)

    self.log.info("Starting render")
    self.log.info("Start frame: {}".format(first_frame))
    self.log.info("End frame: {}".format(last_frame))

    # Ensure output directory exists.
    directory = os.path.dirname(node["file"].value())
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Render frames
    nuke.execute(node_subset_name, int(first_frame), int(last_frame))

    path = node['file'].value()
    out_dir = os.path.dirname(path)
    ext = node["file_type"].value()

    if "representations" not in instance.data:
        instance.data["representations"] = []

    collected_frames = os.listdir(out_dir)
    repre = {
        'name': ext,
        'ext': ext,
        'frameStart': "%0{}d".format(len(str(last_frame))) % first_frame,
        'files': collected_frames,
        "stagingDir": out_dir,
        "anatomy_template": "render"
    }
    instance.data["representations"].append(repre)

    self.log.info("Extracted instance '{0}' to: {1}".format(
        instance.name, out_dir))

    instance.data['family'] = 'render'
    instance.data['families'].append('render')

    collections, remainder = clique.assemble(collected_frames)
    self.log.info('collections: {}'.format(str(collections)))

    if collections:
        collection = collections[0]
        instance.data['collection'] = collection

    self.log.info('Finished render')

    return
def test_assemble_case_insensitive():
    '''Assemble collections ignoring casing.'''
    collections, _ = clique.assemble(
        ['head_v1.001.ext', 'HEAD_v1.002.ext', 'head_v1.003.ext'],
        case_sensitive=False
    )
    expected = [
        clique.Collection('head_v1.', '.ext', 3, indexes=set([1, 2, 3]))
    ]
    assert collections == expected
def create_instance(self, context, in_data):
    subset = in_data["subset"]
    instance = context.create_instance(subset)
    instance.data.update({
        "subset": subset,
        "asset": in_data["asset"],
        "label": subset,
        "name": subset,
        "family": in_data["family"],
        # "version": in_data.get("version", 1),
        "frameStart": in_data.get("representations", [None])[0].get(
            "frameStart", None),
        "frameEnd": in_data.get("representations", [None])[0].get(
            "frameEnd", None),
        "families": self.default_families or [],
    })
    self.log.info("collected instance: {}".format(pformat(instance.data)))
    self.log.info("parsing data: {}".format(pformat(in_data)))

    instance.data["destination_list"] = list()
    instance.data["representations"] = list()
    instance.data["source"] = "standalone publisher"

    for component in in_data["representations"]:
        component["destination"] = component["files"]
        component["stagingDir"] = component["stagingDir"]

        if isinstance(component["files"], list):
            collections, remainder = clique.assemble(component["files"])
            self.log.debug("collecting sequence: {}".format(collections))
            instance.data["frameStart"] = int(component["frameStart"])
            instance.data["frameEnd"] = int(component["frameEnd"])
            instance.data["fps"] = int(component["fps"])

        if component["preview"]:
            instance.data["families"].append("review")
            instance.data["repreProfiles"] = ["h264"]
            component["tags"] = ["review"]
            self.log.debug("Adding review family")

        if "psd" in component["name"]:
            instance.data["source"] = component["files"]
            self.log.debug("Adding image:background_batch family")

        instance.data["representations"].append(component)
def process(self, instance):
    node = instance[0]

    # This should be a ContextPlugin, but this is a workaround
    # for a bug in pyblish to run once for a family: issue #250
    context = instance.context
    key = "__hasRun{}".format(self.__class__.__name__)
    if context.data.get(key, False):
        return
    else:
        context.data[key] = True

    self.log.debug("instance collected: {}".format(instance.data))

    first_frame = instance.data.get("startFrame", None)
    last_frame = instance.data.get("endFrame", None)
    node_subset_name = instance.data.get("name", None)

    # swap path to stageDir
    temp_dir = self.staging_dir(instance).replace("\\", "/")
    output_dir = instance.data.get("outputDir")
    path = node['file'].value()
    node['file'].setValue(path.replace(output_dir, temp_dir))

    self.log.info("Starting render")
    self.log.info("Start frame: {}".format(first_frame))
    self.log.info("End frame: {}".format(last_frame))

    # Render frames
    nuke.execute(node_subset_name, int(first_frame), int(last_frame))

    # swap path back to publish path
    path = node['file'].value()
    node['file'].setValue(path.replace(temp_dir, output_dir))

    if "files" not in instance.data:
        instance.data["files"] = list()
    instance.data["files"] = [os.listdir(temp_dir)]

    self.log.info("Extracted instance '{0}' to: {1}".format(
        instance.name, output_dir))

    collections, remainder = clique.assemble(*instance.data['files'])
    self.log.info('collections: {}'.format(str(collections)))

    collection = collections[0]
    instance.data['collection'] = collection

    self.log.info('Finished render')

    return
def process(self, instance):
    for repre in instance.data.get('representations'):
        if not repre.get('files'):
            msg = ("no frames were collected, "
                   "you need to render them")
            self.log.error(msg)
            raise ValidationException(msg)

        collections, remainder = clique.assemble(repre["files"])
        self.log.info('collections: {}'.format(str(collections)))
        self.log.info('remainder: {}'.format(str(remainder)))

        collection = collections[0]

        frame_length = int(instance.data["frameEndHandle"]
                           - instance.data["frameStartHandle"] + 1)

        if frame_length != 1:
            if len(collections) != 1:
                msg = "There are multiple collections in the folder"
                self.log.error(msg)
                raise ValidationException(msg)

            if not collection.is_contiguous():
                msg = "Some frames appear to be missing"
                self.log.error(msg)
                raise ValidationException(msg)

        # if len(remainder) != 0:
        #     msg = "There are some extra files in folder"
        #     self.log.error(msg)
        #     raise ValidationException(msg)

        collected_frames_len = int(len(collection.indexes))
        self.log.info('frame_length: {}'.format(frame_length))
        self.log.info(
            'len(collection.indexes): {}'.format(collected_frames_len))

        if ("slate" in instance.data["families"]) \
                and (frame_length != collected_frames_len):
            collected_frames_len -= 1

        assert (collected_frames_len == frame_length), (
            "{} missing frames. Use repair to render all frames"
        ).format(__name__)

        instance.data['collection'] = collection

    return
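# For reporting which frames are actually missing, clique collections
# expose holes() alongside is_contiguous(); a minimal standalone sketch
# using only those documented calls:
import clique

collection = clique.Collection('render.', '.exr', 4, indexes=set([1, 2, 5]))
if not collection.is_contiguous():
    # holes() returns a new Collection holding the missing indexes.
    print(collection.holes().format('{ranges}'))  # 3-4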
def test_assemble_case_sensitive():
    '''Assemble collections respecting casing.'''
    collections, _ = clique.assemble(
        [
            'head_v1.001.ext', 'head_v1.002.ext',
            'HEAD_v1.003.ext', 'HEAD_v1.004.ext'
        ],
        case_sensitive=True
    )
    expected = [
        clique.Collection('head_v1.', '.ext', 3, indexes=set([1, 2])),
        clique.Collection('HEAD_v1.', '.ext', 3, indexes=set([3, 4]))
    ]
    assert collections == expected
def dir_processing(self, dir_path):
    collections, remainders = clique.assemble(os.listdir(dir_path))

    # Process remainders.
    for _rem in remainders:
        _rem_path = os.path.join(dir_path, _rem)
        # Go deeper if directory.
        if os.path.isdir(_rem_path):
            print(_rem_path)
            self.dir_processing(_rem_path)
        else:
            self.file_processing(_rem_path)

    # Process collections.
    for _coll in collections:
        _coll_path = os.path.join(dir_path, list(_coll).pop())
        self.file_processing(_coll_path)
def update(self, container, representation):
    node = container.pop("node")

    path = api.get_representation_path(representation)
    collections, remainder = clique.assemble(
        os.listdir(os.path.dirname(path))
    )
    files = []
    for f in list(collections[0]):
        # Join with the directory that was listed, not `self.fname`,
        # which may still point at the previously loaded version.
        files.append(
            os.path.join(os.path.dirname(path), f).replace("\\", "/")
        )

    harmony.send({
        "function": copy_files + replace_files,
        "args": [files, node, 1]
    })

    harmony.imprint(node, {"representation": str(representation["_id"])})
def update(self, container, representation):
    node = harmony.find_node_by_name(container["name"], "READ")

    path = api.get_representation_path(representation)
    collections, remainder = clique.assemble(
        os.listdir(os.path.dirname(path))
    )
    files = []
    if collections:
        for f in list(collections[0]):
            files.append(
                os.path.join(os.path.dirname(path), f).replace("\\", "/")
            )
    else:
        files.append(
            os.path.join(
                os.path.dirname(path), remainder[0]
            ).replace("\\", "/")
        )

    harmony.send({
        "function": copy_files + replace_files,
        "args": [files, node, 1]
    })

    # Colour node.
    func = """function func(args)
    {
        for( var i =0; i <= args[0].length - 1; ++i)
        {
            var red_color = new ColorRGBA(255, 0, 0, 255);
            var green_color = new ColorRGBA(0, 255, 0, 255);
            if (args[1] == "red"){
                node.setColor(args[0], red_color);
            }
            if (args[1] == "green"){
                node.setColor(args[0], green_color);
            }
        }
    }
    func
    """
    if pype.lib.is_latest(representation):
        harmony.send({"function": func, "args": [node, "green"]})
    else:
        harmony.send({"function": func, "args": [node, "red"]})

    harmony.imprint(node, {"representation": str(representation["_id"])})
def merge_items(self, in_item):
    self.parent_widget.working_start()
    items = []
    in_paths = in_item.in_data['files']
    paths = in_paths
    for item in self.components_list.widgets():
        if item.in_data['files'] == in_paths:
            items.append(item)
            continue
        copy_paths = paths.copy()
        copy_paths.extend(item.in_data['files'])
        collections, remainders = clique.assemble(copy_paths)
        if len(collections) == 1 and len(remainders) == 0:
            paths.extend(item.in_data['files'])
            items.append(item)

    for item in items:
        self._remove_item(item)
    self._process_paths(paths)
    self.parent_widget.working_stop()
def update(self, container, representation):
    """Update loaded containers.

    Args:
        container (dict): Container data.
        representation (dict): Representation data.

    """
    self_name = self.__class__.__name__
    node = container.get("nodes").pop()

    path = api.get_representation_path(representation)
    collections, remainder = clique.assemble(
        os.listdir(os.path.dirname(path))
    )
    files = []
    if collections:
        for f in list(collections[0]):
            files.append(
                os.path.join(os.path.dirname(path), f).replace("\\", "/")
            )
    else:
        files.append(
            os.path.join(
                os.path.dirname(path), remainder[0]
            ).replace("\\", "/")
        )

    harmony.send({
        "function": f"PypeHarmony.Loaders.{self_name}.replaceFiles",
        "args": [files, node, 1]
    })

    # Colour node.
    if pype.lib.is_latest(representation):
        harmony.send({
            "function": "PypeHarmony.setColor",
            "args": [node, [0, 255, 0, 255]]
        })
    else:
        harmony.send({
            "function": "PypeHarmony.setColor",
            "args": [node, [255, 0, 0, 255]]
        })

    harmony.imprint(node, {"representation": str(representation["_id"])})
def test_assemble():
    '''Assemble collections from arbitrary items.'''
    items = [
        'file.ext', 'single.1.ext',
        '1', '3',
        '001', '003',
        'head.001.tail', 'head.002.tail',
        'head.1.tail', 'head.2.tail',
        'head.001.tail3', 'head.002.tail3',
        'head_v01.tail', 'head_v02.tail',
        'head_v1.tail', 'head_v2.tail',
        'head1_010_v1.0001.tail', 'head1_010_v1.0002.tail',
        'head1_010_v2.0001.tail', 'head1_010_v2.0002.tail',
        'head1_020_v1.0001.tail', 'head1_020_v1.0002.tail'
    ]
    collections, remainder = clique.assemble(items)

    expected = [
        clique.Collection('', '', 0, indexes=set([1, 3])),
        clique.Collection('', '', 3, indexes=set([1, 3])),
        clique.Collection('head.', '.tail', 3, indexes=set([1, 2])),
        clique.Collection('head.', '.tail', 0, indexes=set([1, 2])),
        clique.Collection('head.', '.tail3', 3, indexes=set([1, 2])),
        clique.Collection('head_v', '.tail', 2, indexes=set([1, 2])),
        clique.Collection('head_v', '.tail', 0, indexes=set([1, 2])),
        clique.Collection('head1_010_v1.', '.tail', 4, indexes=set([1, 2])),
        clique.Collection('head1_010_v2.', '.tail', 4, indexes=set([1, 2])),
        clique.Collection('head1_020_v1.', '.tail', 4, indexes=set([1, 2])),
        clique.Collection('head1_010_v', '.0001.tail', 0, indexes=set([1, 2])),
        clique.Collection('head1_010_v', '.0002.tail', 0, indexes=set([1, 2])),
        clique.Collection('head1_', '_v1.0001.tail', 3, indexes=set([10, 20])),
        clique.Collection('head1_', '_v1.0002.tail', 3, indexes=set([10, 20]))
    ]
    assert sorted(collections) == sorted(expected)

    expected = ['file.ext', 'single.1.ext']
    assert sorted(remainder) == sorted(expected)
def _process_paths(self, in_paths):
    self.parent_widget.working_start()
    paths = self._get_all_paths(in_paths)
    collectionable_paths = []
    non_collectionable_paths = []
    # Iterate the expanded paths (folders resolved by `_get_all_paths`),
    # not the raw input, otherwise files inside dropped folders are
    # skipped.
    for path in paths:
        ext = os.path.splitext(path)[1]
        if ext in self.image_extensions:
            collectionable_paths.append(path)
        else:
            non_collectionable_paths.append(path)

    collections, remainders = clique.assemble(collectionable_paths)
    non_collectionable_paths.extend(remainders)

    for collection in collections:
        self._process_collection(collection)

    for remainder in non_collectionable_paths:
        self._process_remainder(remainder)

    self.parent_widget.working_stop()
def process(self, context):
    import os

    import hou
    import clique

    for instance in context:
        # Skip invalid instance families
        if "render" not in instance.data.get("families", []):
            continue

        node = instance[0]

        # Get expected output files.
        files = []
        if node.parm("trange").eval() == 0:
            frame = int(hou.frame())
            files.append(node.parm("vm_picture").evalAtFrame(frame))
        else:
            start = node.parm("f1").eval()
            end = node.parm("f2").eval()
            step = node.parm("f3").eval()
            for frame in range(int(start), int(end) + 1, int(step)):
                files.append(node.parm("vm_picture").evalAtFrame(frame))

        # Get extension
        ext = os.path.splitext(files[0])[1]

        # Create output collection.
        collections = clique.assemble(files, minimum_items=1)[0]
        collection = None
        for col in collections:
            if col.format("{tail}") == ext:
                collection = col

        if collection:
            instance.data["render"] = collection.format()
def filepath_from_disk(self, node, knob_value, knob_eval):
    """Return a valid filepath or return None"""
    filepath = None

    clique_import = False
    try:
        import clique
        clique_import = True
    except ImportError:
        pass

    if os.path.exists(knob_value):
        filepath = knob_value
    elif os.path.exists(knob_eval):
        filepath = knob_eval
    elif self.project_dir() is not None and \
            self.combined_relative_filepath_exists(knob_eval):
        filepath = self.combined_relative_filepath_exists(
            knob_eval, return_filepath=True
        )
    elif clique_import:
        files = []
        parent_dir = os.path.dirname(knob_eval)
        if not os.path.exists(parent_dir):
            return filepath

        for f in os.listdir(parent_dir):
            files.append(
                os.path.abspath(
                    os.path.join(parent_dir, f)
                ).replace("\\", "/")
            )

        collections = clique.assemble(files, minimum_items=1)[0]
        collection = None
        for c in collections:
            if c.match(knob_eval):
                collection = c

        # Guard against no matching collection being found; indexing
        # `None` would raise here.
        if collection:
            filepath = list(collection)[0]

    return filepath
def load(self, context, name=None, namespace=None, data=None):
    """Plugin entry point.

    Args:
        context (:class:`pyblish.api.Context`): Context.
        name (str, optional): Container name.
        namespace (str, optional): Container namespace.
        data (dict, optional): Additional data passed into loader.

    """
    fname = Path(self.fname)
    self_name = self.__class__.__name__
    collections, remainder = clique.assemble(
        os.listdir(fname.parent.as_posix())
    )
    files = []
    if collections:
        for f in list(collections[0]):
            files.append(fname.parent.joinpath(f).as_posix())
    else:
        files.append(fname.parent.joinpath(remainder[0]).as_posix())

    asset = context["asset"]["name"]
    subset = context["subset"]["name"]
    group_id = str(uuid.uuid4())
    read_node = harmony.send({
        "function": f"PypeHarmony.Loaders.{self_name}.importFiles",
        "args": [files, asset, subset, 1, group_id]
    })["result"]

    return harmony.containerise(
        f"{asset}_{subset}",
        namespace,
        read_node,
        context,
        self_name,
        nodes=[read_node]
    )
def modify_launch(event):
    """Return each entity in the selection as a data dictionary."""
    session = get_shared_session()

    templates = lucidity.discover_templates()
    file_paths = []
    paths_searched = []
    for item in event["data"].get("selection", []):
        entity = session.get(item["entityType"].title(), item["entityId"])
        template_name = templates[0].get_template_name(entity["parent"])
        for template in templates:
            if template.name == template_name:
                path = template.format(entity["parent"])
                for root, subFolder, files in os.walk(path):
                    path = os.path.abspath(root)
                    if path in paths_searched:
                        continue
                    else:
                        paths_searched.append(path)

                    for f in files:
                        if not f.endswith(".exr"):
                            continue
                        file_paths.append(
                            os.path.abspath(os.path.join(root, f)))

    collections = clique.assemble(list(set(file_paths)))[0]

    for collection in collections:
        event["data"]["items"].append({
            "label": os.path.basename(collection.format()),
            "value": list(collection)[0]
        })

    return event
def temporary_sequence(temporary_directory):
    '''Return temporary sequence of three files.

    Return the path using the `clique
    <http://clique.readthedocs.org/en/latest/>`_ format, for example::

        /tmp/asfjsfjoj3/%04d.jpg [1-3]

    '''
    items = []
    for index in range(3):
        item_path = os.path.join(
            temporary_directory, '{0:04d}.jpg'.format(index)
        )
        with open(item_path, 'w') as file_descriptor:
            file_descriptor.write(uuid.uuid4().hex)
            file_descriptor.close()

        items.append(item_path)

    collections, _ = clique.assemble(items)
    sequence_path = collections[0].format()

    return sequence_path
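# For reference, a standalone sketch (using only documented clique format
# tokens) of how the fixture's "%04d.jpg [1-3]" style path is produced;
# the default format pattern is '{head}{padding}{tail} [{ranges}]'.
import clique

collection = clique.Collection('/tmp/', '.jpg', 4, indexes=set([1, 2, 3]))
print(collection.format())                         # /tmp/%04d.jpg [1-3]
print(collection.format('{head}{padding}{tail}'))  # /tmp/%04d.jpg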
def process(self, context):
    import os

    # Import the submodule explicitly; `import pymel` alone does not
    # guarantee `pymel.core` is available.
    import pymel.core
    import clique

    drg = pymel.core.PyNode("defaultRenderGlobals")
    for node in pymel.core.ls(type="renderLayer"):
        if node.name().startswith("defaultRenderLayer"):
            continue

        instance = context.create_instance(name=node.name())
        instance.data["families"] = ["rendersetup"]
        instance.data["family"] = "img"
        instance.add(node)

        publish_state = node.renderable.get()
        instance.data["publish"] = publish_state

        label = "{0} - rendersetup".format(node.name())
        instance.data["label"] = label

        # Generate collection
        first_image, last_image = pymel.core.renderSettings(
            firstImageName=True,
            lastImageName=True,
            fullPath=True,
            layer=node.name()
        )
        collections = clique.assemble([first_image], minimum_items=1)[0]
        ext = os.path.splitext(first_image)[1]
        collection = collections[0]
        for col in collections:
            if col.tail == ext:
                collection = col

        render_globals = pymel.core.PyNode("defaultRenderGlobals")
        start_frame = int(render_globals.startFrame.get())
        end_frame = int(render_globals.endFrame.get())
        step_frame = int(render_globals.byFrameStep.get())

        fmt = collection.format("{head}{padding}{tail}")
        for count in range(start_frame, end_frame + 1, step_frame):
            f = fmt % count
            collection.add(f)
        collection.padding = len(str(end_frame))

        instance.data["endFrame"] = end_frame
        instance.data["startFrame"] = start_frame
        instance.data["stepFrame"] = step_frame
        instance.data["collection"] = collection

        # Adding renderer to families
        instance.data["families"].append(drg.currentRenderer.get())

        # Assign toggle method
        def instance_toggled(instance, value):
            instance[0].renderable.set(value)
        instance.data["instanceToggled"] = instance_toggled
def test_assemble_remainder_has_no_duplicates():
    '''Assemble items and check remainder contains no duplicates.'''
    items = ['00-11-22-33-44-55.jpg']
    collections, remainder = clique.assemble(items, minimum_items=2)
    assert remainder == items
def test_assemble_assume_padded(items, assume_padded, expected):
    '''Assemble items according to assume padded option.'''
    collections, remainder = clique.assemble(
        items, assume_padded_when_ambiguous=assume_padded
    )
    assert collections == expected
def test_assemble_no_patterns():
    '''Assemble with no patterns.'''
    assert clique.assemble(['1', '2'], patterns=[]) == ([], ['1', '2'])
def process(self, context):
    import os

    import clique
    import nuke

    # creating instances per read node
    for node in nuke.allNodes():
        if node.Class() != "Read":
            continue

        if not node.metadata():
            continue

        # Determine output type
        output_type = "img"
        movie_formats = ["ari", "avi", "gif", "mov", "r3d"]
        if node.metadata()["input/filereader"] in movie_formats:
            output_type = "mov"
        scene_formats = ["psd"]
        if node.metadata()["input/filereader"] in scene_formats:
            output_type = "scene"

        # Create instance
        instance = context.create_instance(node.name())
        instance.data["families"] = [output_type, "local", "output"]
        instance.data["family"] = "read"
        instance.add(node)

        path = nuke.filename(node)

        # Adding/Checking publish attribute
        if "publish" not in node.knobs():
            knob = nuke.Boolean_Knob("publish", "Publish")
            knob.setValue(False)
            node.addKnob(knob)

        # Compare against selection
        selection = instance.context.data.get("selection", [])
        publish = bool(node["publish"].getValue())
        if selection:
            if list(set(instance) & set(selection)):
                publish = True
            else:
                publish = False

        instance.data["publish"] = publish

        # Collecting file paths
        label = "{0} - {1}".format(node.name(), os.path.basename(path))
        if output_type == "img":
            # This could be improved because it does not account for "#"
            # being in a sequence.
            if "#" in path:
                padding = path.count("#")
                path = path.replace(
                    "#" * padding, "%{0:0>2}d".format(padding)
                )

            try:
                collection = clique.parse(path + " []")
            except ValueError as e:
                collections, remainder = clique.assemble(
                    [path],
                    minimum_items=1,
                    patterns=[clique.PATTERNS['frames']]
                )
                if collections:
                    collection = collections[0]
                else:
                    context.remove(instance)
                    self.log.warning(
                        "Collection error on \"{0}\": "
                        "{1}".format(node.name(), e)
                    )
                    continue

            for f in os.listdir(os.path.dirname(path)):
                file_path = os.path.join(os.path.dirname(path), f)
                file_path = file_path.replace("\\", "/")
                if collection.match(file_path):
                    collection.add(file_path)

            # Limit to frame range; cast knob values to int since
            # range() does not accept floats.
            first = int(node["first"].value())
            last = int(node["last"].value())
            indexes = list(collection.indexes)
            collection.indexes.clear()
            collection.indexes.update(
                set(indexes) & set([x for x in range(first, last + 1)])
            )

            instance.data["collection"] = collection

            label = "{0} - {1}".format(
                node.name(), os.path.basename(collection.format())
            )
        else:
            instance.data["output_path"] = path

        instance.data["label"] = label

        def instanceToggled(instance, value):
            # Removing and adding the knob to support NukeAssist, where
            # you can't modify the knob value directly.
            instance[0].removeKnob(instance[0]["publish"])
            knob = nuke.Boolean_Knob("publish", "Publish")
            knob.setValue(value)
            instance[0].addKnob(knob)

        instance.data["instanceToggled"] = instanceToggled
def process(self, context):
    import os

    import clique
    import pymel.core as pm

    # Collect sets named starting with "remote".
    remote_members = []
    for object_set in pm.ls(type="objectSet"):
        if object_set.name().lower().startswith("remote"):
            remote_members.extend(object_set.members())

    # Getting render layers data.
    data = {}
    render_cams = []
    drg = pm.PyNode("defaultRenderGlobals")
    for layer in pm.ls(type="renderLayer"):
        # skipping defaultRenderLayers
        if layer.name().endswith("defaultRenderLayer"):
            continue

        layer_data = {}
        render_cams = []

        if layer.adjustments.get(multiIndices=True):
            for count in layer.adjustments.get(multiIndices=True):
                if not layer.adjustments[count].plug.connections():
                    continue

                if layer.adjustments[count].plug.connections()[0] == drg:
                    attr = layer.adjustments[count].plug
                    attr = attr.connections(plugs=True)[0]
                    layer_value = layer.adjustments[count].value.get()
                    layer_data[attr.name(includeNode=False)] = layer_value

                plug = layer.adjustments[count].plug
                for cam_attr in plug.connections(plugs=True, type="camera"):
                    renderable = cam_attr.endswith("renderable")
                    layer_value = layer.adjustments[count].value.get()
                    if renderable and layer_value == 1.0:
                        name = cam_attr.split(".")[0]
                        render_cams.append(pm.PyNode(name))

                render_pass = layer.connections(type="renderPass")
                layer_data["renderpasses"] = render_pass
        else:
            render_pass = layer.connections(type="renderPass")
            layer_data["renderpasses"] = render_pass

        layer_data["cameras"] = render_cams
        data[layer.name()] = layer_data

    # Create instances
    for layer in data:
        node = pm.PyNode(layer)

        # Checking instance type.
        instance_type = "local"
        if node in remote_members:
            instance_type = "remote"

        instance = context.create_instance(name=layer)
        instance.data["families"] = ["renderlayer", instance_type, "img"]
        instance.data["family"] = "img"
        instance.data.update(data[layer])
        instance.add(node)

        publish_state = pm.PyNode(layer).renderable.get()
        instance.data["publish"] = publish_state

        label = "{0} - renderlayer - {1}".format(layer, instance_type)
        instance.data["label"] = label

        # Generate collection
        first_image, last_image = pm.renderSettings(
            firstImageName=True,
            lastImageName=True,
            fullPath=True,
            layer=layer
        )

        # Special case for vray that has its own extension setting.
        renderer = drg.currentRenderer.get()
        if "currentRenderer" in data[layer]:
            renderer = data[layer]["currentRenderer"]
        if renderer == "vray":
            render_settings = pm.PyNode("vraySettings")

            # Assuming ".png" if nothing is set.
            # This happens when vray is initialized with the scene.
            ext = ".png"
            if render_settings.imageFormatStr.get():
                ext = "."
                ext += render_settings.imageFormatStr.get().split(" ")[0]

            first_image = os.path.splitext(first_image)[0] + ext

        # Adding renderer as family
        instance.data["families"] += [renderer]
        instance.data["renderer"] = renderer

        # Adding collection
        collections = clique.assemble([first_image], minimum_items=1)[0]
        ext = os.path.splitext(first_image)[1]
        collection = collections[0]
        for col in collections:
            if col.tail == ext:
                collection = col

        render_globals = pm.PyNode("defaultRenderGlobals")
        start_frame = int(render_globals.startFrame.get())
        end_frame = int(render_globals.endFrame.get())
        step_frame = int(render_globals.byFrameStep.get())

        framerate = context.data["framerate"]
        if "endFrame" in data[layer]:
            end_frame = int(data[layer]["endFrame"] * framerate)
        if "startFrame" in data[layer]:
            start_frame = int(data[layer]["startFrame"] * framerate)

        fmt = collection.format("{head}{padding}{tail}")
        for count in range(start_frame, end_frame + 1, step_frame):
            f = fmt % count
            collection.add(f)

        instance.data["endFrame"] = end_frame
        instance.data["startFrame"] = start_frame
        instance.data["stepFrame"] = step_frame
        instance.data["collection"] = collection

        def instance_toggled(instance, value):
            instance[0].renderable.set(value)
        instance.data["instanceToggled"] = instance_toggled
def process(self, instance):
    """Plugin entry point."""
    instance.data["toBeRenderedOn"] = "deadline"
    context = instance.context

    self._instance = instance
    self.payload_skeleton = copy.deepcopy(payload_skeleton_template)
    self._deadline_url = (context.data["system_settings"]["modules"]
                          ["deadline"]["DEADLINE_REST_URL"])
    assert self._deadline_url, "Requires DEADLINE_REST_URL"

    workspace = context.data["workspaceDir"]
    anatomy = context.data['anatomy']

    filepath = None

    # Handle render/export from published scene or not ------------------
    if self.use_published:
        for i in context:
            if "workfile" in i.data["families"]:
                assert i.data["publish"] is True, (
                    "Workfile (scene) must be published along")
                template_data = i.data.get("anatomyData")
                rep = i.data.get("representations")[0].get("name")
                template_data["representation"] = rep
                template_data["ext"] = rep
                template_data["comment"] = None
                anatomy_filled = anatomy.format(template_data)
                template_filled = anatomy_filled["publish"]["path"]
                filepath = os.path.normpath(template_filled)
                self.log.info(
                    "Using published scene for render {}".format(filepath))

                if not os.path.exists(filepath):
                    self.log.error("published scene does not exist!")
                    # A bare `raise` has no active exception here; raise
                    # an explicit error instead.
                    raise RuntimeError("published scene does not exist!")

                # now we need to switch scene in expected files
                # because <scene> token will now point to published
                # scene file and that might differ from current one
                new_scene = os.path.splitext(
                    os.path.basename(filepath))[0]
                orig_scene = os.path.splitext(
                    os.path.basename(context.data["currentFile"]))[0]
                exp = instance.data.get("expectedFiles")

                if isinstance(exp[0], dict):
                    # we have aovs and we need to iterate over them
                    new_exp = {}
                    for aov, files in exp[0].items():
                        replaced_files = []
                        for f in files:
                            replaced_files.append(
                                f.replace(orig_scene, new_scene))
                        new_exp[aov] = replaced_files
                    instance.data["expectedFiles"] = [new_exp]
                else:
                    new_exp = []
                    for f in exp:
                        new_exp.append(f.replace(orig_scene, new_scene))
                    instance.data["expectedFiles"] = [new_exp]
                self.log.info("Scene name was switched {} -> {}".format(
                    orig_scene, new_scene))

    all_instances = []
    for result in context.data["results"]:
        if (result["instance"] is not None and
                result["instance"] not in all_instances):
            all_instances.append(result["instance"])

    # fallback if nothing was set
    if not filepath:
        self.log.warning("Falling back to workfile")
        filepath = context.data["currentFile"]

    self.log.debug(filepath)

    # Gather needed data ------------------------------------------------
    filename = os.path.basename(filepath)
    comment = context.data.get("comment", "")
    dirname = os.path.join(workspace, "renders")
    renderlayer = instance.data['setMembers']    # rs_beauty
    deadline_user = context.data.get("user", getpass.getuser())
    jobname = "%s - %s" % (filename, instance.name)

    # Get the variables depending on the renderer
    render_variables = get_renderer_variables(renderlayer, dirname)
    filename_0 = render_variables["filename_0"]
    if self.use_published:
        new_scene = os.path.splitext(filename)[0]
        orig_scene = os.path.splitext(
            os.path.basename(context.data["currentFile"]))[0]
        filename_0 = render_variables["filename_0"].replace(
            orig_scene, new_scene)

    output_filename_0 = filename_0

    # Create render folder ----------------------------------------------
    try:
        # Ensure render folder exists
        os.makedirs(dirname)
    except OSError:
        pass

    # Fill in common data to payload ------------------------------------
    payload_data = {}
    payload_data["filename"] = filename
    payload_data["filepath"] = filepath
    payload_data["jobname"] = jobname
payload_data["deadline_user"] = deadline_user payload_data["comment"] = comment payload_data["output_filename_0"] = output_filename_0 payload_data["render_variables"] = render_variables payload_data["renderlayer"] = renderlayer payload_data["workspace"] = workspace payload_data["dirname"] = dirname self.log.info("--- Submission data:") for k, v in payload_data.items(): self.log.info("- {}: {}".format(k, v)) self.log.info("-" * 20) frame_pattern = self.payload_skeleton["JobInfo"]["Frames"] self.payload_skeleton["JobInfo"]["Frames"] = frame_pattern.format( start=int(self._instance.data["frameStartHandle"]), end=int(self._instance.data["frameEndHandle"]), step=int(self._instance.data["byFrameStep"])) self.payload_skeleton["JobInfo"]["Plugin"] = self._instance.data.get( "mayaRenderPlugin", "MayaPype") self.payload_skeleton["JobInfo"]["BatchName"] = filename # Job name, as seen in Monitor self.payload_skeleton["JobInfo"]["Name"] = jobname # Arbitrary username, for visualisation in Monitor self.payload_skeleton["JobInfo"]["UserName"] = deadline_user # Set job priority self.payload_skeleton["JobInfo"]["Priority"] = \ self._instance.data.get("priority", 50) if self.group != "none": self.payload_skeleton["JobInfo"]["Group"] = self.group if self.limit_groups: self.payload_skeleton["JobInfo"]["LimitGroups"] = \ ",".join(self.limit_groups) # Optional, enable double-click to preview rendered # frames from Deadline Monitor self.payload_skeleton["JobInfo"]["OutputDirectory0"] = \ os.path.dirname(output_filename_0).replace("\\", "/") self.payload_skeleton["JobInfo"]["OutputFilename0"] = \ output_filename_0.replace("\\", "/") self.payload_skeleton["JobInfo"]["Comment"] = comment self.payload_skeleton["PluginInfo"]["RenderLayer"] = renderlayer # Adding file dependencies. 
    dependencies = instance.context.data["fileDependencies"]
    dependencies.append(filepath)
    if self.asset_dependencies:
        for dependency in dependencies:
            key = "AssetDependency" + str(dependencies.index(dependency))
            self.payload_skeleton["JobInfo"][key] = dependency

    # Handle environments -----------------------------------------------
    # We need those to pass them to pype for it to set correct context
    keys = [
        "FTRACK_API_KEY",
        "FTRACK_API_USER",
        "FTRACK_SERVER",
        "AVALON_PROJECT",
        "AVALON_ASSET",
        "AVALON_TASK",
        "AVALON_APP_NAME",
        "PYPE_USERNAME",
        "PYPE_DEV",
        "PYPE_LOG_NO_COLORS"
    ]
    environment = dict(
        {key: os.environ[key] for key in keys if key in os.environ},
        **api.Session)
    environment["PYPE_LOG_NO_COLORS"] = "1"
    environment["PYPE_MAYA_VERSION"] = cmds.about(v=True)
    # to recognize job from PYPE for turning Event On/Off
    environment["PYPE_RENDER_JOB"] = "1"
    self.payload_skeleton["JobInfo"].update({
        "EnvironmentKeyValue%d" % index: "{key}={value}".format(
            key=key,
            value=environment[key]
        ) for index, key in enumerate(environment)
    })

    # Add options from RenderGlobals -------------------------------------
    render_globals = instance.data.get("renderGlobals", {})
    self.payload_skeleton["JobInfo"].update(render_globals)

    # Submit preceding export jobs ---------------------------------------
    export_job = None
    assert not all(x in instance.data["families"]
                   for x in ['vrayscene', 'assscene']), (
        "Vray Scene and Ass Scene options are mutually exclusive")
    if "vrayscene" in instance.data["families"]:
        export_job = self._submit_export(payload_data, "vray")

    if "assscene" in instance.data["families"]:
        export_job = self._submit_export(payload_data, "arnold")

    # Prepare main render job --------------------------------------------
    if "vrayscene" in instance.data["families"]:
        payload = self._get_vray_render_payload(payload_data)
    elif "assscene" in instance.data["families"]:
        payload = self._get_arnold_render_payload(payload_data)
    else:
        payload = self._get_maya_payload(payload_data)

    # Add export job as dependency ---------------------------------------
    if export_job:
        payload["JobInfo"]["JobDependency0"] = export_job

    # Add list of expected files to job ----------------------------------
    exp = instance.data.get("expectedFiles")
    exp_index = 0
    output_filenames = {}

    if isinstance(exp[0], dict):
        # we have aovs and we need to iterate over them
        for _aov, files in exp[0].items():
            col, rem = clique.assemble(files)
            if not col and rem:
                # we couldn't find any collections but have
                # individual files.
                assert len(rem) == 1, (
                    "Found multiple non related files "
                    "to render, don't know what to do "
                    "with them.")
                output_file = rem[0]
                if not instance.data.get("tileRendering"):
                    payload['JobInfo'][
                        'OutputFilename' + str(exp_index)] = output_file
            else:
                output_file = col[0].format('{head}{padding}{tail}')
                if not instance.data.get("tileRendering"):
                    payload['JobInfo'][
                        'OutputFilename' + str(exp_index)] = output_file

            output_filenames[
                'OutputFilename' + str(exp_index)] = output_file
            exp_index += 1
    else:
        col, rem = clique.assemble(exp)
        if not col and rem:
            # we couldn't find any collections but have
            # individual files.
            assert len(rem) == 1, (
                "Found multiple non related files "
                "to render, don't know what to do "
                "with them.")
            output_file = rem[0]
            if not instance.data.get("tileRendering"):
                payload['JobInfo'][
                    'OutputFilename' + str(exp_index)] = output_file
        else:
            output_file = col[0].format('{head}{padding}{tail}')
            if not instance.data.get("tileRendering"):
                payload['JobInfo'][
                    'OutputFilename' + str(exp_index)] = output_file

        output_filenames['OutputFilename' + str(exp_index)] = output_file

    plugin = payload["JobInfo"]["Plugin"]
    self.log.info("using render plugin : {}".format(plugin))

    # Store output dir for unified publisher (filesequence)
    instance.data["outputDir"] = os.path.dirname(output_filename_0)

    self.preflight_check(instance)

    # Prepare tiles data -------------------------------------------------
    if instance.data.get("tileRendering"):
        # if we have sequence of files, we need to create tile job for
        # every frame
        payload["JobInfo"]["TileJob"] = True
        payload["JobInfo"]["TileJobTilesInX"] = instance.data.get("tilesX")
        payload["JobInfo"]["TileJobTilesInY"] = instance.data.get("tilesY")
        payload["PluginInfo"]["ImageHeight"] = instance.data.get(
            "resolutionHeight")
        payload["PluginInfo"]["ImageWidth"] = instance.data.get(
            "resolutionWidth")
        payload["PluginInfo"]["RegionRendering"] = True

        assembly_payload = {
            "AuxFiles": [],
            "JobInfo": {
                "BatchName": payload["JobInfo"]["BatchName"],
                "Frames": 1,
                "Name": "{} - Tile Assembly Job".format(
                    payload["JobInfo"]["Name"]),
                "OutputDirectory0":
                    payload["JobInfo"]["OutputDirectory0"].replace(
                        "\\", "/"),
                "Plugin": self.tile_assembler_plugin,
                "MachineLimit": 1
            },
            "PluginInfo": {
                "CleanupTiles": 1,
                "ErrorOnMissing": True
            }
        }
        assembly_payload["JobInfo"].update(output_filenames)
        assembly_payload["JobInfo"]["Priority"] = self._instance.data.get(
            "priority", 50)
        assembly_payload["JobInfo"]["UserName"] = deadline_user

        frame_payloads = []
        assembly_payloads = []

        R_FRAME_NUMBER = re.compile(
            r".+\.(?P<frame>[0-9]+)\..+")  # noqa: N806
        REPL_FRAME_NUMBER = re.compile(
            r"(.+\.)([0-9]+)(\..+)")  # noqa: N806

        if isinstance(exp[0], dict):
            # we have aovs and we need to iterate over them
            # get files from `beauty`
            files = exp[0].get("beauty")
            # assembly files are used for assembly jobs as we need to put
            # together all AOVs
            assembly_files = list(
                itertools.chain.from_iterable(
                    [f for _, f in exp[0].items()]))
            if not files:
                # if beauty doesn't exists, use first aov we found
                files = exp[0].get(list(exp[0].keys())[0])
        else:
            files = exp
            assembly_files = files

        frame_jobs = {}

        file_index = 1
        for file in files:
            frame = re.search(R_FRAME_NUMBER, file).group("frame")
            new_payload = copy.deepcopy(payload)
            new_payload["JobInfo"]["Name"] = \
                "{} (Frame {} - {} tiles)".format(
                    payload["JobInfo"]["Name"],
                    frame,
                    instance.data.get("tilesX") * instance.data.get("tilesY")
                )
            self.log.info("... preparing job {}".format(
                new_payload["JobInfo"]["Name"]))
            new_payload["JobInfo"]["TileJobFrame"] = frame

            tiles_data = _format_tiles(
                file, 0,
                instance.data.get("tilesX"),
                instance.data.get("tilesY"),
                instance.data.get("resolutionWidth"),
                instance.data.get("resolutionHeight"),
                payload["PluginInfo"]["OutputFilePrefix"])[0]

            new_payload["JobInfo"].update(tiles_data["JobInfo"])
            new_payload["PluginInfo"].update(tiles_data["PluginInfo"])

            # sha256 needs bytes, not str.
            job_hash = hashlib.sha256(
                "{}_{}".format(file_index, file).encode("utf-8"))
            frame_jobs[frame] = job_hash.hexdigest()

            new_payload["JobInfo"]["ExtraInfo0"] = job_hash.hexdigest()
            new_payload["JobInfo"]["ExtraInfo1"] = file

            frame_payloads.append(new_payload)
            file_index += 1

        file_index = 1
        for file in assembly_files:
            frame = re.search(R_FRAME_NUMBER, file).group("frame")
            new_assembly_payload = copy.deepcopy(assembly_payload)
            new_assembly_payload["JobInfo"]["Name"] = \
                "{} (Frame {})".format(
                    assembly_payload["JobInfo"]["Name"], frame)
            new_assembly_payload["JobInfo"]["OutputFilename0"] = re.sub(
                REPL_FRAME_NUMBER,
                "\\1{}\\3".format("#" * len(frame)), file)
            new_assembly_payload["PluginInfo"]["Renderer"] = \
                self._instance.data["renderer"]
            new_assembly_payload["JobInfo"]["ExtraInfo0"] = \
                frame_jobs[frame]
            new_assembly_payload["JobInfo"]["ExtraInfo1"] = file
            assembly_payloads.append(new_assembly_payload)
            file_index += 1

        self.log.info("Submitting tile job(s) [{}] ...".format(
            len(frame_payloads)))

        url = "{}/api/jobs".format(self._deadline_url)
        tiles_count = (
            instance.data.get("tilesX") * instance.data.get("tilesY"))

        for tile_job in frame_payloads:
            response = self._requests_post(url, json=tile_job)
            if not response.ok:
                raise Exception(response.text)

            job_id = response.json()["_id"]
            hash = response.json()["Props"]["Ex0"]

            for assembly_job in assembly_payloads:
                if assembly_job["JobInfo"]["ExtraInfo0"] == hash:
                    assembly_job["JobInfo"]["JobDependency0"] = job_id

        for assembly_job in assembly_payloads:
            file = assembly_job["JobInfo"]["ExtraInfo1"]
            # write assembly job config files
            now = datetime.now()

            config_file = os.path.join(
                os.path.dirname(output_filename_0),
                "{}_config_{}.txt".format(
                    os.path.splitext(file)[0],
                    now.strftime("%Y_%m_%d_%H_%M_%S")))
            try:
                if not os.path.isdir(os.path.dirname(config_file)):
                    os.makedirs(os.path.dirname(config_file))
            except OSError:
                # directory is not available
                self.log.warning("Path is unreachable: `{}`".format(
                    os.path.dirname(config_file)))

            # add config file as job auxFile
            assembly_job["AuxFiles"] = [config_file]

            with open(config_file, "w") as cf:
                print("TileCount={}".format(tiles_count), file=cf)
                print("ImageFileName={}".format(file), file=cf)
                print("ImageWidth={}".format(
                    instance.data.get("resolutionWidth")), file=cf)
                print("ImageHeight={}".format(
                    instance.data.get("resolutionHeight")), file=cf)

                tiles = _format_tiles(
                    file, 0,
                    instance.data.get("tilesX"),
                    instance.data.get("tilesY"),
                    instance.data.get("resolutionWidth"),
                    instance.data.get("resolutionHeight"),
                    payload["PluginInfo"]["OutputFilePrefix"])[1]
                # A bare `sorted(tiles)` discards its result; sort the
                # items as they are written for deterministic output.
                for k, v in sorted(tiles.items()):
                    print("{}={}".format(k, v), file=cf)

        job_idx = 1
        instance.data["assemblySubmissionJobs"] = []
        for ass_job in assembly_payloads:
            self.log.info("submitting assembly job {} of {}".format(
                job_idx, len(assembly_payloads)))
            self.log.debug(json.dumps(ass_job, indent=4, sort_keys=True))
            response = self._requests_post(url, json=ass_job)
            if not response.ok:
                raise Exception(response.text)

            instance.data["assemblySubmissionJobs"].append(
                response.json()["_id"])
            job_idx += 1
instance.data["jobBatchName"] = payload["JobInfo"]["BatchName"] self.log.info("Setting batch name on instance: {}".format( instance.data["jobBatchName"])) else: # Submit job to farm -------------------------------------------- self.log.info("Submitting ...") self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) # E.g. http://192.168.0.1:8082/api/jobs url = "{}/api/jobs".format(self._deadline_url) response = self._requests_post(url, json=payload) if not response.ok: raise Exception(response.text) instance.data["deadlineSubmissionJob"] = response.json()
def test_assemble_boundary_padding(items, expected):
    '''Assemble items across a padding boundary.'''
    collections, _ = clique.assemble(items)
    assert sorted(collections) == sorted(expected)