Example #1
    def get_anatomy_filled(self):
        root_path = api.registered_root()
        project_name = self._S["AVALON_PROJECT"]
        asset_name = self._S["AVALON_ASSET"]

        io.install()
        project_entity = io.find_one({
            "type": "project",
            "name": project_name
        })
        assert project_entity, (
            "Project '{0}' was not found."
        ).format(project_name)
        log.debug("Collected Project \"{}\"".format(project_entity))

        asset_entity = io.find_one({
            "type": "asset",
            "name": asset_name,
            "parent": project_entity["_id"]
        })
        assert asset_entity, (
            "No asset found by the name '{0}' in project '{1}'"
        ).format(asset_name, project_name)

        project_name = project_entity["name"]

        log.info(
            "Anatomy object collected for project \"{}\".".format(project_name)
        )

        hierarchy_items = asset_entity["data"]["parents"]
        hierarchy = ""
        if hierarchy_items:
            hierarchy = os.path.join(*hierarchy_items)

        template_data = {
            "root": root_path,
            "project": {
                "name": project_name,
                "code": project_entity["data"].get("code")
            },
            "asset": asset_entity["name"],
            "hierarchy": hierarchy.replace("\\", "/"),
            "task": self._S["AVALON_TASK"],
            "ext": self.workfile_ext,
            "version": 1,
            "username": os.getenv("PYPE_USERNAME", "").strip()
        }

        avalon_app_name = os.environ.get("AVALON_APP_NAME")
        if avalon_app_name:
            application_def = lib.get_application(avalon_app_name)
            app_dir = application_def.get("application_dir")
            if app_dir:
                template_data["app"] = app_dir

        anatomy = Anatomy(project_name)
        anatomy_filled = anatomy.format_all(template_data).get_solved()

        return anatomy_filled
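
A minimal usage sketch of the solved result, assuming the project's anatomy defines a "work" template with a "path" key (both keys are illustrative, not guaranteed by the snippet above):

# Hedged sketch: "work" and "path" are assumed anatomy template keys.
anatomy_filled = self.get_anatomy_filled()

# get_solved() returns nested mappings of fully formatted strings, so
# resolved paths read like plain dictionaries.
workfile_path = anatomy_filled["work"]["path"]
log.info("Resolved workfile path: {}".format(workfile_path))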
Example #2
    def process(self, context):
        root_path = api.registered_root()
        task_name = api.Session["AVALON_TASK"]

        project_entity = context.data["projectEntity"]
        asset_entity = context.data["assetEntity"]

        project_name = project_entity["name"]

        context.data["anatomy"] = Anatomy(project_name)
        self.log.info("Anatomy object collected for project \"{}\".".format(
            project_name))

        hierarchy_items = asset_entity["data"]["parents"]
        hierarchy = ""
        if hierarchy_items:
            hierarchy = os.path.join(*hierarchy_items)

        context_data = {
            "root": root_path,
            "project": {
                "name": project_name,
                "code": project_entity["data"].get("code")
            },
            "asset": asset_entity["name"],
            "hierarchy": hierarchy.replace("\\", "/"),
            "task": task_name,
            "username": context.data["user"]
        }

        avalon_app_name = os.environ.get("AVALON_APP_NAME")
        if avalon_app_name:
            application_def = lib.get_application(avalon_app_name)
            app_dir = application_def.get("application_dir")
            if app_dir:
                context_data["app"] = app_dir

        datetime_data = context.data.get("datetimeData") or {}
        context_data.update(datetime_data)

        context.data["anatomyData"] = context_data

        self.log.info("Global anatomy Data collected")
        self.log.debug(json.dumps(context_data, indent=4))
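
A hedged sketch of how a later plugin might consume the collected data; the plugin itself is hypothetical, but Anatomy.format is the same call used in Example #4 below:

# Hypothetical downstream plugin filling templates with the collected data.
def process(self, instance):
    anatomy = instance.context.data["anatomy"]
    anatomy_data = dict(instance.context.data["anatomyData"])
    anatomy_data["version"] = 1  # illustrative extra key for this sketch
    filled = anatomy.format(anatomy_data)
    self.log.debug(filled)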
Example #3
    def collect_compatible_actions(self, actions):
        """Collect all actions which are compatible with the environment

        Each compatible action will be translated to a dictionary to ensure
        the action can be visualized in the launcher.

        Args:
            actions (list): list of classes

        Returns:
            list: collection of dictionaries sorted on order and name
        """

        # Build a session from the current frame once; it does not depend
        # on the individual action being tested
        frame = self.current_frame()
        session = {
            "AVALON_{}".format(key.upper()): value
            for key, value in frame.get("environment", {}).items()
        }
        session["AVALON_PROJECTS"] = api.registered_root()

        compatible = []
        for Action in actions:
            if not Action().is_compatible(session):
                continue

            compatible.append({
                "name": str(Action.name),
                "icon": str(Action.icon or "cube"),
                "label": str(Action.label or Action.name),
                "color": getattr(Action, "color", None),
                "order": Action.order
            })

        # Sort by order and name
        compatible = sorted(compatible,
                            key=lambda action:
                            (action["order"], action["name"]))

        return compatible
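
For context, a minimal hypothetical action that would pass through this loop; the attribute names mirror exactly what collect_compatible_actions reads:

class OpenWorkdir:
    """Hypothetical launcher action, for illustration only."""
    name = "open_workdir"
    label = "Open Workdir"
    icon = "folder-open"
    color = None
    order = 10

    def is_compatible(self, session):
        # Only offer the action once a project is set in the session.
        return bool(session.get("AVALON_PROJECT"))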
Example #4
    def _get_destination_path(self, asset, project):
        root = api.registered_root()
        PROJECT = api.Session["AVALON_PROJECT"]
        hierarchy = ""
        parents = asset['data']['parents']
        if parents:
            hierarchy = os.path.join(*parents)

        template_data = {
            "root": root,
            "project": {
                "name": PROJECT,
                "code": project['data']['code']
            },
            "silo": asset.get('silo'),
            "asset": asset['name'],
            "family": 'texture',
            "subset": 'Main',
            "hierarchy": hierarchy
        }
        anatomy = Anatomy()
        anatomy_filled = os.path.normpath(
            anatomy.format(template_data)['texture']['path'])
        return anatomy_filled
Example #5
File: compat.py Project: yazici/core
def update(container, version=-1):
    """Update `container` to `version`

    Deprecated; this functionality is replaced by `api.update()`

    This function relies on a container being referenced. At the time of this
    writing, all assets - models, rigs, animations, shaders - are referenced
    and should pose no problem. But should there be an asset that isn't
    referenced, this function will need an update.

    Arguments:
        container (avalon-core:container-1.0): Container to update,
            from `host.ls()`.
        version (int, optional): Update the container to this version.
            If no version is passed, the latest is assumed.

    """

    from avalon import io
    from avalon import api

    node = container["objectName"]

    # Assume asset has been referenced
    reference_node = next((node for node in cmds.sets(node, query=True)
                          if cmds.nodeType(node) == "reference"), None)

    assert reference_node, ("Imported container not supported; "
                            "container must be referenced.")

    current_representation = io.find_one({
        "_id": io.ObjectId(container["representation"])
    })

    assert current_representation is not None, "This is a bug"

    version_, subset, asset, project = io.parenthood(current_representation)

    if version == -1:
        new_version = io.find_one({
            "type": "version",
            "parent": subset["_id"]
        }, sort=[("name", -1)])
    else:
        new_version = io.find_one({
            "type": "version",
            "parent": subset["_id"],
            "name": version,
        })

    assert new_version is not None, "This is a bug"

    new_representation = io.find_one({
        "type": "representation",
        "parent": new_version["_id"],
        "name": current_representation["name"]
    })

    template_publish = project["config"]["template"]["publish"]
    fname = template_publish.format(**{
        "root": api.registered_root(),
        "project": project["name"],
        "asset": asset["name"],
        "silo": asset["silo"],
        "subset": subset["name"],
        "version": new_version["name"],
        "representation": current_representation["name"],
    })

    file_type = {
        "ma": "mayaAscii",
        "mb": "mayaBinary",
        "abc": "Alembic"
    }.get(new_representation["name"])

    assert file_type, ("Unsupported representation: %s" % new_representation)

    assert os.path.exists(fname), "%s does not exist." % fname
    cmds.file(fname, loadReference=reference_node, type=file_type)

    # Update metadata
    cmds.setAttr(container["objectName"] + ".representation",
                 str(new_representation["_id"]),
                 type="string")
Example #6
    def process(self, instance):
        """
        Detect type of renderfarm submission and create and post dependend job
        in case of Deadline. It creates json file with metadata needed for
        publishing in directory of render.

        :param instance: Instance data
        :type instance: dict
        """
        data = instance.data.copy()
        context = instance.context
        self.context = context

        if hasattr(instance, "_log"):
            data['_log'] = instance._log
        render_job = data.pop("deadlineSubmissionJob", None)
        submission_type = "deadline"
        if not render_job:
            # No deadline job. Try Muster: musterSubmissionJob
            render_job = data.pop("musterSubmissionJob", None)
            submission_type = "muster"
            assert render_job, (
                "Can't continue without valid Deadline "
                "or Muster submission prior to this "
                "plug-in."
            )

        if submission_type == "deadline":
            self.DEADLINE_REST_URL = os.environ.get(
                "DEADLINE_REST_URL", "http://localhost:8082"
            )
            assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"

            self._submit_deadline_post_job(instance, render_job)

        asset = data.get("asset") or api.Session["AVALON_ASSET"]
        subset = data.get("subset")

        start = instance.data.get("frameStart")
        if start is None:
            start = context.data["frameStart"]

        end = instance.data.get("frameEnd")
        if end is None:
            end = context.data["frameEnd"]

        handle_start = instance.data.get("handleStart")
        if handle_start is None:
            handle_start = context.data["handleStart"]

        handle_end = instance.data.get("handleEnd")
        if handle_end is None:
            handle_end = context.data["handleEnd"]

        fps = instance.data.get("fps")
        if fps is None:
            fps = context.data["fps"]

        if data.get("extendFrames", False):
            start, end = self._extend_frames(
                asset,
                subset,
                start,
                end,
                data["overrideExistingFrame"])

        try:
            source = data["source"]
        except KeyError:
            source = context.data["currentFile"]

        # Remap the studio mount point to the registered root; guard against
        # the environment variable being unset.
        mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT")
        if mount:
            source = source.replace(mount, api.registered_root())
        relative_path = os.path.relpath(source, api.registered_root())
        source = os.path.join("{root}", relative_path).replace("\\", "/")

        families = ["render"]

        instance_skeleton_data = {
            "family": "render",
            "subset": subset,
            "families": families,
            "asset": asset,
            "frameStart": start,
            "frameEnd": end,
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "frameStartHandle": start - handle_start,
            "frameEndHandle": end + handle_end,
            "fps": fps,
            "source": source,
            "extendFrames": data.get("extendFrames"),
            "overrideExistingFrame": data.get("overrideExistingFrame"),
            "pixelAspect": data.get("pixelAspect", 1),
            "resolutionWidth": data.get("resolutionWidth", 1920),
            "resolutionHeight": data.get("resolutionHeight", 1080),
            "multipartExr": data.get("multipartExr", False)
        }

        if "prerender" in instance.data["families"]:
            instance_skeleton_data.update({
                "family": "prerender",
                "families": []})

        # transfer specific families from original instance to new render
        for item in self.families_transfer:
            if item in instance.data.get("families", []):
                instance_skeleton_data["families"] += [item]

        if "render.farm" in instance.data["families"]:
            instance_skeleton_data.update({
                "family": "render2d",
                "families": ["render"] + [f for f in instance.data["families"]
                                          if "render.farm" not in f]
            })

        # transfer specific properties from original instance based on
        # mapping dictionary `instance_transfer`
        for key, values in self.instance_transfer.items():
            if key in instance.data.get("families", []):
                for v in values:
                    instance_skeleton_data[v] = instance.data.get(v)

        # look through the instance's representations and include any
        # that carry the `publish_on_farm` tag
        for r in instance.data.get("representations", []):
            if "publish_on_farm" in r.get("tags", []):
                # create the representations attribute if not there
                if "representations" not in instance_skeleton_data.keys():
                    instance_skeleton_data["representations"] = []

                instance_skeleton_data["representations"].append(r)

        instances = None
        assert data.get("expectedFiles"), ("Submission from old Pype version"
                                           " - missing expectedFiles")

        """
        If the contents of `expectedFiles` are dictionaries, we handle
        them as a list of AOVs, creating an instance from each of them.

        Example:
        --------

        expectedFiles = [
            {
                "beauty": [
                    "foo_v01.0001.exr",
                    "foo_v01.0002.exr"
                ],

                "Z": [
                    "boo_v01.0001.exr",
                    "boo_v01.0002.exr"
                ]
            }
        ]

        This will create instances for the `beauty` and `Z` subsets,
        adding those files to their respective representations.

        If we get only a list of files, we collect all file sequences.
        More than one probably doesn't make sense, but we handle it by
        creating one instance with multiple representations.

        Example:
        --------

        expectedFiles = [
            "foo_v01.0001.exr",
            "foo_v01.0002.exr",
            "xxx_v01.0001.exr",
            "xxx_v01.0002.exr"
        ]

        This will result in one instance with two representations:
        `foo` and `xxx`
        """

        self.log.info(data.get("expectedFiles"))

        if isinstance(data.get("expectedFiles")[0], dict):
            # we cannot attach AOVs to other subsets as we consider every
            # AOV a subset of its own.

            if data.get("attachTo"):
                assert len(data.get("expectedFiles")[0].keys()) == 1, (
                    "attaching multiple AOVs or renderable cameras to "
                    "subset is not supported")

            # create instances for every AOV we found in expected files.
            # note: this is done for every AOV and every render camera (if
            #       there are multiple renderable cameras in the scene)
            instances = self._create_instances_for_aov(
                instance_skeleton_data,
                data.get("expectedFiles"))
            self.log.info("got {} instance{}".format(
                len(instances),
                "s" if len(instances) > 1 else ""))

        else:
            representations = self._get_representations(
                instance_skeleton_data,
                data.get("expectedFiles")
            )

            if "representations" not in instance_skeleton_data.keys():
                instance_skeleton_data["representations"] = []

            # add representation
            instance_skeleton_data["representations"] += representations
            instances = [instance_skeleton_data]

        # if we are attaching to other subsets, create a copy of the existing
        # instances, change their data to match that subset and replace the
        # existing instances with the modified data
        if instance.data.get("attachTo"):
            self.log.info("Attaching render to subset:")
            new_instances = []
            for at in instance.data.get("attachTo"):
                for i in instances:
                    new_i = copy(i)
                    new_i["version"] = at.get("version")
                    new_i["subset"] = at.get("subset")
                    new_i["append"] = True
                    new_i["families"].append(at.get("family"))
                    new_instances.append(new_i)
                    self.log.info("  - {} / v{}".format(
                        at.get("subset"), at.get("version")))
            instances = new_instances

        # publish job file
        publish_job = {
            "asset": asset,
            "frameStart": start,
            "frameEnd": end,
            "fps": context.data.get("fps", None),
            "source": source,
            "user": context.data["user"],
            "version": context.data["version"],  # this is workfile version
            "intent": context.data.get("intent"),
            "comment": context.data.get("comment"),
            "job": render_job,
            "session": api.Session.copy(),
            "instances": instances
        }

        # pass Ftrack credentials in case of Muster
        if submission_type == "muster":
            ftrack = {
                "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),
                "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"),
                "FTRACK_SERVER": os.environ.get("FTRACK_SERVER"),
            }
            publish_job.update({"ftrack": ftrack})

        # Ensure output dir exists
        output_dir = instance.data["outputDir"]
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)

        metadata_filename = "{}_metadata.json".format(subset)

        metadata_path = os.path.join(output_dir, metadata_filename)
        self.log.info("Writing json file: {}".format(metadata_path))
        with open(metadata_path, "w") as f:
            json.dump(publish_job, f, indent=4, sort_keys=True)
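
For orientation, a trimmed sketch of what the written <subset>_metadata.json might contain; every value below is invented for illustration:

# Illustrative payload shape only; real values come from the code above.
publish_job_example = {
    "asset": "sh010",
    "frameStart": 1001,
    "frameEnd": 1050,
    "fps": 25.0,
    "source": "{root}/work/sh010/compositing/sh010_v001.nk",
    "user": "artist",
    "version": 1,
    "instances": [
        {"family": "render", "subset": "renderMain_beauty",
         "frameStart": 1001, "frameEnd": 1050}
    ]
}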
Example #7
    def process(self, instance):
        """
        Detect type of renderfarm submission and create and post dependend job
        in case of Deadline. It creates json file with metadata needed for
        publishing in directory of render.

        :param instance: Instance data
        :type instance: dict
        """
        # Get a submission job
        data = instance.data.copy()
        render_job = data.pop("deadlineSubmissionJob", None)
        submission_type = "deadline"

        if not render_job:
            # No deadline job. Try Muster: musterSubmissionJob
            render_job = data.pop("musterSubmissionJob", None)
            submission_type = "muster"
            if not render_job:
                raise RuntimeError("Can't continue without valid Deadline "
                                   "or Muster submission prior to this "
                                   "plug-in.")

        if submission_type == "deadline":
            self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
                                                    "http://localhost:8082")
            assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"

            self._submit_deadline_post_job(instance, render_job)

        asset = data.get("asset") or api.Session["AVALON_ASSET"]
        subset = data["subset"]

        # Get start/end frame from instance, if not available get from context
        context = instance.context
        start = instance.data.get("frameStart")
        if start is None:
            start = context.data["frameStart"]
        end = instance.data.get("frameEnd")
        if end is None:
            end = context.data["frameEnd"]

        # Add in a regex for the sequence filename
        # This assumes the output files start with the subset name and end
        # with a file extension; the "ext" key holds the extension without
        # the dot, which is prepended to the pattern here.
        if "ext" in instance.data:
            ext = r"\." + re.escape(instance.data["ext"])
        else:
            ext = r"\.\D+"

        regex = r"^{subset}.*\d+{ext}$".format(subset=re.escape(subset),
                                               ext=ext)

        try:
            source = data['source']
        except KeyError:
            source = context.data["currentFile"]

        # Remap the studio mount point to the registered root; guard against
        # the environment variable being unset.
        mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT")
        if mount:
            source = source.replace(mount, api.registered_root())

        relative_path = os.path.relpath(source, api.registered_root())
        source = os.path.join("{root}", relative_path).replace("\\", "/")

        # Write metadata for publish job
        metadata = {
            "asset": asset,
            "regex": regex,
            "frameStart": start,
            "frameEnd": end,
            "fps": context.data.get("fps", None),
            "families": ["render"],
            "source": source,
            "user": context.data["user"],
            "version": context.data["version"],
            # Optional metadata (for debugging)
            "metadata": {
                "instance": data,
                "job": render_job,
                "session": api.Session.copy()
            }
        }

        if submission_type == "muster":
            ftrack = {
                "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),
                "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"),
                "FTRACK_SERVER": os.environ.get("FTRACK_SERVER")
            }
            metadata.update({"ftrack": ftrack})

        # Ensure output dir exists
        output_dir = instance.data["outputDir"]
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)

        if data.get("extendFrames", False):

            family = "render"
            override = data["overrideExistingFrame"]

            # override = data.get("overrideExistingFrame", False)
            out_file = render_job.get("OutFile")
            if not out_file:
                raise RuntimeError("OutFile not found in render job!")

            extension = os.path.splitext(out_file[0])[1]
            _ext = extension[1:]

            # Frame comparison
            prev_start = None
            prev_end = None
            resource_range = range(int(start), int(end) + 1)

            # Gather all the subset files (one subset per render pass!)
            subset_names = [data["subset"]]
            subset_names.extend(data.get("renderPasses", []))
            resources = []
            for subset_name in subset_names:
                version = get_latest_version(asset_name=data["asset"],
                                             subset_name=subset_name,
                                             family=family)

                # Set prev start / end frames for comparison
                if prev_start is None and prev_end is None:
                    prev_start = version["data"]["frameStart"]
                    prev_end = version["data"]["frameEnd"]

                subset_resources = get_resources(version, _ext)
                resource_files = get_resource_files(subset_resources,
                                                    resource_range, override)

                resources.extend(resource_files)

            updated_start = min(start, prev_start)
            updated_end = max(end, prev_end)

            # Update metadata and instance start / end frame
            self.log.info("Updating start / end frame : "
                          "{} - {}".format(updated_start, updated_end))

            # TODO: Improve the logic to get the new frame range for the
            # publish job (publish_filesequence.py). The current approach
            # does not follow Pyblish logic, which is based on
            # Collect / Validate / Extract.

            # ---- Collect Plugins  ---
            # Collect Extend Frames - Only run if extendFrames is toggled
            # # # Store in instance:
            # # # Previous rendered files per subset based on frames
            # # # --> Add to instance.data[resources]
            # # # Update publish frame range

            # ---- Validate Plugins ---
            # Validate Extend Frames
            # # # Check if instance has the requirements to extend frames
            # There might have been some things which can be added to the list
            # Please do so when fixing this.

            # Start frame
            metadata["frameStart"] = updated_start
            metadata["metadata"]["instance"]["frameStart"] = updated_start

            # End frame
            metadata["frameEnd"] = updated_end
            metadata["metadata"]["instance"]["frameEnd"] = updated_end

        metadata_filename = "{}_metadata.json".format(subset)

        metadata_path = os.path.join(output_dir, metadata_filename)
        # convert log messages if they are `LogRecord` to their
        # string format to allow serializing as JSON later on.
        rendered_logs = []
        for log in metadata["metadata"]["instance"].get("_log", []):
            if isinstance(log, logging.LogRecord):
                rendered_logs.append(log.getMessage())
            else:
                rendered_logs.append(log)

        metadata["metadata"]["instance"]["_log"] = rendered_logs
        with open(metadata_path, "w") as f:
            json.dump(metadata, f, indent=4, sort_keys=True)

        # Copy files from previous render if extendFrame is True
        if data.get("extendFrames", False):

            self.log.info("Preparing to copy ..")
            import shutil

            dest_path = data["outputDir"]
            for source in resources:
                src_file = os.path.basename(source)
                dest = os.path.join(dest_path, src_file)
                shutil.copy(source, dest)

            self.log.info("Finished copying %i files" % len(resources))
Example #8
    def process(self, instance):

        DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
                                           "http://localhost:8082")
        assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"

        # try:
        #     deadline_url = os.environ["DEADLINE_REST_URL"]
        # except KeyError:
        #     self.log.error("Deadline REST API url not found.")

        # Get a submission job
        job = instance.data.get("deadlineSubmissionJob")
        if not job:
            raise RuntimeError("Can't continue without valid deadline "
                               "submission prior to this plug-in.")

        data = instance.data.copy()
        asset = data.get("asset") or api.Session["AVALON_ASSET"]
        subset = data["subset"]
        state = data.get("publishJobState", "Suspended")
        job_name = "{batch} - {subset} [publish image sequence]".format(
            batch=job["Props"]["Name"], subset=subset)

        # Get start/end frame from instance, if not available get from context
        context = instance.context
        start = instance.data.get("startFrame")
        if start is None:
            start = context.data["startFrame"]
        end = instance.data.get("endFrame")
        if end is None:
            end = context.data["endFrame"]

        # Add in regex for sequence filename
        # This assumes the output files start with subset name and ends with
        # a file extension. The "ext" key includes the dot with the extension.
        if "ext" in instance.data:
            ext = re.escape(instance.data["ext"])
        else:
            ext = "\.\D+"

        regex = "^{subset}.*\d+{ext}$".format(subset=re.escape(subset),
                                              ext=ext)

        try:
            source = data['source']
        except KeyError:
            source = context.data["currentFile"]

        relative_path = os.path.relpath(source, api.registered_root())
        source = os.path.join("{root}", relative_path).replace("\\", "/")

        # Write metadata for publish job
        render_job = data.pop("deadlineSubmissionJob")
        metadata = {
            "asset": asset,
            "regex": regex,
            "startFrame": start,
            "endFrame": end,
            "fps": context.data.get("fps", None),
            "families": ["render"],
            "source": source,
            "user": context.data["user"],

            # Optional metadata (for debugging)
            "metadata": {
                "instance": data,
                "job": job,
                "session": api.Session.copy()
            }
        }

        # Ensure output dir exists
        output_dir = instance.data["outputDir"]
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)

        if data.get("extendFrames", False):

            family = "render"
            override = data["overrideExistingFrame"]

            # override = data.get("overrideExistingFrame", False)
            out_file = render_job.get("OutFile")
            if not out_file:
                raise RuntimeError("OutFile not found in render job!")

            extension = os.path.splitext(out_file[0])[1]
            _ext = extension[1:]

            # Frame comparison
            prev_start = None
            prev_end = None
            resource_range = range(int(start), int(end) + 1)

            # Gather all the subset files (one subset per render pass!)
            subset_names = [data["subset"]]
            subset_names.extend(data.get("renderPasses", []))
            resources = []

            for subset_name in subset_names:
                version = get_latest_version(asset_name=data["asset"],
                                             subset_name=subset_name,
                                             family=family)

                # Set prev start / end frames for comparison
                if prev_start is None and prev_end is None:
                    prev_start = version["data"]["startFrame"]
                    prev_end = version["data"]["endFrame"]

                subset_resources = get_resources(version, _ext)
                resource_files = get_resource_files(subset_resources,
                                                    resource_range, override)

                resources.extend(resource_files)

            updated_start = min(start, prev_start)
            updated_end = max(end, prev_end)

            # Update metadata and instance start / end frame
            self.log.info("Updating start / end frame : "
                          "{} - {}".format(updated_start, updated_end))

            # TODO : Improve logic to get new frame range for the
            # publish job (publish_filesequence.py)
            # The current approach is not following Pyblish logic which is based
            # on Collect / Validate / Extract.

            # ---- Collect Plugins  ---
            # Collect Extend Frames - Only run if extendFrames is toggled
            # # # Store in instance:
            # # # Previous rendered files per subset based on frames
            # # # --> Add to instance.data[resources]
            # # # Update publish frame range

            # ---- Validate Plugins ---
            # Validate Extend Frames
            # # # Check if instance has the requirements to extend frames
            # There might have been some things which can be added to the list
            # Please do so when fixing this.

            # Start frame
            metadata["startFrame"] = updated_start
            metadata["metadata"]["instance"]["startFrame"] = updated_start

            # End frame
            metadata["endFrame"] = updated_end
            metadata["metadata"]["instance"]["endFrame"] = updated_end

        metadata_filename = "{}_metadata.json".format(subset)
        metadata_path = os.path.join(output_dir, metadata_filename)
        with open(metadata_path, "w") as f:
            json.dump(metadata, f, indent=4, sort_keys=True)

        # Generate the payload for Deadline submission
        payload = {
            "JobInfo": {
                "Plugin": "Python",
                "BatchName": job["Props"]["Batch"],
                "Name": job_name,
                "JobType": "Normal",
                "JobDependency0": job["_id"],
                "UserName": job["Props"]["User"],
                "Comment": instance.context.data.get("comment", ""),
                "InitialStatus": state
            },
            "PluginInfo": {
                "Version": "3.6",
                "ScriptFile": _get_script(),
                "Arguments": '--path "{}"'.format(metadata_path),
                "SingleFrameOnly": "True"
            },

            # Mandatory for Deadline, may be empty
            "AuxFiles": []
        }

        # Transfer the environment from the original job to this dependent
        # job so they use the same environment
        environment = job["Props"].get("Env", {})
        payload["JobInfo"].update({
            "EnvironmentKeyValue%d" % index:
            "{key}={value}".format(key=key, value=environment[key])
            for index, key in enumerate(environment)
        })

        # Avoid copied pools and remove secondary pool
        payload["JobInfo"]["Pool"] = "none"
        payload["JobInfo"].pop("SecondaryPool", None)

        self.log.info("Submitting..")
        self.log.info(json.dumps(payload, indent=4, sort_keys=True))

        url = "{}/api/jobs".format(DEADLINE_REST_URL)
        response = requests.post(url, json=payload)
        if not response.ok:
            raise Exception(response.text)

        # Copy files from previous render if extendFrame is True
        if data.get("extendFrames", False):

            self.log.info("Preparing to copy ..")
            import shutil

            dest_path = data["outputDir"]
            for source in resources:
                src_file = os.path.basename(source)
                dest = os.path.join(dest_path, src_file)
                shutil.copy(source, dest)

            self.log.info("Finished copying %i files" % len(resources))
Example #9
    def unpack_textures(self, container):
        import os
        import shutil
        from maya import cmds, mel
        from avalon import api, io

        project = io.find_one({"type": "project"},
                              projection={"name": True,
                                          "config.template.publish": True})
        asset = io.find_one({"_id": io.ObjectId(container["assetId"])},
                            projection={"name": True, "silo": True})
        subset = io.find_one({"_id": io.ObjectId(container["subsetId"])},
                             projection={"name": True})
        version = io.find_one({"_id": io.ObjectId(container["versionId"])},
                              projection={"name": True,
                                          "data.dependencies": True})
        # Find TexturePack
        id = next(iter(version["data"]["dependencies"]))
        dep_version = io.find_one({"_id": io.ObjectId(id)})
        dep_subset = io.find_one({"_id": dep_version["parent"]})
        dep_representation = io.find_one({"parent": dep_version["_id"],
                                          "name": "TexturePack"})
        # List texture versions
        published = dict()
        template_publish = project["config"]["template"]["publish"]
        for data in dep_representation["data"]["fileInventory"]:
            path = template_publish.format(
                root=api.registered_root(),
                project=project["name"],
                silo=asset["silo"],
                asset=asset["name"],
                subset=dep_subset["name"],
                version=data["version"],
                representation="TexturePack",
            )
            published[data["version"]] = path

        # Collect path,
        # filter out textures that is being used in this look
        file_nodes = cmds.ls(cmds.sets(container["objectName"], query=True),
                             type="file")
        files = dict()
        for node in file_nodes:
            path = cmds.getAttr(node + ".fileTextureName",
                                expandEnvironmentVariables=True)
            if not os.path.isfile(path):
                continue

            for v, p in published.items():
                if path.startswith(p):
                    key = (v, p)
                    if key not in files:
                        files[key] = list()
                    files[key].append(node)
                    break

        # Copy textures and change path
        root = cmds.workspace(query=True, rootDirectory=True)
        root += mel.eval('workspace -query -fileRuleEntry "sourceImages"')
        root += "/_unpacked"

        pattern = "/{asset}/{subset}.v{version:0>3}/TexturePack.v{texture:0>3}"
        for (texture_version, src), nodes in files.items():
            dst = root + pattern.format(asset=asset["name"],
                                        subset=subset["name"],
                                        version=version["name"],
                                        texture=texture_version)
            for node in nodes:
                attr = node + ".fileTextureName"
                path = cmds.getAttr(attr, expandEnvironmentVariables=True)
                tail = path.split("TexturePack")[-1]
                cmds.setAttr(attr, dst + tail, type="string")

            if os.path.isdir(dst):
                continue

            shutil.copytree(src, dst)
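
A tiny sketch of how the destination pattern above resolves, with invented values; only the format specifiers mirror the code:

pattern = "/{asset}/{subset}.v{version:0>3}/TexturePack.v{texture:0>3}"
print(pattern.format(asset="hero", subset="lookDefault", version=3, texture=12))
# -> /hero/lookDefault.v003/TexturePack.v012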