Example #1
    def update(self, container, representation):
        """ Switch asset or change version """
        layer = container.pop("layer")

        context = representation.get("context", {})

        namespace_from_container = re.sub(r'_\d{3}$', '',
                                          container["namespace"])
        layer_name = "{}_{}".format(context["asset"], context["subset"])
        # switching assets
        if namespace_from_container != layer_name:
            layers = stub.get_items(comps=True)
            existing_layers = [layer.name for layer in layers]
            layer_name = lib.get_unique_layer_name(
                existing_layers, "{}_{}".format(context["asset"],
                                                context["subset"]))
        else:  # switching version - keep same name
            layer_name = container["namespace"]
        path = api.get_representation_path(representation)
        # with aftereffects.maintained_selection():  # TODO
        stub.replace_item(layer, path, stub.LOADED_ICON + layer_name)
        stub.imprint(
            layer, {
                "representation": str(representation["_id"]),
                "name": context["subset"],
                "namespace": layer_name
            })
Example #2
    def update(self, container, representation):

        path = api.get_representation_path(representation)
        namespace = "{}:".format(container["namespace"])
        members = cmds.sets(container['objectName'], query=True)
        yeti_node = cmds.ls(members, type="pgYetiMaya")

        # TODO: Count the amount of nodes cached
        # To ensure new nodes get created or old nodes get destroyed

        for node in yeti_node:
            # Remove local given namespace
            node_name = node.split(namespace, 1)[-1]
            file_name = node_name.replace(":", "_")

            # Check if the node has a cache
            tmp_cache = os.path.join(path, "{}.%04d.fur".format(file_name))
            fpath = self.validate_cache(os.path.normpath(tmp_cache))

            # Update the attribute
            cmds.setAttr("{}.cacheFileName".format(node), fpath, type="string")

        # Update the container
        cmds.setAttr("{}.representation".format(container["objectName"]),
                     str(representation["_id"]),
                     type="string")
Example #3
    def load(self, context, name=None, namespace=None, data=None):
        # Import template.
        temp_dir = tempfile.mkdtemp()
        zip_file = api.get_representation_path(context["representation"])
        template_path = os.path.join(temp_dir, "temp.tpl")
        with zipfile.ZipFile(zip_file, "r") as zip_ref:
            zip_ref.extractall(template_path)

        sig = harmony.signature("paste")
        func = """function %s(args)
        {
            var template_path = args[0];
            var drag_object = copyPaste.pasteTemplateIntoGroup(
                template_path, "Top", 1
            );
        }
        %s
        """ % (sig, sig)

        harmony.send({"function": func, "args": [template_path]})

        shutil.rmtree(temp_dir)

        subset_name = context["subset"]["name"]

        return harmony.containerise(subset_name, namespace, subset_name,
                                    context, self.__class__.__name__)

    def update(self, container, representation):
        pass

    def remove(self, container):
        pass
Example #4
    def update(self, container, representation):

        import pymel.core as pm

        path = api.get_representation_path(representation)

        files_in_path = os.listdir(os.path.split(path)[0])
        sequence = 0
        collections, remainder = clique.assemble(files_in_path)
        if collections:
            sequence = 1

        # Update the standin
        standins = list()
        members = pm.sets(container['objectName'], query=True)
        for member in members:
            shape = member.getShape()
            if (shape and shape.type() == "aiStandIn"):
                standins.append(shape)

        for standin in standins:
            standin.dso.set(path)
            standin.useFrameExtension.set(sequence)

        container = pm.PyNode(container["objectName"])
        container.representation.set(str(representation["_id"]))
Example #5
    def update(self, container, representation):

        import os

        import pymel.core as pm
        from maya import cmds

        node = container["objectName"]

        path = api.get_representation_path(representation)
        # path = self.fname
        proxyPath = os.path.splitext(path)[0] + ".ma"

        # Get reference node from container members
        members = cmds.sets(node, query=True, nodesOnly=True)
        reference_node = self._get_reference_node(members)

        assert os.path.exists(proxyPath), "%s does not exist." % proxyPath

        try:
            content = cmds.file(proxyPath,
                                loadReference=reference_node,
                                type="mayaAscii",
                                returnNewNodes=True)

            # Set attributes
            proxyShape = pm.ls(content, type="mesh")[0]

            proxyShape.aiTranslator.set('procedural')
            proxyShape.dso.set(path)
            proxyShape.aiOverrideShaders.set(0)

        except RuntimeError as exc:
            # When changing a reference to a file that has load errors the
            # command will raise an error even if the file is still loaded
            # correctly (e.g. when raising errors on Arnold attributes)
            # When the file is loaded and has content, we consider it's fine.
            if not cmds.referenceQuery(reference_node, isLoaded=True):
                raise

            content = cmds.referenceQuery(reference_node,
                                          nodes=True,
                                          dagPath=True)
            if not content:
                raise

            self.log.warning("Ignoring file read error:\n%s", exc)

        # Add new nodes of the reference to the container
        cmds.sets(content, forceElement=node)

        # Remove any placeHolderList attribute entries from the set that
        # are remaining from nodes being removed from the referenced file.
        members = cmds.sets(node, query=True)
        invalid = [x for x in members if ".placeHolderList" in x]
        if invalid:
            cmds.sets(invalid, remove=node)

        # Update metadata
        cmds.setAttr("{}.representation".format(node),
                     str(representation["_id"]),
                     type="string")
Example #6
    def update(self, container, representation):
        import pymel.core as pm
        from maya import cmds
        image_plane_shape = None
        for node in pm.PyNode(container["objectName"]).members():
            if node.nodeType() == "imagePlane":
                image_plane_shape = node

        assert image_plane_shape is not None, "Image plane not found."

        path = api.get_representation_path(representation)
        image_plane_shape.imageName.set(path)
        cmds.setAttr(container["objectName"] + ".representation",
                     str(representation["_id"]),
                     type="string")

        # Set frame range.
        version = io.find_one({"_id": representation["parent"]})
        subset = io.find_one({"_id": version["parent"]})
        asset = io.find_one({"_id": subset["parent"]})
        start_frame = asset["data"]["frameStart"]
        end_frame = asset["data"]["frameEnd"]
        image_plane_shape.frameOffset.set(1 - start_frame)
        image_plane_shape.frameIn.set(start_frame)
        image_plane_shape.frameOut.set(end_frame)
        image_plane_shape.frameCache.set(end_frame)
Example #7
    def load(self, context, name=None, namespace=None, data=None):
        """Plugin entry point.

        Args:
            context (:class:`pyblish.api.Context`): Context.
            name (str, optional): Container name.
            namespace (str, optional): Container namespace.
            data (dict, optional): Additional data passed into loader.

        """
        # Load template.
        self_name = self.__class__.__name__
        temp_dir = tempfile.mkdtemp()
        zip_file = api.get_representation_path(context["representation"])
        template_path = os.path.join(temp_dir, "temp.tpl")
        with zipfile.ZipFile(zip_file, "r") as zip_ref:
            zip_ref.extractall(template_path)

        group_id = "{}".format(uuid.uuid4())

        container_group = harmony.send({
            "function":
            f"PypeHarmony.Loaders.{self_name}.loadContainer",
            "args": [
                template_path, context["asset"]["name"],
                context["subset"]["name"], group_id
            ]
        })["result"]

        # Cleanup the temp directory
        shutil.rmtree(temp_dir)

        # We must validate the group_node
        return harmony.containerise(name, namespace, container_group, context,
                                    self_name)
Example #8
    def update(self, container, representation):
        node = container["objectName"]
        source_path = api.get_representation_path(representation)
        destination_path = container["namespace"]

        task = unreal.AssetImportTask()

        task.filename = source_path
        task.destination_path = destination_path
        # strip suffix
        task.destination_name = node[:-4]
        task.replace_existing = True
        task.automated = True
        task.save = True

        task.options = unreal.FbxImportUI()
        task.options.import_animations = False

        # do import fbx and replace existing data
        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
        container_path = "{}/{}".format(container["namespace"],
                                        container["objectName"])
        # update metadata
        avalon_unreal.imprint(container_path,
                              {"_id": str(representation["_id"])})
Example #9
    def update(self, container, representation):
        """ Switch asset or change version """
        context = representation.get("context", {})
        _ = container.pop("layer")

        # without iterator number (_001, 002...)
        namespace_from_container = re.sub(r'_\d{3}$', '',
                                          container["namespace"])
        comp_name = "{}_{}".format(context["asset"], context["subset"])

        # switching assets
        if namespace_from_container != comp_name:
            items = stub.get_items(comps=True)
            existing_items = [layer.name for layer in items]
            comp_name = get_unique_layer_name(
                existing_items, "{}_{}".format(context["asset"],
                                               context["subset"]))
        else:  # switching version - keep same name
            comp_name = container["namespace"]

        path = api.get_representation_path(representation)

        layers = get_background_layers(path)
        comp = stub.reload_background(container["members"][1],
                                      stub.LOADED_ICON + comp_name, layers)

        # update container
        container["representation"] = str(representation["_id"])
        container["name"] = context["subset"]
        container["namespace"] = comp_name
        container["members"] = comp.members

        stub.imprint(comp, container)
Example #10
    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:

        """

        from avalon.nuke import (
            update_container
        )

        node = nuke.toNode(container['objectName'])

        root = api.get_representation_path(representation).replace("\\", "/")

        # Get start frame from version data
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })

        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        updated_dict = {}
        updated_dict.update({
            "representation": str(representation["_id"]),
            "frameEnd": version["data"].get("frameEnd"),
            "version": version.get("name"),
            "colorspace": version["data"].get("colorspace"),
            "source": version["data"].get("source"),
            "handles": version["data"].get("handles"),
            "fps": version["data"].get("fps"),
            "author": version["data"].get("author"),
            "outputDir": version["data"].get("outputDir"),
        })

        # Update the imprinted representation
        update_container(
            node,
            updated_dict
        )

        node["file"].setValue(root)

        # change color of node
        if version.get("name") not in [max_version]:
            node["tile_color"].setValue(int("0xd84f20ff", 16))
        else:
            node["tile_color"].setValue(int("0xff0ff0ff", 16))

        self.log.info("udated to version: {}".format(version.get("name")))
Example #11
    def update(self, container: Dict, representation: Dict):
        """Update the loaded asset.

        This will remove all objects of the current collection, load the new
        ones and add them to the collection.
        If the objects of the collection are used in another collection they
        will not be removed, only unlinked. Normally this should not be the
        case though.

        Warning:
            No nested collections are supported at the moment!
        """
        collection = bpy.data.collections.get(container["objectName"])
        libpath = Path(api.get_representation_path(representation))
        extension = libpath.suffix.lower()

        logger.debug(
            "Container: %s\nRepresentation: %s",
            pformat(container, indent=2),
            pformat(representation, indent=2),
        )

        assert collection, (
            f"The asset is not loaded: {container['objectName']}")
        assert not (collection.children), (
            "Nested collections are not supported.")
        assert libpath, (
            "No existing library file found for {container['objectName']}")
        assert libpath.is_file(), (f"The file doesn't exist: {libpath}")
        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}")

        collection_metadata = collection.get(blender.pipeline.AVALON_PROPERTY)
        collection_libpath = collection_metadata["libpath"]
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        normalized_collection_libpath = (str(
            Path(bpy.path.abspath(collection_libpath)).resolve()))
        normalized_libpath = (str(
            Path(bpy.path.abspath(str(libpath))).resolve()))
        logger.debug(
            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
            normalized_collection_libpath,
            normalized_libpath,
        )
        if normalized_collection_libpath == normalized_libpath:
            logger.info("Library already loaded, not updating...")
            return

        self._remove(self, objects, lib_container)

        objects_list = self._process(self, str(libpath), lib_container,
                                     collection.name)

        # Save the list of objects in the metadata container
        collection_metadata["objects"] = objects_list
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])
Example #12
def assign_look_by_version(nodes, version_id):
    """Assign nodes a specific published look version by id.

    This assumes the nodes correspond with the asset.

    Args:
        nodes(list): nodes to assign look to
        version_id (bson.ObjectId): database id of the version

    Returns:
        None
    """

    # Get representations of shader file and relationships
    look_representation = io.find_one({
        "type": "representation",
        "parent": version_id,
        "name": "ma"
    })

    json_representation = io.find_one({
        "type": "representation",
        "parent": version_id,
        "name": "json"
    })

    # See if representation is already loaded, if so reuse it.
    host = api.registered_host()
    representation_id = str(look_representation['_id'])
    for container in host.ls():
        if (container['loader'] == "LookLoader"
                and container['representation'] == representation_id):
            log.info("Reusing loaded look ..")
            container_node = container['objectName']
            break
    else:
        log.info("Using look for the first time ..")

        # Load file
        loaders = api.loaders_from_representation(api.discover(api.Loader),
                                                  representation_id)
        Loader = next((i for i in loaders if i.__name__ == "LookLoader"), None)
        if Loader is None:
            raise RuntimeError("Could not find LookLoader, this is a bug")

        # Reference the look file
        with maya.maintained_selection():
            container_node = pipeline.load(Loader, look_representation)

    # Get container members
    shader_nodes = cmds.sets(container_node, query=True)

    # Load relationships
    shader_relation = api.get_representation_path(json_representation)
    with open(shader_relation, "r") as f:
        relationships = json.load(f)

    # Assign relationships
    apply_shaders(relationships, shader_nodes, nodes)
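
A minimal usage sketch for the function above, assuming a Maya session with avalon's io installed; the asset name, subset name, and selection query are placeholders, not part of the original example:

from maya import cmds
from avalon import io

# Illustrative only: resolve the latest published version of a "lookMain"
# subset for a placeholder asset and assign it to the current selection.
asset = io.find_one({"type": "asset", "name": "heroCharacter"})
subset = io.find_one({"type": "subset", "name": "lookMain",
                      "parent": asset["_id"]})
version = io.find_one({"type": "version", "parent": subset["_id"]},
                      sort=[("name", -1)])

assign_look_by_version(cmds.ls(selection=True, long=True), version["_id"])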
Example #13
    def update(self, container, representation):
        """Update the Loader's path

        Fusion automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:

        """

        from avalon.nuke import (
            viewer_update_and_undo_stop,
            ls_img_sequence,
            update_container
        )
        log.info("this i can see")
        node = container["_tool"]
        # TODO: prepare also for other readers img/geo/camera
        assert node.Class() == "Reader", "Must be Reader"

        root = api.get_representation_path(representation)
        file = ls_img_sequence(os.path.dirname(root), one=True)

        # Get start frame from version data
        version = io.find_one({"type": "version",
                               "_id": representation["parent"]})
        start = version["data"].get("startFrame")
        if start is None:
            log.warning("Missing start frame for updated version"
                        "assuming starts at frame 0 for: "
                        "{} ({})".format(node['name'].value(), representation))
            start = 0

        with viewer_update_and_undo_stop():

            # Update the loader's path whilst preserving some values
            with preserve_trim(node):
                with preserve_inputs(node,
                                     knobs=["file",
                                            "first",
                                            "last",
                                            "originfirst",
                                            "originlast",
                                            "frame_mode",
                                            "frame"]):
                    node["file"] = file["path"]

            # Set the global in to the start frame of the sequence
            global_in_changed = loader_shift(node, start, relative=False)
            if global_in_changed:
                # Log this change to the user
                log.debug("Changed '{}' global in:"
                          " {:d}".format(node['name'].value(), start))

            # Update the imprinted representation
            update_container(
                node,
                {"representation": str(representation["_id"])}
            )
Example #14
    def update(self, container, representation):
        layer = container.pop("layer")

        with photoshop.maintained_selection():
            photoshop.replace_smart_object(
                layer, api.get_representation_path(representation))

        photoshop.imprint(layer,
                          {"representation": str(representation["_id"])})
Example #15
    def get_file_paths(self, session, event):
        """Get file paths from selected components."""

        link = session.get("Component",
                           list(event["data"]["values"].values())
                           [0])["version"]["asset"]["parent"]["link"][0]
        project = session.get(link["type"], link["id"])
        os.environ["AVALON_PROJECT"] = project["name"]
        api.Session["AVALON_PROJECT"] = project["name"]
        io.install()

        location = ftrack_api.Session().pick_location()

        paths = []
        for parent_name in sorted(event["data"]["values"].keys()):
            component = session.get("Component",
                                    event["data"]["values"][parent_name])

            # Newer publishes have the source referenced in Ftrack.
            online_source = False
            for neighbour_component in component["version"]["components"]:
                if neighbour_component["name"] != "ftrackreview-mp4_src":
                    continue

                paths.append(location.get_filesystem_path(neighbour_component))
                online_source = True

            if online_source:
                continue

            asset = io.find_one({"type": "asset", "name": parent_name})
            subset = io.find_one({
                "type": "subset",
                "name": component["version"]["asset"]["name"],
                "parent": asset["_id"]
            })
            version = io.find_one({
                "type": "version",
                "name": component["version"]["version"],
                "parent": subset["_id"]
            })
            representation = io.find_one({
                "type": "representation",
                "parent": version["_id"],
                "name": component["file_type"][1:]
            })
            if representation is None:
                representation = io.find_one({
                    "type": "representation",
                    "parent": version["_id"],
                    "name": "preview"
                })
            paths.append(api.get_representation_path(representation))

        return paths
Example #16
    def load(self, context, name=None, namespace=None, data=None):
        wav_file = api.get_representation_path(context["representation"])
        harmony.send({
            "function": func,
            "args": [context["subset"]["name"], wav_file]
        })

        subset_name = context["subset"]["name"]

        return harmony.containerise(subset_name, namespace, subset_name,
                                    context, self.__class__.__name__)
Example #17
    def update(self, container, representation):
        name = container["asset_name"]
        source_path = api.get_representation_path(representation)
        destination_path = container["namespace"]

        task = unreal.AssetImportTask()

        task.set_editor_property('filename', source_path)
        task.set_editor_property('destination_path', destination_path)
        task.set_editor_property('destination_name', name)
        task.set_editor_property('replace_existing', True)
        task.set_editor_property('automated', True)
        task.set_editor_property('save', True)

        # set import options here
        options = unreal.FbxImportUI()
        options.set_editor_property('import_as_skeletal', True)
        options.set_editor_property('import_animations', False)
        options.set_editor_property('import_mesh', True)
        options.set_editor_property('import_materials', True)
        options.set_editor_property('import_textures', True)
        options.set_editor_property('skeleton', None)
        options.set_editor_property('create_physics_asset', False)

        options.set_editor_property('mesh_type_to_import',
                                    unreal.FBXImportType.FBXIT_SKELETAL_MESH)

        options.skeletal_mesh_import_data.set_editor_property(
            'import_content_type', unreal.FBXImportContentType.FBXICT_ALL)
        # set to import normals, otherwise Unreal will compute them
        # and it will take a long time, depending on the size of the mesh
        options.skeletal_mesh_import_data.set_editor_property(
            'normal_import_method',
            unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS)

        task.options = options
        # do import fbx and replace existing data
        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks(
            [task])  # noqa: E501
        container_path = "{}/{}".format(container["namespace"],
                                        container["objectName"])
        # update metadata
        unreal_pipeline.imprint(
            container_path, {
                "representation": str(representation["_id"]),
                "parent": str(representation["parent"])
            })

        asset_content = unreal.EditorAssetLibrary.list_assets(
            destination_path, recursive=True, include_folder=True)

        for a in asset_content:
            unreal.EditorAssetLibrary.save_asset(a)
Example #18
    def update(self, container, representation):
        image_plane_shape = None
        for node in pc.PyNode(container["objectName"]).members():
            if node.nodeType() == "imagePlane":
                image_plane_shape = node

        assert image_plane_shape is not None, "Image plane not found."

        path = api.get_representation_path(representation)
        image_plane_shape.imageName.set(path)
        cmds.setAttr(container["objectName"] + ".representation",
                     str(representation["_id"]),
                     type="string")
Example #19
def update_package(set_container, representation):
    """Update any matrix changes in the scene based on the new data

    Args:
        set_container (dict): container data from `ls()`
        representation (dict): the representation document from the database

    Returns:
        None

    """

    # Load the original package data
    current_representation = io.find_one({
        "_id":
        io.ObjectId(set_container['representation']),
        "type":
        "representation"
    })

    current_file = api.get_representation_path(current_representation)
    assert current_file.endswith(".json")
    with open(current_file, "r") as fp:
        current_data = json.load(fp)

    # Load the new package data
    new_file = api.get_representation_path(representation)
    assert new_file.endswith(".json")
    with open(new_file, "r") as fp:
        new_data = json.load(fp)

    # Update scene content
    containers = get_contained_containers(set_container)
    update_scene(set_container, containers, current_data, new_data, new_file)

    # TODO: This should be handled by the pipeline itself
    cmds.setAttr(set_container['objectName'] + ".representation",
                 str(representation['_id']),
                 type="string")
Example #20
File: load_clip.py Project: 3dzayn/pype
    def update(self, container, representation):
        """ Updating previously loaded clips
        """

        # load clip to timeline and get main variables
        context = deepcopy(representation["context"])
        context.update({"representation": representation})
        name = container['name']
        namespace = container['namespace']
        timeline_item_data = resolve.get_pype_timeline_item_by_name(namespace)
        timeline_item = timeline_item_data["clip"]["item"]
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })
        version_data = version.get("data", {})
        version_name = version.get("name", None)
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)
        self.fname = api.get_representation_path(representation)
        context["version"] = {"data": version_data}

        loader = resolve.ClipLoader(self, context)
        timeline_item = loader.update(timeline_item)

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {}
        for key in add_keys:
            data_imprint.update({
                key: version_data.get(key, str(None))
            })

        # add variables related to version context
        data_imprint.update({
            "representation": str(representation["_id"]),
            "version": version_name,
            "colorspace": colorspace,
            "objectName": object_name
        })

        # update color of clip regarding the version order
        self.set_item_color(timeline_item, version)

        return resolve.update_container(timeline_item, data_imprint)
Example #21
    def update(self, container, representation):

        node = container["node"]

        # Update the file path
        file_path = api.get_representation_path(representation)
        file_path = file_path.replace("\\", "/")

        # Update attributes
        node.setParms({"fileName": file_path,
                       "representation": str(representation["_id"])})

        # Rebuild
        node.parm("buildHierarchy").pressButton()
Example #22
    def update(self, container, representation):
        name = container["asset_name"]
        source_path = api.get_representation_path(representation)
        destination_path = container["namespace"]

        task = unreal.AssetImportTask()
        task.options = unreal.FbxImportUI()

        task.set_editor_property('filename', source_path)
        task.set_editor_property('destination_path', destination_path)
        task.set_editor_property('destination_name', name)
        task.set_editor_property('replace_existing', True)
        task.set_editor_property('automated', True)
        task.set_editor_property('save', False)

        # set import options here
        task.options.set_editor_property('automated_import_should_detect_type',
                                         True)
        task.options.set_editor_property('original_import_type',
                                         unreal.FBXImportType.FBXIT_ANIMATION)
        task.options.set_editor_property('import_mesh', False)
        task.options.set_editor_property('import_animations', True)

        task.options.skeletal_mesh_import_data.set_editor_property(
            'import_content_type',
            unreal.FBXImportContentType.FBXICT_SKINNING_WEIGHTS)

        skeletal_mesh = unreal.EditorAssetLibrary.load_asset(
            container.get('namespace') + "/" + container.get('asset_name'))
        skeleton = skeletal_mesh.get_editor_property('skeleton')
        task.options.set_editor_property('skeleton', skeleton)

        # do import fbx and replace existing data
        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
        container_path = "{}/{}".format(container["namespace"],
                                        container["objectName"])
        # update metadata
        unreal_pipeline.imprint(
            container_path, {
                "representation": str(representation["_id"]),
                "parent": str(representation["parent"])
            })

        asset_content = unreal.EditorAssetLibrary.list_assets(
            destination_path, recursive=True, include_folder=True)

        for a in asset_content:
            unreal.EditorAssetLibrary.save_asset(a)
Example #23
    def update(self, container, representation):
        """ Updating previously loaded clips
        """

        # load clip to timeline and get main variables
        name = container['name']
        namespace = container['namespace']
        track_item = phiero.get_track_items(
            track_item_name=namespace)
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })
        version_data = version.get("data", {})
        version_name = version.get("name", None)
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)
        file = api.get_representation_path(representation).replace("\\", "/")

        # reconnect media to new path
        track_item.source().reconnectMedia(file)

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {}
        for key in add_keys:
            data_imprint.update({
                key: version_data.get(key, str(None))
            })

        # add variables related to version context
        data_imprint.update({
            "representation": str(representation["_id"]),
            "version": version_name,
            "colorspace": colorspace,
            "objectName": object_name
        })

        # update color of clip regarding the version order
        self.set_item_color(track_item, version)

        return phiero.update_container(track_item, data_imprint)
Example #24
def get_resources(version, extension=None):
    """Get the files from the specific version."""
    query = {"type": "representation", "parent": version["_id"]}
    if extension:
        query["name"] = extension

    representation = io.find_one(query)
    assert representation, "This is a bug"

    directory = api.get_representation_path(representation)
    print("Source: ", directory)
    resources = sorted([
        os.path.normpath(os.path.join(directory, fname))
        for fname in os.listdir(directory)
    ])

    return resources
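
A brief usage sketch, assuming a version document can be queried from the database; the version id below is a placeholder:

from avalon import io

version_id = "..."  # placeholder: ObjectId string of a published version

# Illustrative only: list every published resource file of the version, then
# only the files belonging to its "abc" representation.
version = io.find_one({"type": "version", "_id": io.ObjectId(version_id)})
all_files = get_resources(version)
abc_files = get_resources(version, extension="abc")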
Example #25
    def update(self, container, representation):

        import pymel.core as pm

        path = api.get_representation_path(representation)

        # Update the standin
        members = pm.sets(container['objectName'], query=True)
        standins = pm.ls(members, type="AiStandIn", long=True)

        assert len(standins) == 1, "This is a bug"

        for standin in standins:
            standin.cacheFileName.set(path)

        container = pm.PyNode(container["objectName"])
        container.representation.set(str(representation["_id"]))
Example #26
    def update(self, container, representation):

        node = container["node"]
        try:
            file_node = next(n for n in node.children()
                             if n.type().name() == "file")
        except StopIteration:
            self.log.error("Could not find node of type `alembic`")
            return

        # Update the file path
        file_path = api.get_representation_path(representation)
        file_path = self.format_path(file_path)

        file_node.setParms({"fileName": file_path})

        # Update attribute
        node.setParms({"representation": str(representation["_id"])})
Example #27
    def update(self, container, representation):

        import maya.cmds as cmds

        path = api.get_representation_path(representation)

        # Find the aiVolume node
        members = cmds.sets(container['objectName'], query=True)
        grid_nodes = cmds.ls(members, type="aiVolume", long=True)
        assert len(grid_nodes) == 1, "This is a bug"

        # Update the aiVolume node
        self._apply_settings(grid_nodes[0], path=path)

        # Update container representation
        cmds.setAttr(container["objectName"] + ".representation",
                     str(representation["_id"]),
                     type="string")
Example #28
    def update(self, container, representation):

        import maya.cmds as cmds

        path = api.get_representation_path(representation)

        # Update the cache
        members = cmds.sets(container['objectName'], query=True)
        caches = cmds.ls(members, type="gpuCache", long=True)

        assert len(caches) == 1, "This is a bug"

        for cache in caches:
            cmds.setAttr(cache + ".cacheFileName", path, type="string")

        cmds.setAttr(container["objectName"] + ".representation",
                     str(representation["_id"]),
                     type="string")
Example #29
    def update(self, container, representation):
        node = container.pop("node")

        collections, remainder = clique.assemble(
            os.listdir(
                os.path.dirname(api.get_representation_path(representation))))
        files = []
        for f in list(collections[0]):
            files.append(
                os.path.join(os.path.dirname(self.fname),
                             f).replace("\\", "/"))

        harmony.send({
            "function": copy_files + replace_files,
            "args": [files, node, 1]
        })

        harmony.imprint(node, {"representation": str(representation["_id"])})
Example #30
    def load_palette(self, representation):
        subset_name = representation["context"]["subset"]
        name = subset_name.replace("palette", "")

        # Overwrite palette on disk.
        scene_path = harmony.send({"function":
                                   "scene.currentProjectPath"})["result"]
        src = api.get_representation_path(representation)
        dst = os.path.join(scene_path, "palette-library",
                           "{}.plt".format(name))
        shutil.copy(src, dst)

        harmony.save_scene()

        msg = "Updated {}.".format(subset_name)
        msg += " You need to reload the scene to see the changes."

        harmony.send({"function": "PypeHarmony.message", "args": msg})
        return name