Example #1
def update_frame_range(comp, representations):
    """Update the frame range of the comp and render length

    The start and end frame are based on the lowest start frame and the highest
    end frame

    Args:
        comp (object): current focused comp
        representations (list): collection of dicts

    Returns:
        None

    """

    version_ids = [r["parent"] for r in representations]
    versions = io.find({"type": "version", "_id": {"$in": version_ids}})
    versions = list(versions)

    versions = [v for v in versions
                if v["data"].get("startFrame", None) is not None]

    if not versions:
        log.warning("No versions loaded to match frame range to.\n")
        return

    start = min(v["data"]["frameStart"] for v in versions)
    end = max(v["data"]["frameEnd"] for v in versions)

    fusion_lib.update_frame_range(start, end, comp=comp)
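A minimal usage sketch (not from the source) showing how the helper above might be driven; `comp` comes from the host application and `loaded_repre_ids` is a hypothetical list of representation ids already loaded into it:

def match_comp_to_loaded(comp, loaded_repre_ids):
    # Fetch the representation documents and let update_frame_range()
    # derive the comp range from their parent versions.
    representations = list(io.find({
        "type": "representation",
        "_id": {"$in": loaded_repre_ids}
    }))
    update_frame_range(comp, representations)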
Example #2
    def refresh(self):
        """Refresh the data for the model."""

        self.clear()
        self.beginResetModel()

        # Get all assets in current silo sorted by name
        db_assets = io.find({"type": "asset"}).sort("name", 1)
        silos = db_assets.distinct("silo") or None
        # if any silo is set to None then it's expected it should not be used
        if silos and None in silos:
            silos = None

        # Group the assets by their visual parent's id
        assets_by_parent = collections.defaultdict(list)
        for asset in db_assets:
            parent_id = (
                asset.get("data", {}).get("visualParent") or
                asset.get("silo")
            )
            assets_by_parent[parent_id].append(asset)

        # Build the hierarchical tree items recursively
        self._add_hierarchy(
            assets_by_parent,
            parent=None,
            silos=silos
        )

        self.endResetModel()
Example #3
    def fill_latest_versions(self, context):
        """Try to find latest version for each instance's subset.

        Key "latestVersion" is always set to latest version or `None`.

        Args:
            context (pyblish.Context)

        Returns:
            None

        """
        self.log.debug("Qeurying latest versions for instances.")

        hierarchy = {}
        subset_filters = []
        for instance in context:
            # Make sure `"latestVersion"` key is set
            latest_version = instance.data.get("latestVersion")
            instance.data["latestVersion"] = latest_version

            # Skip instances without "assetEntity"
            asset_doc = instance.data.get("assetEntity")
            if not asset_doc:
                continue

            # Store asset ids and subset names for queries
            asset_id = asset_doc["_id"]
            subset_name = instance.data["subset"]

            # Prepare instance hierarchy for faster filling of latest versions
            if asset_id not in hierarchy:
                hierarchy[asset_id] = {}
            if subset_name not in hierarchy[asset_id]:
                hierarchy[asset_id][subset_name] = []
            hierarchy[asset_id][subset_name].append(instance)
            subset_filters.append({"parent": asset_id, "name": subset_name})

        subset_docs = []
        if subset_filters:
            subset_docs = list(
                io.find({
                    "type": "subset",
                    "$or": subset_filters
                }))

        subset_ids = [subset_doc["_id"] for subset_doc in subset_docs]

        last_version_by_subset_id = self._query_last_versions(subset_ids)
        for subset_doc in subset_docs:
            subset_id = subset_doc["_id"]
            last_version = last_version_by_subset_id.get(subset_id)
            if last_version is None:
                continue

            asset_id = subset_doc["parent"]
            subset_name = subset_doc["name"]
            _instances = hierarchy[asset_id][subset_name]
            for _instance in _instances:
                _instance.data["latestVersion"] = last_version
Example #4
def get_protected(instance):
    from avalon import io

    protected = dict()

    asset = instance.context.data["assetDoc"]
    subset = io.find_one({"type": "subset",
                          "parent": asset["_id"],
                          "name": instance.data["subset"]})

    if subset is not None:
        versions = io.find({"type": "version", "parent": subset["_id"]},
                           sort=[("name", -1)])

        for version in versions:
            repre = io.find_one({"type": "representation",
                                 "parent": version["_id"],
                                 "name": "mayaBinary"})
            if repre is None:
                continue

            lock_list = repre["data"].get("modelProtected")
            if lock_list is None:
                continue

            profile = repre["data"].get("modelProfile", dict())
            for id in lock_list:
                data = profile[id][0]  # Should have only one mesh per id
                name = data.pop("hierarchy")
                protected[name] = data

    return protected
Example #5
    def get_invalid(cls, instance):

        invalid = []

        # Get all id required nodes
        id_required_nodes = lib.get_id_required_nodes(referenced_nodes=True,
                                                      nodes=instance[:])

        # check ids against database ids
        db_asset_ids = io.find({"type": "asset"}).distinct("_id")
        db_asset_ids = set(str(i) for i in db_asset_ids)

        # Check each node's asset ID against the database IDs
        for node in id_required_nodes:
            cb_id = lib.get_id(node)

            # Ignore nodes without id, those are validated elsewhere
            if not cb_id:
                continue

            asset_id = cb_id.split(":", 1)[0]
            if asset_id not in db_asset_ids:
                cls.log.error("`%s` has unassociated asset ID" % node)
                invalid.append(node)

        return invalid
Example #6
def list_looks(asset_id):
    """Return all look subsets from database for the given asset
    """
    look_subsets = list(
        io.find({
            "parent": asset_id,
            "type": "subset",
            "name": {
                "$regex": "look*"
            },
            # Ignore looks that have been dumped to trash
            "data.subsetGroup": {
                "$ne": "Trash Bin"
            }
        }))
    for look in look_subsets:
        # Get the latest version of this look subset
        version = io.find_one({
            "type": "version",
            "parent": look["_id"]
        },
                              sort=[("name", -1)])
        look["version"] = version["name"]
        look["versionId"] = version["_id"]

    return look_subsets
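Note that as a MongoDB regular expression the pattern "look*" matches "loo" followed by zero or more "k" characters anywhere in the name. If the intent is "subset names starting with look", an anchored variant of the query above is the safer sketch:

    look_subsets = list(
        io.find({
            "parent": asset_id,
            "type": "subset",
            "name": {"$regex": "^look"},
            "data.subsetGroup": {"$ne": "Trash Bin"}
        }))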
Example #7
        def texture_lookup(version_id):
            representations = set()

            dependent = "data.dependents.%s" % version_id
            filter = {
                "type": "version",
                "data.families": "reveries.texture",
                dependent: {
                    "$exists": True
                },
            }
            version = io.find_one(filter)

            if version is not None:
                representation = io.find_one({"parent": version["_id"]})
                representations.add(str(representation["_id"]))
                # Patching textures
                for pre_version in io.find(
                    {
                        "parent": version["parent"],
                        "name": {
                            "$lt": version["name"]
                        }
                    },
                        sort=[("name", -1)]):

                    pre_repr = io.find_one({"parent": pre_version["_id"]})

                    if "fileInventory" in pre_repr["data"]:
                        representations.add(str(pre_repr["_id"]))
                    else:
                        break

            return representations
Example #8
    def setEditorData(self, editor, index):

        editor.clear()

        # Current value of the index
        value = index.data(QtCore.Qt.DisplayRole)
        assert isinstance(value, int), "Version is not `int`"

        # Add all available versions to the editor
        node = index.data(self._noderole)
        parent_id = node['version_document']['parent']
        versions = io.find({
            "type": "version",
            "parent": parent_id
        },
                           sort=[("name", 1)])
        index = 0
        for i, version in enumerate(versions):
            label = self._format_version(version['name'])
            editor.addItem(label, userData=version)

            if version['name'] == value:
                index = i

        editor.setCurrentIndex(index)
Example #9
    def process(self, context):
        asset_builds = {}
        for asset in io.find({"type": "asset"}):
            if asset["data"]["entityType"] == "AssetBuild":
                self.log.debug("Found \"{}\" in database.".format(asset))
                asset_builds[asset["name"]] = asset

        for instance in context:
            if instance.data["family"] != "clip":
                continue

            # Exclude non-tagged instances.
            tagged = False
            asset_names = []
            for tag in instance.data["tags"]:
                family = dict(tag["metadata"]).get("tag.family", "")
                if family.lower() == "assetbuild":
                    asset_names.append(tag["name"])
                    tagged = True

            if not tagged:
                self.log.debug("Skipping \"{}\" because its not tagged with "
                               "\"assetbuild\"".format(instance))
                continue

            # Collect asset builds.
            data = {"assetbuilds": []}
            for name in asset_names:
                data["assetbuilds"].append(asset_builds[name])
            self.log.debug("Found asset builds: {}".format(
                data["assetbuilds"]))

            instance.data.update(data)
Example #10
def get_versions_from_sourcefile(source, project):
    """Get version documents by the source path

    Matches the path against the field `version.data.source` to query the
    latest versions.

    Args:
        source (str): A path string where subsets were published from
        project (str): Project name

    """
    source = source.split(project, 1)[-1].replace("\\", "/")
    source = {"$regex": "/*{}".format(source), "$options": "i"}

    cursor = io.find({"type": "version",
                      "data.source": source},
                     sort=[("name", -1)])
    # (NOTE) Each version usually comes from a different source file, but
    #        let's not make that assumption.
    #        So here we filter out other versions that belong to the same
    #        subset.
    subsets = set()
    for version in cursor:
        if version["parent"] not in subsets:
            subsets.add(version["parent"])

            yield version

        else:
            continue
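A short usage sketch of the generator above; the path and project name are made up for illustration:

source = "C:/projects/MyProject/assets/hero/work/maya/scene_v003.ma"
for version in get_versions_from_sourcefile(source, "MyProject"):
    print(version["parent"], version["name"])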
Example #11
    def _asset_docs_by_parent_id(self, instance):
        # Query all assets of the project and group them by their parent's id
        asset_docs_by_parent_id = collections.defaultdict(list)
        for asset_doc in io.find({"type": "asset"}):
            parent_id = asset_doc["data"]["visualParent"]
            asset_docs_by_parent_id[parent_id].append(asset_doc)
        return asset_docs_by_parent_id
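A hedged sketch (not part of the source) of how the mapping returned above could be walked depth-first; it assumes root assets end up under the `None` key because their `visualParent` is `None`:

def walk_hierarchy(asset_docs_by_parent_id, parent_id=None, depth=0):
    # defaultdict(list) returns an empty list for leaf assets,
    # which terminates the recursion.
    for asset_doc in asset_docs_by_parent_id[parent_id]:
        print("  " * depth + asset_doc["name"])
        walk_hierarchy(asset_docs_by_parent_id, asset_doc["_id"], depth + 1)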
Example #12
    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are applied
        to its inputs.

        """

        from avalon.nuke import (
            update_container
        )

        node = nuke.toNode(container['objectName'])

        root = api.get_representation_path(representation).replace("\\", "/")

        # Get start frame from version data
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })

        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        updated_dict = {}
        updated_dict.update({
            "representation": str(representation["_id"]),
            "frameEnd": version["data"].get("frameEnd"),
            "version": version.get("name"),
            "colorspace": version["data"].get("colorspace"),
            "source": version["data"].get("source"),
            "handles": version["data"].get("handles"),
            "fps": version["data"].get("fps"),
            "author": version["data"].get("author"),
            "outputDir": version["data"].get("outputDir"),
        })

        # Update the imprinted representation
        update_container(
            node,
            updated_dict
        )

        node["file"].setValue(root)

        # change color of node
        if version.get("name") not in [max_version]:
            node["tile_color"].setValue(int("0xd84f20ff", 16))
        else:
            node["tile_color"].setValue(int("0xff0ff0ff", 16))

        self.log.info("udated to version: {}".format(version.get("name")))
Example #13
    def on_silo_changed(self, index):
        name = model.data(index, "name")
        api.Session["AVALON_SILO"] = name

        frame = self.current_frame()

        self.docs = sorted(
            io.find({
                "type": "asset",
                "parent": frame["project"],
                "silo": name
            }),
            # Hard-sort by group
            # TODO(marcus): Sorting should really happen in
            # the model, via e.g. a Proxy.
            key=lambda item: (
                # Sort by group
                item["data"].get(
                    "group",

                    # Put items without a
                    # group at the top
                    "0"),

                # Sort inner items by name
                item["name"]))
        valid_docs = []
        for doc in self.docs:
            # Discard hidden items
            if not doc["data"].get("visible", True):
                continue

            data = {
                "_id": doc["_id"],
                "name": doc["name"],
                "icon": DEFAULTS["icon"]["asset"]
            }
            data.update(doc["data"])

            if "visualParent" in doc["data"]:
                vis_par = doc["data"]["visualParent"]
                if vis_par is not None:
                    continue

            if "label" not in data:
                data["label"] = doc["name"]
            valid_docs.append(data)

        frame["environment"]["silo"] = name
        frame["name"] = name
        frame["type"] = "silo"

        self._frames.append(frame)
        self._model.push(valid_docs)
        self.pushed.emit(name)
Example #14
    def fill_missing_asset_docs(self, context):
        self.log.debug("Qeurying asset documents for instances.")

        context_asset_doc = context.data["assetEntity"]

        instances_with_missing_asset_doc = collections.defaultdict(list)
        for instance in context:
            instance_asset_doc = instance.data.get("assetEntity")
            _asset_name = instance.data["asset"]

            # There is possibility that assetEntity on instance is already set
            # which can happen in standalone publisher
            if (instance_asset_doc
                    and instance_asset_doc["name"] == _asset_name):
                continue

            # Check if asset name is the same as what is in context
            # - they may be different, e.g. in NukeStudio
            if context_asset_doc["name"] == _asset_name:
                instance.data["assetEntity"] = context_asset_doc

            else:
                instances_with_missing_asset_doc[_asset_name].append(instance)

        if not instances_with_missing_asset_doc:
            self.log.debug("All instances already had right asset document.")
            return

        asset_names = list(instances_with_missing_asset_doc.keys())
        self.log.debug("Querying asset documents with names: {}".format(
            ", ".join(["\"{}\"".format(name) for name in asset_names])))
        asset_docs = io.find({"type": "asset", "name": {"$in": asset_names}})
        asset_docs_by_name = {
            asset_doc["name"]: asset_doc
            for asset_doc in asset_docs
        }

        not_found_asset_names = []
        for asset_name, instances in instances_with_missing_asset_doc.items():
            asset_doc = asset_docs_by_name.get(asset_name)
            if not asset_doc:
                not_found_asset_names.append(asset_name)
                continue

            for _instance in instances:
                _instance.data["assetEntity"] = asset_doc

        if not_found_asset_names:
            joined_asset_names = ", ".join(
                ["\"{}\"".format(name) for name in not_found_asset_names])
            self.log.warning(("Not found asset documents with names \"{}\"."
                              ).format(joined_asset_names))
Example #15
def test_save_idempotent():
    """Saving many times doesn't duplicate assets"""

    inventory.save(name=self._project["name"],
                   config=self._config,
                   inventory=self._inventory)

    assert_equals(
        io.find({
            "type": "asset"
        }).count(),
        len(self._inventory["assets"]) + len(self._inventory["film"]))

    inventory.save(name=self._project["name"],
                   config=self._config,
                   inventory=self._inventory)

    assert_equals(
        io.find({
            "type": "asset"
        }).count(),
        len(self._inventory["assets"]) + len(self._inventory["film"]))
Example #16
def list_looks(asset_id):
    """Return all look subsets for the given asset

    This assumes the names of all look subsets start with "look".
    """

    # Get all subsets associated with the asset whose names start with "look"
    subset = io.find({"parent": bson.ObjectId(asset_id),
                      "type": "subset",
                      "name": {"$regex": "look*"}})

    return list(subset)
Example #17
    def show_version_dialog(self, items):
        """Create a dialog with the available versions for the selected file

        :param items: list of items to run the "set_version" for
        :type items: list

        :returns: None
        """

        active = items[-1]

        # Get available versions for active representation
        representation_id = io.ObjectId(active["representation"])
        representation = io.find_one({"_id": representation_id})
        version = io.find_one({"_id": representation["parent"]})

        versions = io.find({"parent": version["parent"]}, sort=[("name", 1)])
        versions = list(versions)

        current_version = active["version"]

        # Get index among the listed versions
        index = len(versions) - 1
        for i, version in enumerate(versions):
            if version["name"] == current_version:
                index = i
                break

        versions_by_label = dict()
        labels = []
        for version in versions:
            label = "v{0:03d}".format(version["name"])
            labels.append(label)
            versions_by_label[label] = version

        label, state = QtWidgets.QInputDialog.getItem(self,
                                                      "Set version..",
                                                      "Set version number "
                                                      "to",
                                                      labels,
                                                      current=index,
                                                      editable=False)
        if not state:
            return

        if label:
            version = versions_by_label[label]["name"]
            for item in items:
                api.update(item, version)
            # refresh model when done
            self.data_changed.emit()
Example #18
    def current_hero_ents(self, version):
        hero_version = io.find_one({
            "parent": version["parent"],
            "type": "hero_version"
        })

        if not hero_version:
            return (None, [])

        hero_repres = list(io.find({
            "parent": hero_version["_id"],
            "type": "representation"
        }))
        return (hero_version, hero_repres)
Example #19
    def current_master_ents(self, version):
        master_version = io.find_one({
            "parent": version["parent"],
            "type": "master_version"
        })

        if not master_version:
            return (None, [])

        master_repres = list(io.find({
            "parent": master_version["_id"],
            "type": "representation"
        }))
        return (master_version, master_repres)
Example #20
def checkInventoryVersions():
    """
    Actiual version idetifier of Loaded containers

    Any time this function is run it will check all nodes and filter only
    Loader nodes for its version. It will get all versions from database
    and check if the node is having actual version. If not then it will color
    it to red.
    """
    # TODO: make it for all nodes not just Read (Loader

    # get all Loader nodes by avalon attribute metadata
    for each in nuke.allNodes():
        if each.Class() == 'Read':
            container = avalon.nuke.parse_container(each)

            if container:
                node = container["_node"]
                avalon_knob_data = avalon.nuke.get_avalon_knob_data(
                    node, ['avalon:', 'ak:'])

                # get representation from io
                representation = io.find_one({
                    "type":
                    "representation",
                    "_id":
                    io.ObjectId(avalon_knob_data["representation"])
                })

                # Get start frame from version data
                version = io.find_one({
                    "type": "version",
                    "_id": representation["parent"]
                })

                # get all versions in list
                versions = io.find({
                    "type": "version",
                    "parent": version["parent"]
                }).distinct('name')

                max_version = max(versions)

                # check the available versions and do the match;
                # change the color of the node if it is not the max version
                if version.get("name") not in [max_version]:
                    node["tile_color"].setValue(int("0xd84f20ff", 16))
                else:
                    node["tile_color"].setValue(int("0x4ecd25ff", 16))
Example #21
def load(name):
    """Read project called `name` from database

    Arguments:
        name (str): Project name

    """

    print("Loading .inventory.toml and .config.toml..")

    project = io.find_one({"type": "project"})

    if project is None:
        msg = "'{0}' not found, try --init to start a new project".format(name)

        projects = ""
        for project in io.projects():
            projects += "\n- {0}".format(project["name"])

        if projects:
            msg += (
                ", or load a project from the database.\nProjects:{0}".format(
                    projects))
        else:
            msg += "."

        raise Exception(msg)

    else:
        config = project["config"]
        inventory = {"schema": "avalon-core:inventory-1.0"}
        for asset in io.find({"type": "asset", "parent": project["_id"]}):
            silo = asset["silo"]
            data = asset["data"]

            data.pop("visualParent", None)  # Hide from manual editing

            if silo not in inventory:
                inventory[silo] = list()

            inventory[silo].append(dict(data, **{"name": asset["name"]}))

            for key, value in project["data"].items():
                inventory[key] = value

    config = dict(DEFAULTS["config"], **config)

    return config, inventory
Example #22
    def node_version_color(self, version, node):
        """ Coloring a node by correct color by actual version
        """
        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        # change color of node
        if version.get("name") not in [max_version]:
            node["tile_color"].setValue(int("0xd88467ff", 16))
        else:
            node["tile_color"].setValue(int(self.node_color, 16))
Example #23
    def set_item_color(cls, track_item, version):

        # define version name
        version_name = version.get("name", None)
        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        # set clip colour
        if version_name == max_version:
            track_item.source().binItem().setColor(cls.clip_color_last)
        else:
            track_item.source().binItem().setColor(cls.clip_color)
Example #24
    def _copy_representations(self, representation_id):
        """Copy all documents and files of representation and dependencies"""
        # Representation
        representation = self._find_one({"_id": representation_id})
        if not representation:
            representation = io.find_one({"_id": representation_id})
            self._insert_one(representation)

            # Version
            version = io.find_one({"_id": representation["parent"]})
            if not self._find_one({"_id": version["_id"]}):
                self._insert_one(version)

                # Subset
                subset = io.find_one({"_id": version["parent"]})
                if not self._find_one({"_id": subset["_id"]}):
                    self._insert_one(subset)

                    # Asset
                    asset = io.find_one({"_id": subset["parent"]})
                    if not self._find_one({"_id": asset["_id"]}):
                        asset["parent"] = self._project["_id"]
                        self._insert_one(asset)

                        # Asset Visual Parent
                        parent_id = asset["data"]["visualParent"]
                        if parent_id:
                            parent_id = io.ObjectId(parent_id)
                            if not self._find_one({"_id": parent_id}):
                                parent_asset = io.find_one({"_id": parent_id})
                                parent_asset["parent"] = self._project["_id"]
                                self._insert_one(parent_asset)

                # Dependencies
                for dependency_id in version["data"]["dependencies"]:
                    dependency_id = io.ObjectId(dependency_id)
                    for representation_ in io.find({"parent": dependency_id}):
                        self._copy_representations(representation_["_id"])

        # Copy package
        parents = io.parenthood(representation)
        src_package = get_representation_path_(representation, parents)
        parents = parents[:-1] + [self._project]
        representation["data"]["reprRoot"] = self._project["data"].get("root")
        dst_package = get_representation_path_(representation, parents)
        self._copy_dir(src_package, dst_package)
Example #25
    def on_silo_changed(self, index):
        name = model.data(index, "name")
        api.Session["AVALON_SILO"] = name

        frame = self.current_frame()

        self._model.push([
            dict({
                "_id": doc["_id"],
                "name": doc["name"],
                "icon": DEFAULTS["icon"]["asset"],
            }, **doc["data"])
            for doc in sorted(
                io.find({
                    "type": "asset",
                    "parent": frame["project"],
                    "silo": name
                }),

                # Hard-sort by group
                # TODO(marcus): Sorting should really happen in
                # the model, via e.g. a Proxy.
                key=lambda item: (
                    # Sort by group
                    item["data"].get(
                        "group",

                        # Put items without a
                        # group at the top
                        "0"),

                    # Sort inner items by name
                    item["name"]
                )
            )

            # Discard hidden items
            if self._get_asset_visible(doc)
        ])

        frame["environment"]["silo"] = name

        self._frames.append(frame)
        self.pushed.emit(name)
Example #26
    def process(self, context):
        asset_names = set()
        for instance in context:
            asset_names.add(instance.data["asset"])

        asset_docs = io.find(
            {
                "type": "asset",
                "name": {"$in": list(asset_names)}
            },
            {
                "name": 1,
                "data.tasks": 1
            }
        )
        tasks_by_asset_names = {}
        for asset_doc in asset_docs:
            asset_name = asset_doc["name"]
            asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
            tasks_by_asset_names[asset_name] = list(asset_tasks.keys())

        missing_tasks = []
        for instance in context:
            asset_name = instance.data["asset"]
            task_name = instance.data["task"]
            task_names = tasks_by_asset_names.get(asset_name) or []
            if task_name and task_name in task_names:
                continue
            missing_tasks.append((asset_name, task_name))

        # Everything is OK
        if not missing_tasks:
            return

        # Raise an exception
        msg = "Couldn't find task name/s required for publishing.\n{}"
        pair_msgs = []
        for missing_pair in missing_tasks:
            pair_msgs.append(
                "Asset: \"{}\" Task: \"{}\"".format(*missing_pair)
            )

        raise AssertionError(msg.format("\n".join(pair_msgs)))
Example #27
    def _add_hierarchy(self, parent=None):

        # Find the assets under the parent
        find_data = {"type": "asset"}
        if parent is None:
            find_data['$or'] = [{
                'data.visualParent': {
                    '$exists': False
                }
            }, {
                'data.visualParent': None
            }]
        else:
            find_data["data.visualParent"] = parent['_id']

        assets = io.find(find_data).sort('name', 1)
        for asset in assets:
            # get label from data, otherwise use name
            data = asset.get("data", {})
            label = data.get("label", asset['name'])
            tags = data.get("tags", [])

            # store whether the asset is deprecated (for optimization)
            deprecated = "deprecated" in tags

            node = Node({
                "_id": asset['_id'],
                "name": asset["name"],
                "label": label,
                "type": asset['type'],
                "tags": ", ".join(tags),
                "deprecated": deprecated,
                "_document": asset
            })
            self.add_child(node, parent=parent)

            # Add asset's children recursively
            self._add_hierarchy(node)
Example #28
def update_frame_range(comp, representations):
    """Update the frame range of the comp and render length

    The start and end frame are based on the lowest start frame and the highest
    end frame

    Args:
        comp (object): current focused comp
        representations (list): collection of dicts

    Returns:
        None

    """

    version_ids = [r["parent"] for r in representations]
    versions = io.find({"type": "version", "_id": {"$in": version_ids}})
    versions = list(versions)

    start = min(v["data"]["frameStart"] for v in versions)
    end = max(v["data"]["frameEnd"] for v in versions)

    fusion_lib.update_frame_range(start, end, comp=comp)
Example #29
def load(name):
    """Read project called `name` from database

    Arguments:
        name (str): Project name

    """

    print("Loading .inventory.toml and .config.toml..")

    project = io.find_one({"type": "project"})

    if project is None:
        raise Exception("'%s' not found, try --init "
                        "to start a new project." % name)

    else:
        config = project["config"]
        inventory = {"schema": "avalon-core:inventory-1.0"}
        for asset in io.find({"type": "asset", "parent": project["_id"]}):
            silo = asset["silo"]
            data = asset["data"]

            if silo not in inventory:
                inventory[silo] = list()

            inventory[silo].append(dict(data, **{"name": asset["name"]}))

            for key, value in project["data"].items():
                inventory[key] = value

    config = dict(
        DEFAULTS["config"],
        **config
    )

    return config, inventory
Example #30
    def register(self, instance):
        # Required environment variables
        anatomy_data = instance.data["anatomyData"]

        io.install()

        context = instance.context

        project_entity = instance.data["projectEntity"]

        context_asset_name = context.data["assetEntity"]["name"]

        asset_name = instance.data["asset"]
        asset_entity = instance.data.get("assetEntity")
        if not asset_entity or asset_entity["name"] != context_asset_name:
            asset_entity = io.find_one({
                "type": "asset",
                "name": asset_name,
                "parent": project_entity["_id"]
            })
            assert asset_entity, (
                "No asset found by the name \"{0}\" in project \"{1}\""
            ).format(asset_name, project_entity["name"])

            instance.data["assetEntity"] = asset_entity

            # update anatomy data with asset specific keys
            # - name should already be set
            hierarchy = ""
            parents = asset_entity["data"]["parents"]
            if parents:
                hierarchy = "/".join(parents)
            anatomy_data["hierarchy"] = hierarchy

        task_name = instance.data.get("task")
        if task_name:
            anatomy_data["task"] = task_name

        anatomy_data["family"] = instance.data.get("family")

        stagingdir = instance.data.get("stagingDir")
        if not stagingdir:
            self.log.info(
                ("{0} is missing reference to staging directory."
                 " Will try to get it from representation.").format(instance))

        else:
            self.log.debug(
                "Establishing staging directory @ {0}".format(stagingdir))

        # Ensure at least one file is set up for transfer in staging dir.
        repres = instance.data.get("representations")
        assert repres, "Instance has no files to transfer"
        assert isinstance(
            repres,
            (list,
             tuple)), ("Instance 'files' must be a list, got: {0} {1}".format(
                 str(type(repres)), str(repres)))

        subset = self.get_subset(asset_entity, instance)
        instance.data["subsetEntity"] = subset

        version_number = instance.data["version"]
        self.log.debug("Next version: v{}".format(version_number))

        version_data = self.create_version_data(context, instance)

        version_data_instance = instance.data.get('versionData')
        if version_data_instance:
            version_data.update(version_data_instance)

        # TODO rename method from `create_version` to
        # `prepare_version` or similar...
        version = self.create_version(subset=subset,
                                      version_number=version_number,
                                      data=version_data)

        self.log.debug("Creating version ...")

        new_repre_names_low = [_repre["name"].lower() for _repre in repres]

        existing_version = io.find_one({
            'type': 'version',
            'parent': subset["_id"],
            'name': version_number
        })

        if existing_version is None:
            version_id = io.insert_one(version).inserted_id
        else:
            # Check if the instance has `append` mode set, which causes only
            # duplicated representations to be archived
            append_repres = instance.data.get("append", False)

            # Update version data
            # TODO query by _id and
            io.update_many(
                {
                    'type': 'version',
                    'parent': subset["_id"],
                    'name': version_number
                }, {'$set': version})
            version_id = existing_version['_id']

            # Find representations of existing version and archive them
            current_repres = list(
                io.find({
                    "type": "representation",
                    "parent": version_id
                }))
            bulk_writes = []
            for repre in current_repres:
                if append_repres:
                    # archive only duplicated representations
                    if repre["name"].lower() not in new_repre_names_low:
                        continue
                # The representation must change type and its `_id` must be
                # stored under another key and replaced with a new one,
                # because the new representations should keep the same ID
                repre_id = repre["_id"]
                bulk_writes.append(DeleteOne({"_id": repre_id}))

                repre["orig_id"] = repre_id
                repre["_id"] = io.ObjectId()
                repre["type"] = "archived_representation"
                bulk_writes.append(InsertOne(repre))

            # bulk updates
            if bulk_writes:
                io._database[io.Session["AVALON_PROJECT"]].bulk_write(
                    bulk_writes)

        version = io.find_one({"_id": version_id})
        instance.data["versionEntity"] = version

        existing_repres = list(
            io.find({
                "parent": version_id,
                "type": "archived_representation"
            }))

        instance.data['version'] = version['name']

        intent_value = instance.context.data.get("intent")
        if intent_value and isinstance(intent_value, dict):
            intent_value = intent_value.get("value")

        if intent_value:
            anatomy_data["intent"] = intent_value

        anatomy = instance.context.data['anatomy']

        # Find the representations to transfer amongst the files
        # Each should be a single representation (as such, a single extension)
        representations = []
        destination_list = []

        if 'transfers' not in instance.data:
            instance.data['transfers'] = []

        template_name = self.template_name_from_instance(instance)

        published_representations = {}
        for idx, repre in enumerate(instance.data["representations"]):
            published_files = []

            # create template data for Anatomy
            template_data = copy.deepcopy(anatomy_data)
            if intent_value is not None:
                template_data["intent"] = intent_value

            resolution_width = repre.get("resolutionWidth")
            resolution_height = repre.get("resolutionHeight")
            fps = instance.data.get("fps")

            if resolution_width:
                template_data["resolution_width"] = resolution_width
            if resolution_height:
                template_data["resolution_height"] = resolution_height
            if fps:
                template_data["fps"] = fps

            files = repre['files']
            if repre.get('stagingDir'):
                stagingdir = repre['stagingDir']

            if repre.get("outputName"):
                template_data["output"] = repre['outputName']

            template = os.path.normpath(
                anatomy.templates[template_name]["path"])

            sequence_repre = isinstance(files, list)
            repre_context = None
            if sequence_repre:
                self.log.debug("files: {}".format(files))
                src_collections, remainder = clique.assemble(files)
                self.log.debug("src_tail_collections: {}".format(
                    str(src_collections)))
                src_collection = src_collections[0]

                # Assert that each member has identical suffix
                src_head = src_collection.format("{head}")
                src_tail = src_collection.format("{tail}")

                # fix dst_padding
                valid_files = [x for x in files if src_collection.match(x)]
                padd_len = len(valid_files[0].replace(src_head, "").replace(
                    src_tail, ""))
                src_padding_exp = "%0{}d".format(padd_len)

                test_dest_files = list()
                for i in [1, 2]:
                    template_data["representation"] = repre['ext']
                    template_data["frame"] = src_padding_exp % i
                    anatomy_filled = anatomy.format(template_data)
                    template_filled = anatomy_filled[template_name]["path"]
                    if repre_context is None:
                        repre_context = template_filled.used_values
                    test_dest_files.append(os.path.normpath(template_filled))
                template_data["frame"] = repre_context["frame"]

                self.log.debug("test_dest_files: {}".format(
                    str(test_dest_files)))

                dst_collections, remainder = clique.assemble(test_dest_files)
                dst_collection = dst_collections[0]
                dst_head = dst_collection.format("{head}")
                dst_tail = dst_collection.format("{tail}")

                index_frame_start = None

                if repre.get("frameStart"):
                    frame_start_padding = int(anatomy.templates["render"].get(
                        "frame_padding",
                        anatomy.templates["render"].get("padding")))

                    index_frame_start = int(repre.get("frameStart"))

                # exception for slate workflow
                if index_frame_start and "slate" in instance.data["families"]:
                    index_frame_start -= 1

                dst_padding_exp = src_padding_exp
                dst_start_frame = None
                for i in src_collection.indexes:
                    # TODO 1.) do not count padding in each index iteration
                    # 2.) do not count dst_padding from src_padding before
                    #   index_frame_start check
                    src_padding = src_padding_exp % i

                    src_file_name = "{0}{1}{2}".format(src_head, src_padding,
                                                       src_tail)

                    dst_padding = src_padding_exp % i

                    if index_frame_start:
                        dst_padding_exp = "%0{}d".format(frame_start_padding)
                        dst_padding = dst_padding_exp % index_frame_start
                        index_frame_start += 1

                    dst = "{0}{1}{2}".format(dst_head, dst_padding,
                                             dst_tail).replace("..", ".")

                    self.log.debug("destination: `{}`".format(dst))
                    src = os.path.join(stagingdir, src_file_name)

                    self.log.debug("source: {}".format(src))
                    instance.data["transfers"].append([src, dst])

                    published_files.append(dst)

                    # for adding first frame into db
                    if not dst_start_frame:
                        dst_start_frame = dst_padding

                # Store used frame value to template data
                template_data["frame"] = dst_start_frame
                dst = "{0}{1}{2}".format(dst_head, dst_start_frame,
                                         dst_tail).replace("..", ".")
                repre['published_path'] = dst

            else:
                # Single file
                #  _______
                # |      |\
                # |       |
                # |       |
                # |       |
                # |_______|
                #
                template_data.pop("frame", None)
                fname = files
                assert not os.path.isabs(fname), (
                    "Given file name is a full path")

                template_data["representation"] = repre['ext']

                src = os.path.join(stagingdir, fname)
                anatomy_filled = anatomy.format(template_data)
                template_filled = anatomy_filled[template_name]["path"]
                repre_context = template_filled.used_values
                dst = os.path.normpath(template_filled).replace("..", ".")

                instance.data["transfers"].append([src, dst])

                published_files.append(dst)
                repre['published_path'] = dst
                self.log.debug("__ dst: {}".format(dst))

            repre["publishedFiles"] = published_files

            for key in self.db_representation_context_keys:
                value = template_data.get(key)
                if not value:
                    continue
                repre_context[key] = template_data[key]

            # Use previous representation's id if there are any
            repre_id = None
            repre_name_low = repre["name"].lower()
            for _repre in existing_repres:
                # NOTE should we check lowered names?
                if repre_name_low == _repre["name"]:
                    repre_id = _repre["orig_id"]
                    break

            # Create a new id if the existing representations do not match
            if repre_id is None:
                repre_id = io.ObjectId()

            representation = {
                "_id": repre_id,
                "schema": "pype:representation-2.0",
                "type": "representation",
                "parent": version_id,
                "name": repre['name'],
                "data": {
                    'path': dst,
                    'template': template
                },
                "dependencies": instance.data.get("dependencies", "").split(),

                # Imprint shortcut to context
                # for performance reasons.
                "context": repre_context
            }

            if repre.get("outputName"):
                representation["context"]["output"] = repre['outputName']

            if sequence_repre and repre.get("frameStart"):
                representation['context']['frame'] = (
                    dst_padding_exp % int(repre.get("frameStart")))

            self.log.debug("__ representation: {}".format(representation))
            destination_list.append(dst)
            self.log.debug("__ destination_list: {}".format(destination_list))
            instance.data['destination_list'] = destination_list
            representations.append(representation)
            published_representations[repre_id] = {
                "representation": representation,
                "anatomy_data": template_data,
                "published_files": published_files
            }
            self.log.debug("__ representations: {}".format(representations))

        # Remove old representations if there are any (before insertion of new)
        if existing_repres:
            repre_ids_to_remove = []
            for repre in existing_repres:
                repre_ids_to_remove.append(repre["_id"])
            io.delete_many({"_id": {"$in": repre_ids_to_remove}})

        self.log.debug("__ representations: {}".format(representations))
        for rep in instance.data["representations"]:
            self.log.debug("__ represNAME: {}".format(rep['name']))
            self.log.debug("__ represPATH: {}".format(rep['published_path']))
        io.insert_many(representations)
        instance.data["published_representations"] = (
            published_representations)
        # self.log.debug("Representation: {}".format(representations))
        self.log.info("Registered {} items".format(len(representations)))