Example 1
    def from_representation(self, representation_id):
        """Generate job from representation

        Args:
            representation_id (str): Avalon representation Id

        """
        representation_id = io.ObjectId(representation_id)
        representation = io.find_one({
            "type": "representation",
            "_id": representation_id
        })
        context = pipeline.get_representation_context(representation)
        project = context["project"]
        asset = context["asset"]
        subset = context["subset"]
        version = context["version"]

        # Use loader to get local representation path
        for Loader in self.available_loaders:
            if not pipeline.is_compatible_loader(Loader, context):
                continue

            loader = Loader(context)
            if hasattr(loader, "fname"):
                repr_path = loader.fname
                break
        else:
            raise Exception("Counld not find Loader for '%s'"
                            "" % representation["name"])

        # Compute remote representation path
        template = project["config"]["template"]["publish"]
        remote_repr_path = template.format(
            **{
                "root": self.remote_root,
                "project": project["name"],
                "asset": asset["name"],
                "silo": asset["silo"],
                "subset": subset["name"],
                "version": version["name"],
                "representation": representation["name"],
            })

        # Resolve source and remote directories
        if os.path.isdir(repr_path):
            repr_dir = repr_path
            remote_dir = remote_repr_path
        elif os.path.isfile(repr_path):
            repr_dir = os.path.dirname(repr_path)
            remote_dir = os.path.dirname(remote_repr_path)
        else:
            raise Exception("Representation not exists.")

        # Collect all files
        jobs = list()

        repr_dir = os.path.normpath(repr_dir)
        remote_dir = os.path.normpath(remote_dir)

        for head, dirs, files in os.walk(repr_dir):

            remote_head = remote_dir + head[len(repr_dir):]

            for fname in files:
                if any(fname.endswith(ext) for ext in self.FILE_IGNORE):
                    continue

                local_file = os.path.join(head, fname)
                remote_path = os.path.join(remote_head, fname)

                jobs.append((local_file, remote_path))

        if not jobs:
            return

        # Add job
        description = ("[{asset}] {subset}.v{ver:0>3} - {repr}"
                       "".format(asset=asset["name"],
                                 subset=subset["name"],
                                 ver=version["name"],
                                 repr=representation["name"]))
        self.add_job(files=jobs,
                     type="Representation",
                     description=description)
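
A minimal sketch of how the publish template above resolves to a remote path. The template string is illustrative, not the project's actual configuration:

# Hypothetical publish template, shaped like an Avalon project config
template = ("{root}/{project}/{silo}/{asset}/publish/"
            "{subset}/v{version:0>3}/{representation}")
remote = template.format(
    root="//remote/storage",
    project="demo",
    silo="assets",
    asset="hero",
    subset="modelDefault",
    version=12,
    representation="abc",
)
print(remote)
# //remote/storage/demo/assets/hero/publish/modelDefault/v012/abc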
Example 2
    def register(self, instance):
        # Required environment variables
        anatomy_data = instance.data["anatomyData"]

        io.install()

        context = instance.context

        project_entity = instance.data["projectEntity"]

        context_asset_name = context.data["assetEntity"]["name"]

        asset_name = instance.data["asset"]
        asset_entity = instance.data.get("assetEntity")
        if not asset_entity or asset_entity["name"] != context_asset_name:
            asset_entity = io.find_one({
                "type": "asset",
                "name": asset_name,
                "parent": project_entity["_id"]
            })
            assert asset_entity, (
                "No asset found by the name \"{0}\" in project \"{1}\""
            ).format(asset_name, project_entity["name"])

            instance.data["assetEntity"] = asset_entity

            # update anatomy data with asset specific keys
            # - name should already be set
            hierarchy = ""
            parents = asset_entity["data"]["parents"]
            if parents:
                hierarchy = "/".join(parents)
            anatomy_data["hierarchy"] = hierarchy

        task_name = instance.data.get("task")
        if task_name:
            anatomy_data["task"] = task_name

        anatomy_data["family"] = instance.data.get("family")

        stagingdir = instance.data.get("stagingDir")
        if not stagingdir:
            self.log.info(
                ("{0} is missing reference to staging directory."
                 " Will try to get it from representation.").format(instance))

        else:
            self.log.debug(
                "Establishing staging directory @ {0}".format(stagingdir))

        # Ensure at least one file is set up for transfer in staging dir.
        repres = instance.data.get("representations")
        assert repres, "Instance has no files to transfer"
        assert isinstance(repres, (list, tuple)), (
            "Instance 'representations' must be a list or tuple,"
            " got: {0} {1}".format(str(type(repres)), str(repres)))

        subset = self.get_subset(asset_entity, instance)
        instance.data["subsetEntity"] = subset

        version_number = instance.data["version"]
        self.log.debug("Next version: v{}".format(version_number))

        version_data = self.create_version_data(context, instance)

        version_data_instance = instance.data.get('versionData')
        if version_data_instance:
            version_data.update(version_data_instance)

        # TODO rename method from `create_version` to
        # `prepare_version` or similar...
        version = self.create_version(subset=subset,
                                      version_number=version_number,
                                      data=version_data)

        self.log.debug("Creating version ...")

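        # Lowered names of the new representations, used to detect which
        # existing representations are being replaced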
        new_repre_names_low = [_repre["name"].lower() for _repre in repres]

        existing_version = io.find_one({
            'type': 'version',
            'parent': subset["_id"],
            'name': version_number
        })

        if existing_version is None:
            version_id = io.insert_one(version).inserted_id
        else:
            # Check if the instance has `append` mode set, which causes only
            # duplicated representations to be archived
            append_repres = instance.data.get("append", False)

            # Update version data
            # TODO query by _id and
            io.update_many(
                {
                    'type': 'version',
                    'parent': subset["_id"],
                    'name': version_number
                }, {'$set': version})
            version_id = existing_version['_id']

            # Find representations of existing version and archive them
            current_repres = list(
                io.find({
                    "type": "representation",
                    "parent": version_id
                }))
            bulk_writes = []
            for repre in current_repres:
                if append_repres:
                    # archive only duplicated representations
                    if repre["name"].lower() not in new_repre_names_low:
                        continue
                # Representation must change type and its `_id` must be
                # stored under another key and replaced with a new one,
                # because new representations should reuse the same ID
                repre_id = repre["_id"]
                bulk_writes.append(DeleteOne({"_id": repre_id}))

                repre["orig_id"] = repre_id
                repre["_id"] = io.ObjectId()
                repre["type"] = "archived_representation"
                bulk_writes.append(InsertOne(repre))

            # bulk updates
            if bulk_writes:
                io._database[io.Session["AVALON_PROJECT"]].bulk_write(
                    bulk_writes)

        version = io.find_one({"_id": version_id})
        instance.data["versionEntity"] = version

        existing_repres = list(
            io.find({
                "parent": version_id,
                "type": "archived_representation"
            }))

        instance.data['version'] = version['name']

        intent_value = instance.context.data.get("intent")
        if intent_value and isinstance(intent_value, dict):
            intent_value = intent_value.get("value")

        if intent_value:
            anatomy_data["intent"] = intent_value

        anatomy = instance.context.data['anatomy']

        # Find the representations to transfer amongst the files
        # Each should be a single representation (as such, a single extension)
        representations = []
        destination_list = []

        orig_transfers = []
        if 'transfers' not in instance.data:
            instance.data['transfers'] = []
        else:
            orig_transfers = list(instance.data['transfers'])

        template_name = self.template_name_from_instance(instance)

        published_representations = {}
        for idx, repre in enumerate(instance.data["representations"]):
            # Reset transfers for the next representation;
            # instance.data['transfers'] is used as a global variable
            # in the current codebase
            instance.data['transfers'] = list(orig_transfers)

            if "delete" in repre.get("tags", []):
                continue

            published_files = []

            # create template data for Anatomy
            template_data = copy.deepcopy(anatomy_data)
            if intent_value is not None:
                template_data["intent"] = intent_value

            resolution_width = repre.get("resolutionWidth")
            resolution_height = repre.get("resolutionHeight")
            fps = instance.data.get("fps")

            if resolution_width:
                template_data["resolution_width"] = resolution_width
            if resolution_height:
                template_data["resolution_height"] = resolution_height
            if fps:
                template_data["fps"] = fps

            files = repre['files']
            if repre.get('stagingDir'):
                stagingdir = repre['stagingDir']

            if repre.get("outputName"):
                template_data["output"] = repre['outputName']

            template_data["representation"] = repre["name"]

            ext = repre["ext"]
            if ext.startswith("."):
                self.log.warning(
                    ("Implementation warning: <\"{}\">"
                     " Representation's extension stored under \"ext\" key"
                     " starts with a dot (\"{}\").").format(repre["name"], ext))
                ext = ext[1:]
            repre["ext"] = ext
            template_data["ext"] = ext

            template = os.path.normpath(
                anatomy.templates[template_name]["path"])

            sequence_repre = isinstance(files, list)
            repre_context = None
            if sequence_repre:
                self.log.debug("files: {}".format(files))
                src_collections, remainder = clique.assemble(files)
                self.log.debug("src_tail_collections: {}".format(
                    str(src_collections)))
                src_collection = src_collections[0]

                # Get the common head and tail of the collection
                src_head = src_collection.format("{head}")
                src_tail = src_collection.format("{tail}")

                # Derive the source frame padding from the first matching file
                valid_files = [x for x in files if src_collection.match(x)]
                padd_len = len(valid_files[0].replace(src_head, "").replace(
                    src_tail, ""))
                src_padding_exp = "%0{}d".format(padd_len)

                test_dest_files = list()
                for i in [1, 2]:
                    template_data["frame"] = src_padding_exp % i
                    anatomy_filled = anatomy.format(template_data)
                    template_filled = anatomy_filled[template_name]["path"]
                    if repre_context is None:
                        repre_context = template_filled.used_values
                    test_dest_files.append(os.path.normpath(template_filled))
                template_data["frame"] = repre_context["frame"]

                self.log.debug("test_dest_files: {}".format(
                    str(test_dest_files)))

                dst_collections, remainder = clique.assemble(test_dest_files)
                dst_collection = dst_collections[0]
                dst_head = dst_collection.format("{head}")
                dst_tail = dst_collection.format("{tail}")

                index_frame_start = None

                # TODO use frame padding from the right template group
                if repre.get("frameStart") is not None:
                    frame_start_padding = int(anatomy.templates["render"].get(
                        "frame_padding",
                        anatomy.templates["render"].get("padding")))

                    index_frame_start = int(repre.get("frameStart"))

                # exception for slate workflow
                if index_frame_start and "slate" in instance.data["families"]:
                    index_frame_start -= 1

                dst_padding_exp = src_padding_exp
                dst_start_frame = None
                for i in src_collection.indexes:
                    # TODO 1.) do not compute padding in each index iteration
                    # 2.) do not derive dst_padding from src_padding before
                    #   the index_frame_start check
                    src_padding = src_padding_exp % i

                    src_file_name = "{0}{1}{2}".format(src_head, src_padding,
                                                       src_tail)

                    dst_padding = src_padding_exp % i

                    if index_frame_start is not None:
                        dst_padding_exp = "%0{}d".format(frame_start_padding)
                        dst_padding = dst_padding_exp % index_frame_start
                        index_frame_start += 1

                    dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)

                    self.log.debug("destination: `{}`".format(dst))
                    src = os.path.join(stagingdir, src_file_name)

                    self.log.debug("source: {}".format(src))
                    instance.data["transfers"].append([src, dst])

                    published_files.append(dst)

                    # for adding first frame into db
                    if not dst_start_frame:
                        dst_start_frame = dst_padding

                # Store used frame value to template data
                template_data["frame"] = dst_start_frame
                dst = "{0}{1}{2}".format(dst_head, dst_start_frame, dst_tail)
                repre['published_path'] = dst

            else:
                # Single file
                #  _______
                # |      |\
                # |       |
                # |       |
                # |       |
                # |_______|
                #
                template_data.pop("frame", None)
                fname = files
                assert not os.path.isabs(fname), (
                    "Given file name is a full path")

                src = os.path.join(stagingdir, fname)
                anatomy_filled = anatomy.format(template_data)
                template_filled = anatomy_filled[template_name]["path"]
                repre_context = template_filled.used_values
                dst = os.path.normpath(template_filled)

                instance.data["transfers"].append([src, dst])

                published_files.append(dst)
                repre['published_path'] = dst
                self.log.debug("__ dst: {}".format(dst))

            repre["publishedFiles"] = published_files

            for key in self.db_representation_context_keys:
                value = template_data.get(key)
                if not value:
                    continue
                repre_context[key] = template_data[key]

            # Reuse a previous representation's id if one exists
            repre_id = None
            repre_name_low = repre["name"].lower()
            for _repre in existing_repres:
                # NOTE should we check lowered names?
                if repre_name_low == _repre["name"]:
                    repre_id = _repre["orig_id"]
                    break

            # Create a new id if no existing representation matches
            if repre_id is None:
                repre_id = io.ObjectId()

            data = repre.get("data") or {}
            data.update({'path': dst, 'template': template})
            representation = {
                "_id": repre_id,
                "schema": "pype:representation-2.0",
                "type": "representation",
                "parent": version_id,
                "name": repre['name'],
                "data": data,
                "dependencies": instance.data.get("dependencies", "").split(),

                # Imprint shortcut to context
                # for performance reasons.
                "context": repre_context
            }

            if repre.get("outputName"):
                representation["context"]["output"] = repre['outputName']

            if sequence_repre and repre.get("frameStart"):
                representation['context']['frame'] = (
                    dst_padding_exp % int(repre.get("frameStart")))

            # any file that should be physically copied is expected in
            # 'transfers' or 'hardlinks'
            if instance.data.get('transfers', False) or \
               instance.data.get('hardlinks', False):
                # could throw exception, will be caught in 'process';
                # all DB integration is done together below,
                # so no rollback is needed
                self.log.debug("Integrating source files to destination ...")
                self.integrated_file_sizes.update(self.integrate(instance))
                self.log.debug("Integrated files {}".format(
                    self.integrated_file_sizes))

            # get 'files' info for representation and all attached resources
            self.log.debug("Preparing files information ...")
            representation["files"] = self.get_files_info(
                instance, self.integrated_file_sizes)

            self.log.debug("__ representation: {}".format(representation))
            destination_list.append(dst)
            self.log.debug("__ destination_list: {}".format(destination_list))
            instance.data['destination_list'] = destination_list
            representations.append(representation)
            published_representations[repre_id] = {
                "representation": representation,
                "anatomy_data": template_data,
                "published_files": published_files
            }
            self.log.debug("__ representations: {}".format(representations))

        # Remove old representations if there are any (before inserting new ones)
        if existing_repres:
            repre_ids_to_remove = []
            for repre in existing_repres:
                repre_ids_to_remove.append(repre["_id"])
            io.delete_many({"_id": {"$in": repre_ids_to_remove}})

        for rep in instance.data["representations"]:
            self.log.debug("__ rep: {}".format(rep))

        io.insert_many(representations)
        instance.data["published_representations"] = (
            published_representations)
        # self.log.debug("Representation: {}".format(representations))
        self.log.info("Registered {} items".format(len(representations)))
Example 3
File: lib.py Project: ldunham1/pype
def switch_item(container,
                asset_name=None,
                subset_name=None,
                representation_name=None):
    """Switch container asset, subset or representation of a container by name.

    It'll always switch to the latest version - of course a different
    approach could be implemented.

    Args:
        container (dict): data of the item to switch with
        asset_name (str): name of the asset
        subset_name (str): name of the subset
        representation_name (str): name of the representation

    Returns:
        dict

    """

    if all(not x for x in [asset_name, subset_name, representation_name]):
        raise ValueError("Must have at least one change provided to switch.")

    # Collect any of current asset, subset and representation if not provided
    # so we can use the original name from those.
    if any(not x for x in [asset_name, subset_name, representation_name]):
        _id = io.ObjectId(container["representation"])
        representation = io.find_one({"type": "representation", "_id": _id})
        version, subset, asset, project = io.parenthood(representation)

        if asset_name is None:
            asset_name = asset["name"]

        if subset_name is None:
            subset_name = subset["name"]

        if representation_name is None:
            representation_name = representation["name"]

    # Find the new one
    asset = io.find_one({"name": asset_name, "type": "asset"})
    assert asset, ("Could not find asset in the database with the name "
                   "'%s'" % asset_name)

    subset = io.find_one({
        "name": subset_name,
        "type": "subset",
        "parent": asset["_id"]
    })
    assert subset, ("Could not find subset in the database with the name "
                    "'%s'" % subset_name)

    version = io.find_one(
        {
            "type": "version",
            "parent": subset["_id"]
        },
        sort=[("name", -1)]
    )

    assert version, "Could not find a version for {}.{}".format(
        asset_name, subset_name)

    representation = io.find_one({
        "name": representation_name,
        "type": "representation",
        "parent": version["_id"]
    })

    assert representation, ("Could not find representation in the database "
                            "with the name '%s'" % representation_name)

    avalon.api.switch(container, representation)

    return representation
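
A hedged usage sketch, assuming a registered Avalon host whose ls() yields container dicts; the names are illustrative:

from avalon import api

for container in api.registered_host().ls():
    if container["name"] == "modelDefault":
        # Switch this container to the look subset, at the latest version
        new_repre = switch_item(container, subset_name="lookMain")
        print("Switched to:", new_repre["_id"])
        break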
Example 4
def update_scene(set_container, containers, current_data, new_data, new_file):
    """Updates the hierarchy, assets and their matrix

    Updates the following within the scene:
        * Setdress hierarchy alembic
        * Matrix
        * Parenting
        * Representations

    It removes any assets which are not present in the new build data

    Args:
        set_container (dict): the setdress container of the scene
        containers (list): the list of containers under the setdress container
        current_data (dict): the current build data of the setdress
        new_data (dict): the new build data of the setdress
        new_file (str): file path to the new setdress build data (.json)

    Returns:
        processed_containers (list): all new and updated containers

    """

    from pype.hosts.maya.lib import DEFAULT_MATRIX, get_container_transforms

    set_namespace = set_container['namespace']

    # Update the setdress hierarchy alembic
    set_root = get_container_transforms(set_container, root=True)
    set_hierarchy_root = cmds.listRelatives(set_root, fullPath=True)[0]
    set_hierarchy_reference = cmds.referenceQuery(set_hierarchy_root,
                                                  referenceNode=True)
    new_alembic = new_file.replace(".json", ".abc")
    assert os.path.exists(new_alembic), "%s does not exist." % new_alembic
    with unlocked(cmds.listRelatives(set_root, ad=True, fullPath=True)):
        cmds.file(new_alembic,
                  loadReference=set_hierarchy_reference,
                  type="Alembic")

    identity = DEFAULT_MATRIX[:]

    processed_namespaces = set()
    processed_containers = list()

    new_lookup = _instances_by_namespace(new_data)
    old_lookup = _instances_by_namespace(current_data)
    for container in containers:
        container_ns = container['namespace']

        # Consider it processed here; even if it fails we want to record that
        # the namespace was already taken.
        processed_namespaces.add(container_ns)
        processed_containers.append(container['objectName'])

        if container_ns in new_lookup:
            root = get_container_transforms(container, root=True)
            if not root:
                log.error("Can't find root for %s", container['objectName'])
                continue

            old_instance = old_lookup.get(container_ns, {})
            new_instance = new_lookup[container_ns]

            # Update the matrix
            # check matrix against old_data matrix to find local overrides
            current_matrix = cmds.xform(root,
                                        query=True,
                                        matrix=True,
                                        objectSpace=True)

            original_matrix = old_instance.get("matrix", identity)
            has_matrix_override = not matrix_equals(current_matrix,
                                                    original_matrix)

            if has_matrix_override:
                log.warning("Matrix override preserved on %s", container_ns)
            else:
                new_matrix = new_instance.get("matrix", identity)
                cmds.xform(root, matrix=new_matrix, objectSpace=True)

            # Update the parenting
            if old_instance.get("parent", None) != new_instance["parent"]:

                parent = to_namespace(new_instance['parent'], set_namespace)
                if not cmds.objExists(parent):
                    log.error("Can't find parent %s", parent)
                    continue

                # Set the new parent
                cmds.lockNode(root, lock=False)
                root = cmds.parent(root, parent, relative=True)
                cmds.lockNode(root, lock=True)

            # Update the representation
            representation_current = container['representation']
            representation_old = old_instance['representation']
            representation_new = new_instance['representation']
            has_representation_override = (representation_current !=
                                           representation_old)

            if representation_new != representation_current:

                if has_representation_override:
                    log.warning(
                        "Your scene had local representation "
                        "overrides within the set. New "
                        "representations not loaded for %s.", container_ns)
                    continue

                # We check it against the current 'loader' in the scene instead
                # of the original data of the package that was loaded because
                # an Artist might have made scene local overrides
                if new_instance['loader'] != container['loader']:
                    log.warning(
                        "Loader is switched - local edits will be "
                        "lost. Removing: %s", container_ns)

                    # Remove this from the "has been processed" list so it's
                    # considered a new element and added afterwards.
                    processed_containers.pop()
                    processed_namespaces.remove(container_ns)
                    api.remove(container)
                    continue

                # Check whether the conversion can be done by the Loader.
                # They *must* use the same asset, subset and Loader for
                # `api.update` to make sense.
                old = io.find_one({"_id": io.ObjectId(representation_current)})
                new = io.find_one({"_id": io.ObjectId(representation_new)})
                is_valid = compare_representations(old=old, new=new)
                if not is_valid:
                    log.error("Skipping: %s. See log for details.",
                              container_ns)
                    continue

                new_version = new["context"]["version"]
                api.update(container, version=new_version)

        else:
            # Remove this container because it's not in the new data
            log.warning("Removing content: %s", container_ns)
            api.remove(container)

    # Add new assets
    all_loaders = api.discover(api.Loader)
    for representation_id, instances in new_data.items():

        # Find the compatible loaders
        loaders = api.loaders_from_representation(all_loaders,
                                                  representation_id)
        for instance in instances:

            # Already processed in update functionality
            if instance['namespace'] in processed_namespaces:
                continue

            container = _add(instance=instance,
                             representation_id=representation_id,
                             loaders=loaders,
                             namespace=set_container['namespace'],
                             root=set_root)

            # Add to the setdress container
            cmds.sets(container, addElement=set_container['objectName'])

            processed_containers.append(container)

    return processed_containers
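
DEFAULT_MATRIX and matrix_equals come from pype.hosts.maya.lib; a minimal sketch of what they are assumed to do (element-wise float comparison against the flat 4x4 identity that Maya's xform returns):

# Flat 4x4 identity, as cmds.xform(..., query=True, matrix=True) returns it
DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0, 0.0,
                  0.0, 0.0, 1.0, 0.0,
                  0.0, 0.0, 0.0, 1.0]


def matrix_equals(a, b, tolerance=1e-10):
    """Return True when two flat matrices match within `tolerance`."""
    return len(a) == len(b) and all(
        abs(x - y) < tolerance for x, y in zip(a, b))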
Example 5
    def process(self, instance):
        self.log.debug(
            "--- Integration of Hero version for subset `{}` begins.".format(
                instance.data.get("subset", str(instance))
            )
        )
        published_repres = instance.data.get("published_representations")
        if not published_repres:
            self.log.debug(
                "*** There are not published representations on the instance."
            )
            return

        project_name = api.Session["AVALON_PROJECT"]

        # TODO raise error if Hero not set?
        anatomy = instance.context.data["anatomy"]
        if "hero" not in anatomy.templates:
            self.log.warning("!!! Anatomy does not have set `hero` key!")
            return

        if "path" not in anatomy.templates["hero"]:
            self.log.warning((
                "!!! The `path` template is not set in the `hero` anatomy"
                " for project \"{}\"."
            ).format(project_name))
            return

        hero_template = anatomy.templates["hero"]["path"]
        self.log.debug("`hero` template check was successful. `{}`".format(
            hero_template
        ))

        hero_publish_dir = self.get_publish_dir(instance)

        src_version_entity = instance.data.get("versionEntity")
        filtered_repre_ids = []
        for repre_id, repre_info in published_repres.items():
            repre = repre_info["representation"]
            if repre["name"].lower() in self.ignored_representation_names:
                self.log.debug(
                    "Filtering representation with name: `{}`".format(
                        repre["name"].lower()
                    )
                )
                filtered_repre_ids.append(repre_id)

        for repre_id in filtered_repre_ids:
            published_repres.pop(repre_id, None)

        if not published_repres:
            self.log.debug(
                "*** All published representations were filtered by name."
            )
            return

        if src_version_entity is None:
            self.log.debug((
                "Published version entity was not sent in representation data."
                " Querying entity from database."
            ))
            src_version_entity = (
                self.version_from_representations(published_repres)
            )

        if not src_version_entity:
            self.log.warning((
                "!!! Can't find origin version in database."
                " Skipping hero version publish."
            ))
            return

        all_copied_files = []
        transfers = instance.data.get("transfers", list())
        for _src, dst in transfers:
            dst = os.path.normpath(dst)
            if dst not in all_copied_files:
                all_copied_files.append(dst)

        hardlinks = instance.data.get("hardlinks", list())
        for _src, dst in hardlinks:
            dst = os.path.normpath(dst)
            if dst not in all_copied_files:
                all_copied_files.append(dst)

        all_repre_file_paths = []
        for repre_info in published_repres.values():
            published_files = repre_info.get("published_files") or []
            for file_path in published_files:
                file_path = os.path.normpath(file_path)
                if file_path not in all_repre_file_paths:
                    all_repre_file_paths.append(file_path)

        # TODO this is not the best practice for getting publish resources
        # WARNING due to this we must remove all files from the hero publish dir
        instance_publish_dir = os.path.normpath(
            instance.data["publishDir"]
        )
        other_file_paths_mapping = []
        for file_path in all_copied_files:
            # Check if it is from publishDir
            if not file_path.startswith(instance_publish_dir):
                continue

            if file_path in all_repre_file_paths:
                continue

            dst_filepath = file_path.replace(
                instance_publish_dir, hero_publish_dir
            )
            other_file_paths_mapping.append((file_path, dst_filepath))

        # Current version
        old_version, old_repres = (
            self.current_hero_ents(src_version_entity)
        )

        old_repres_by_name = {
            repre["name"].lower(): repre for repre in old_repres
        }

        if old_version:
            new_version_id = old_version["_id"]
        else:
            new_version_id = io.ObjectId()

        new_hero_version = {
            "_id": new_version_id,
            "version_id": src_version_entity["_id"],
            "parent": src_version_entity["parent"],
            "type": "hero_version",
            "schema": "pype:hero_version-1.0"
        }
        schema.validate(new_hero_version)

        # Don't make changes in database until everything is O.K.
        bulk_writes = []

        if old_version:
            self.log.debug("Replacing old hero version.")
            bulk_writes.append(
                ReplaceOne(
                    {"_id": new_hero_version["_id"]},
                    new_hero_version
                )
            )
        else:
            self.log.debug("Creating first hero version.")
            bulk_writes.append(
                InsertOne(new_hero_version)
            )

        # Separate old representations into `to replace` and `to delete`
        old_repres_to_replace = {}
        old_repres_to_delete = {}
        for repre_info in published_repres.values():
            repre = repre_info["representation"]
            repre_name_low = repre["name"].lower()
            if repre_name_low in old_repres_by_name:
                old_repres_to_replace[repre_name_low] = (
                    old_repres_by_name.pop(repre_name_low)
                )

        if old_repres_by_name:
            old_repres_to_delete = old_repres_by_name

        archived_repres = list(io.find({
            "type": "archived_representation",
            "parent": new_version_id
        }))
        archived_repres_by_name = {}
        for repre in archived_repres:
            repre_name_low = repre["name"].lower()
            archived_repres_by_name[repre_name_low] = repre

        backup_hero_publish_dir = None
        if os.path.exists(hero_publish_dir):
            backup_hero_publish_dir = hero_publish_dir + ".BACKUP"
            max_idx = 10
            idx = 0
            _backup_hero_publish_dir = backup_hero_publish_dir
            while os.path.exists(_backup_hero_publish_dir):
                self.log.debug((
                    "Backup folder already exists."
                    " Trying to remove \"{}\""
                ).format(_backup_hero_publish_dir))

                try:
                    shutil.rmtree(_backup_hero_publish_dir)
                    backup_hero_publish_dir = _backup_hero_publish_dir
                    break
                except Exception:
                    self.log.info((
                        "Could not remove previous backup folder."
                        " Trying to add index to folder name"
                    ))

                _backup_hero_publish_dir = (
                    backup_hero_publish_dir + str(idx)
                )
                if not os.path.exists(_backup_hero_publish_dir):
                    backup_hero_publish_dir = _backup_hero_publish_dir
                    break

                if idx > max_idx:
                    raise AssertionError((
                        "Backup folders are fully occupied up to max index \"{}\""
                    ).format(max_idx))

                idx += 1

            self.log.debug("Backup folder path is \"{}\"".format(
                backup_hero_publish_dir
            ))
            try:
                os.rename(hero_publish_dir, backup_hero_publish_dir)
            except PermissionError:
                raise AssertionError((
                    "Could not create hero version because it is not"
                    " possible to replace current hero files."
                ))
        try:
            src_to_dst_file_paths = []
            for repre_info in published_repres.values():

                # Skip if new repre does not have published repre files
                published_files = repre_info["published_files"]
                if len(published_files) == 0:
                    continue

                # Prepare anatomy data
                anatomy_data = repre_info["anatomy_data"]
                anatomy_data.pop("version", None)

                # Get filled path to repre context
                anatomy_filled = anatomy.format(anatomy_data)
                template_filled = anatomy_filled["hero"]["path"]

                repre_data = {
                    "path": str(template_filled),
                    "template": hero_template
                }
                repre_context = template_filled.used_values
                for key in self.db_representation_context_keys:
                    if (
                        key in repre_context or
                        key not in anatomy_data
                    ):
                        continue

                    repre_context[key] = anatomy_data[key]

                # Prepare new repre
                repre = copy.deepcopy(repre_info["representation"])
                repre["parent"] = new_hero_version["_id"]
                repre["context"] = repre_context
                repre["data"] = repre_data
                repre.pop("_id", None)

                # Prepare paths of source and destination files
                if len(published_files) == 1:
                    src_to_dst_file_paths.append(
                        (published_files[0], template_filled)
                    )
                else:
                    collections, remainders = clique.assemble(published_files)
                    if remainders or not collections or len(collections) > 1:
                        raise Exception((
                            "Integrity error. Files of published"
                            " representation are a mix of frame collections"
                            " and single files."
                            " Collections: `{}` Single files: `{}`"
                        ).format(str(collections), str(remainders)))

                    src_col = collections[0]

                    # Get head and tail for collection
                    frame_splitter = "_-_FRAME_SPLIT_-_"
                    anatomy_data["frame"] = frame_splitter
                    _anatomy_filled = anatomy.format(anatomy_data)
                    _template_filled = _anatomy_filled["hero"]["path"]
                    head, tail = _template_filled.split(frame_splitter)
                    padding = int(
                        anatomy.templates["render"].get(
                            "frame_padding",
                            anatomy.templates["render"].get("padding")
                        )
                    )

                    dst_col = clique.Collection(
                        head=head, padding=padding, tail=tail
                    )
                    dst_col.indexes.clear()
                    dst_col.indexes.update(src_col.indexes)
                    for src_file, dst_file in zip(src_col, dst_col):
                        src_to_dst_file_paths.append(
                            (src_file, dst_file)
                        )

                # replace original file name with hero name in repre doc
                for index in range(len(repre.get("files"))):
                    file = repre.get("files")[index]
                    file_name = os.path.basename(file.get('path'))
                    for src_file, dst_file in src_to_dst_file_paths:
                        src_file_name = os.path.basename(src_file)
                        if src_file_name == file_name:
                            repre["files"][index]["path"] = self._update_path(
                                anatomy, repre["files"][index]["path"],
                                src_file, dst_file)

                            repre["files"][index]["hash"] = self._update_hash(
                                repre["files"][index]["hash"],
                                src_file_name, dst_file
                            )

                schema.validate(repre)

                repre_name_low = repre["name"].lower()
                # Replace current representation
                if repre_name_low in old_repres_to_replace:
                    old_repre = old_repres_to_replace.pop(repre_name_low)
                    repre["_id"] = old_repre["_id"]
                    bulk_writes.append(
                        ReplaceOne(
                            {"_id": old_repre["_id"]},
                            repre
                        )
                    )

                # Unarchive representation
                elif repre_name_low in archived_repres_by_name:
                    archived_repre = archived_repres_by_name.pop(
                        repre_name_low
                    )
                    old_id = archived_repre["old_id"]
                    repre["_id"] = old_id
                    bulk_writes.append(
                        ReplaceOne(
                            {"old_id": old_id},
                            repre
                        )
                    )

                # Create representation
                else:
                    repre["_id"] = io.ObjectId()
                    bulk_writes.append(
                        InsertOne(repre)
                    )

            self.path_checks = []

            # Copy (or hardlink) source files to destination paths
            # TODO should we *only* create hardlinks?
            # TODO should we keep files for deletion until this is successful?
            for src_path, dst_path in src_to_dst_file_paths:
                self.copy_file(src_path, dst_path)

            for src_path, dst_path in other_file_paths_mapping:
                self.copy_file(src_path, dst_path)

            # Archive old representations that were not replaced
            for repre_name_low, repre in old_repres_to_delete.items():
                # Replace archived representation (this is a backup)
                # - a repre and an archived repre should not both exist
                if repre_name_low in archived_repres_by_name:
                    archived_repre = archived_repres_by_name.pop(
                        repre_name_low
                    )
                    repre["old_id"] = repre["_id"]
                    repre["_id"] = archived_repre["_id"]
                    repre["type"] = archived_repre["type"]
                    bulk_writes.append(
                        ReplaceOne(
                            {"_id": archived_repre["_id"]},
                            repre
                        )
                    )

                else:
                    repre["old_id"] = repre["_id"]
                    repre["_id"] = io.ObjectId()
                    repre["type"] = "archived_representation"
                    bulk_writes.append(
                        InsertOne(repre)
                    )

            if bulk_writes:
                io._database[io.Session["AVALON_PROJECT"]].bulk_write(
                    bulk_writes
                )

            # Remove the backed-up previous hero version
            if (
                backup_hero_publish_dir is not None and
                os.path.exists(backup_hero_publish_dir)
            ):
                shutil.rmtree(backup_hero_publish_dir)

        except Exception:
            if (
                backup_hero_publish_dir is not None and
                os.path.exists(backup_hero_publish_dir)
            ):
                os.rename(backup_hero_publish_dir, hero_publish_dir)
            self.log.error((
                "!!! Creating hero version failed."
                " Previous hero version may have lost some data!"
            ))
            raise

        self.log.debug((
            "--- hero version integration for subset `{}`"
            " seems to be successful."
        ).format(
            instance.data.get("subset", str(instance))
        ))
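
The backup-and-rollback pattern used above, in isolation; a simplified sketch with a hypothetical path and a hypothetical do_integration step (the real code also retries indexed backup names):

import os
import shutil


def do_integration():
    """Hypothetical stand-in for copying files and writing DB entries."""


hero_dir = "/projects/demo/publish/hero"  # hypothetical path
backup_dir = hero_dir + ".BACKUP"

os.rename(hero_dir, backup_dir)      # stash the current hero files
try:
    do_integration()
    shutil.rmtree(backup_dir)        # success: the backup is no longer needed
except Exception:
    os.rename(backup_dir, hero_dir)  # failure: restore the previous state
    raise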
Example 6
def remap_to_published_texture(nodes, representation_id, dry_run=False):
    """For fixing previous bad implementations on texture management

    This is for remapping texture file path from arbitrarily work path
    to previous published path in published looks.

    For some reason, some texture path may not changed to published path
    after extraction. This is a fixing helper for the issue.

    (NOTE) The issue should be resolved in following commits. :')

    """
    file_nodes = cmds.ls(nodes, type="file")
    count, file_data = lib.profiling_file_nodes(file_nodes)
    if not count:
        return

    node_by_fpattern = dict()

    for data in file_data:
        data["pathMap"] = {
            fn: data["dir"] + "/" + fn for fn in data["fnames"]
        }

        fpattern = data["fpattern"]
        if fpattern not in node_by_fpattern:
            node_by_fpattern[fpattern] = list()
        node_by_fpattern[fpattern].append(data)

    repre_doc = io.find_one({"_id": io.ObjectId(representation_id)})
    file_inventory = repre_doc["data"].get("fileInventory")
    if not file_inventory:
        return

    resolved_by_fpattern = lib.resolve_file_profile(repre_doc, file_inventory)

    # MAPPING

    for fpattern, file_datas in node_by_fpattern.items():
        if fpattern not in resolved_by_fpattern:
            fpattern_ = fpattern.rsplit(".", 1)[0]
            for resolved_fpattern in resolved_by_fpattern:
                if fpattern_ == resolved_fpattern.rsplit(".", 1)[0]:
                    versioned_data = resolved_by_fpattern[resolved_fpattern]
                    break
            else:
                continue
        else:
            versioned_data = resolved_by_fpattern[fpattern]

        data = file_datas[0]
        file_nodes = [dat["node"] for dat in file_datas]
        versioned_data.sort(key=lambda elem: elem[0]["version"],
                            reverse=True)  # elem: (data, tmp_data)

        for ver_data, resolved in versioned_data:

            previous_files = resolved["pathMap"]

            for file, abs_path in data["pathMap"].items():
                if file not in previous_files:
                    file = file.rsplit(".", 1)[0]
                    for pre_file in previous_files:
                        if file == pre_file.rsplit(".", 1)[0]:
                            file = pre_file
                            abs_previous = previous_files[pre_file]
                            ext = pre_file.rsplit(".", 1)[1]
                            abs_path = abs_path.rsplit(".", 1)[0] + "." + ext
                            if not os.path.isfile(abs_path):
                                continue
                            else:
                                break
                    else:
                        # Possible different file pattern
                        break  # Try previous version
                else:
                    abs_previous = previous_files[file]

                if not os.path.isfile(abs_previous):
                    # Previous file does not exist (should not happen)
                    break  # Try previous version

                # (NOTE) We don't need to check file size and modification
                #        time here since we are trying to map the file to the
                #        latest published version.

            else:
                # Version matched, consider as same file
                head_file = sorted(previous_files)[0]
                resolved_path = abs_previous[:-len(file)] + head_file
                embedded_path = env_embedded_path(resolved_path)
                fix_texture_file_nodes(file_nodes, embedded_path, dry_run)

                # Proceed to next pattern
                break

        else:
            # No previous version matched; this should not happen
            log.warning("No version matched.")
            with open(dry_run, "a") as path_log:
                path_log.write("\n * " + data["dir"] + "/" + fpattern + "\n\n")

Example 7
    def unpack_textures(self, container):
        import os
        import shutil
        from maya import cmds, mel
        from avalon import api, io

        project = io.find_one({"type": "project"},
                              projection={"name": True,
                                          "config.template.publish": True})
        asset = io.find_one({"_id": io.ObjectId(container["assetId"])},
                            projection={"name": True, "silo": True})
        subset = io.find_one({"_id": io.ObjectId(container["subsetId"])},
                             projection={"name": True})
        version = io.find_one({"_id": io.ObjectId(container["versionId"])},
                              projection={"name": True,
                                          "data.dependencies": True})
        # Find TexturePack
        dep_id = next(iter(version["data"]["dependencies"]))
        dep_version = io.find_one({"_id": io.ObjectId(dep_id)})
        dep_subset = io.find_one({"_id": dep_version["parent"]})
        dep_representation = io.find_one({"parent": dep_version["_id"],
                                          "name": "TexturePack"})
        # List texture versions
        published = dict()
        template_publish = project["config"]["template"]["publish"]
        for data in dep_representation["data"]["fileInventory"]:
            path = template_publish.format(
                root=api.registered_root(),
                project=project["name"],
                silo=asset["silo"],
                asset=asset["name"],
                subset=dep_subset["name"],
                version=data["version"],
                representation="TexturePack",
            )
            published[data["version"]] = path

        # Collect paths,
        # keeping only textures that are used in this look
        file_nodes = cmds.ls(cmds.sets(container["objectName"], query=True),
                             type="file")
        files = dict()
        for node in file_nodes:
            path = cmds.getAttr(node + ".fileTextureName",
                                expandEnvironmentVariables=True)
            if not os.path.isfile(path):
                continue

            for v, p in published.items():
                if path.startswith(p):
                    key = (v, p)
                    if key not in files:
                        files[key] = list()
                    files[key].append(node)
                    break

        # Copy textures and change path
        root = cmds.workspace(query=True, rootDirectory=True)
        root += mel.eval('workspace -query -fileRuleEntry "sourceImages"')
        root += "/_unpacked"

        pattern = "/{asset}/{subset}.v{version:0>3}/TexturePack.v{texture:0>3}"
        for (texture_version, src), nodes in files.items():
            dst = root + pattern.format(asset=asset["name"],
                                        subset=subset["name"],
                                        version=version["name"],
                                        texture=texture_version)
            for node in nodes:
                attr = node + ".fileTextureName"
                path = cmds.getAttr(attr, expandEnvironmentVariables=True)
                tail = path.split("TexturePack")[-1]
                cmds.setAttr(attr, dst + tail, type="string")

            if os.path.isdir(dst):
                continue

            shutil.copytree(src, dst)

Example 8
def __get_representation(namespace):
    container_node = pipeline.get_container_from_namespace(namespace)
    _id = io.ObjectId(cmds.getAttr(container_node + ".representation"))
    return io.find_one({"_id": _id})
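
A hedged usage sketch of the helper above (requires a Maya session with Avalon initialized; the namespace is illustrative):

repre_doc = __get_representation("setdress_01_")
if repre_doc:
    print(repre_doc["name"], repre_doc["type"])  # e.g. "abc", "representation"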