Example #1
    def process(self, context):

        # Collected units
        linearunits = context.data.get('linearUnits')
        angularunits = context.data.get('angularUnits')
        # TODO(antirotor): This is a hack for frame rates with multiple
        # decimal places. Ftrack ceils fps values to two decimal places,
        # but Maya 2019+ reports them with much higher precision. Since we
        # currently cannot fix the Ftrack rounding, we round the numbers
        # coming from Maya instead.
        fps = float_round(context.data['fps'], 2, ceil)

        asset_fps = lib.get_asset()["data"]["fps"]

        self.log.info('Units (linear): {0}'.format(linearunits))
        self.log.info('Units (angular): {0}'.format(angularunits))
        self.log.info('Units (time): {0} FPS'.format(fps))

        # Check if units are correct
        assert linearunits and linearunits == 'cm', ("Scene linear units must "
                                                     "be centimeters")

        assert angularunits and angularunits == 'deg', ("Scene angular units "
                                                        "must be degrees")
        assert fps and fps == asset_fps, (
            "Scene must be {} FPS (now is {})".format(asset_fps, fps))
Example #2
def validate_fps():
    """Validate current scene FPS and show pop-up when it is incorrect

    Returns:
        bool

    """

    fps = lib.get_asset()["data"]["fps"]
    current_fps = hou.fps()  # returns float

    if current_fps != fps:

        from ...widgets import popup

        # Find main window
        parent = hou.ui.mainQtWindow()
        if parent is not None:
            dialog = popup.Popup2(parent=parent)
            dialog.setModal(True)
            dialog.setWindowTitle("Houdini scene not in line with project")
            dialog.setMessage("The FPS is out of sync, please fix")

            # Set new text for button (add optional argument for the popup?)
            toggle = dialog.widgets["toggle"]
            toggle.setEnabled(False)
            dialog.on_show.connect(lambda: set_scene_fps(fps))

            dialog.show()

            return False

    return True
Example #3
def get_asset_settings():
    """Get settings on current asset from database.

    Returns:
        dict: Scene data.

    """
    asset_data = lib.get_asset()["data"]
    fps = asset_data.get("fps")
    frame_start = asset_data.get("frameStart")
    frame_end = asset_data.get("frameEnd")
    resolution_width = asset_data.get("resolutionWidth")
    resolution_height = asset_data.get("resolutionHeight")

    return {
        "fps": fps,
        "frameStart": frame_start,
        "frameEnd": frame_end,
        "resolutionWidth": resolution_width,
        "resolutionHeight": resolution_height
    }
Example #4
    def repair(cls, context):
        """Fix the current FPS setting of the scene, set to PAL(25.0 fps)"""

        cls.log.info("Setting angular unit to 'degrees'")
        cmds.currentUnit(angle="degree")
        current_angle = cmds.currentUnit(query=True, angle=True)
        cls.log.debug(current_angle)

        cls.log.info("Setting linear unit to 'centimeter'")
        cmds.currentUnit(linear="centimeter")
        current_linear = cmds.currentUnit(query=True, linear=True)
        cls.log.debug(current_linear)

        cls.log.info("Setting time unit to match project")
        asset_fps = lib.get_asset()["data"]["fps"]
        mayalib.set_scene_fps(asset_fps)
Example #5
def get_asset_settings():
    """Get settings on current asset from database.

    Returns:
        dict: Scene data.

    """
    asset_data = lib.get_asset()["data"]
    fps = asset_data.get("fps")
    frame_start = asset_data.get("frameStart")
    frame_end = asset_data.get("frameEnd")
    handle_start = asset_data.get("handleStart")
    handle_end = asset_data.get("handleEnd")
    resolution_width = asset_data.get("resolutionWidth")
    resolution_height = asset_data.get("resolutionHeight")
    entity_type = asset_data.get("entityType")

    scene_data = {
        "fps": fps,
        "frameStart": frame_start,
        "frameEnd": frame_end,
        "handleStart": handle_start,
        "handleEnd": handle_end,
        "resolutionWidth": resolution_width,
        "resolutionHeight": resolution_height
    }
    settings = get_current_project_settings()

    try:
        skip_resolution_check = \
            settings["harmony"]["general"]["skip_resolution_check"]
        skip_timelines_check = \
            settings["harmony"]["general"]["skip_timelines_check"]
    except KeyError:
        skip_resolution_check = []
        skip_timelines_check = []

    if os.getenv('AVALON_TASK') in skip_resolution_check:
        scene_data.pop("resolutionWidth")
        scene_data.pop("resolutionHeight")

    if entity_type in skip_timelines_check:
        scene_data.pop('frameStart', None)
        scene_data.pop('frameEnd', None)

    return scene_data
Example #6
    def process(self, instance):
        # remove context test attribute
        if instance.context.data.get("subsetNamesCheck"):
            instance.context.data.pop("subsetNamesCheck")

        self.log.debug(f"__ instance: `{instance}`")
        # get representation with editorial file
        for representation in instance.data["representations"]:
            self.log.debug(f"__ representation: `{representation}`")
            # make editorial sequence file path
            staging_dir = representation["stagingDir"]
            file_path = os.path.join(staging_dir, str(representation["files"]))
            instance.context.data["currentFile"] = file_path

            # get video file path
            video_path = None
            basename = os.path.splitext(os.path.basename(file_path))[0]
            for f in os.listdir(staging_dir):
                self.log.debug(f"__ test file: `{f}`")
                # skip files that do not share the editorial file's base name
                if os.path.splitext(f)[0] not in basename:
                    continue
                # skip files whose extension is not in the accepted list
                if os.path.splitext(f)[1] not in self.extensions:
                    continue
                video_path = os.path.join(staging_dir, f)
                self.log.debug(f"__ video_path: `{video_path}`")
            instance.data["editorialVideoPath"] = video_path
            instance.data["stagingDir"] = staging_dir

            # get editorial sequence file into otio timeline object
            extension = os.path.splitext(file_path)[1]
            kwargs = {}
            if extension == ".edl":
                # EDL has no frame rate embedded, so an explicit frame rate
                # is needed, otherwise 24 fps is assumed.
                kwargs["rate"] = plib.get_asset()["data"]["fps"]

            instance.data["otio_timeline"] = otio.adapters.read_from_file(
                file_path, **kwargs)

            self.log.info(f"Added OTIO timeline from: `{file_path}`")
Example #7
    def process(self, instance):
        ctx_data = instance.context.data
        asset_name = ctx_data["asset"]
        asset = lib.get_asset(asset_name)
        asset_data = asset["data"]

        # These attributes will be checked
        attributes = [
            "fps", "frameStart", "frameEnd", "resolutionWidth",
            "resolutionHeight", "handleStart", "handleEnd"
        ]

        # Value of these attributes can be found on parents
        hierarchical_attributes = [
            "fps", "resolutionWidth", "resolutionHeight", "pixelAspect",
            "handleStart", "handleEnd"
        ]

        missing_attributes = []
        asset_attributes = {}
        for attr in attributes:
            if attr in asset_data:
                asset_attributes[attr] = asset_data[attr]

            elif attr in hierarchical_attributes:
                # Try to find the attribute on a parent
                parent = asset['parent']
                if asset_data['visualParent'] is not None:
                    parent = asset_data['visualParent']

                value = self.check_parent_hierarchical(parent, attr)
                if value is None:
                    missing_attributes.append(attr)
                else:
                    asset_attributes[attr] = value
            else:
                missing_attributes.append(attr)

        # Raise error if attributes weren't found on asset in database
        if len(missing_attributes) > 0:
            atr = ", ".join(missing_attributes)
            msg = 'Missing attributes "{}" in asset "{}"'
            message = msg.format(atr, asset_name)
            raise ValueError(message)

        # Get handles from the database, defaulting to 0 if not found
        handle_start = 0
        handle_end = 0
        if "handleStart" in asset_attributes:
            handle_start = asset_attributes["handleStart"]
        if "handleEnd" in asset_attributes:
            handle_end = asset_attributes["handleEnd"]

        asset_attributes["fps"] = float("{0:.4f}".format(
            asset_attributes["fps"]))

        # Get values from nukescript
        script_attributes = {
            "handleStart": ctx_data["handleStart"],
            "handleEnd": ctx_data["handleEnd"],
            "fps": float("{0:.4f}".format(ctx_data["fps"])),
            "frameStart": ctx_data["frameStart"],
            "frameEnd": ctx_data["frameEnd"],
            "resolutionWidth": ctx_data["resolutionWidth"],
            "resolutionHeight": ctx_data["resolutionHeight"],
            "pixelAspect": ctx_data["pixelAspect"]
        }

        # Compare asset values between the Nuke script and the database
        not_matching = []
        for attr in attributes:
            self.log.debug("asset vs script attribute \"{}\": {}, {}".format(
                attr, asset_attributes[attr], script_attributes[attr]))
            if asset_attributes[attr] != script_attributes[attr]:
                not_matching.append(attr)

        # Raise error if not matching
        if len(not_matching) > 0:
            msg = "Attributes '{}' are not set correctly"
            # Alert the user that handles are set if frame start/end do not match
            if ((("frameStart" in not_matching) or
                 ("frameEnd" in not_matching))
                    and ((handle_start > 0) or (handle_end > 0))):
                msg += " (`handle_start` are set to {})".format(handle_start)
                msg += " (`handle_end` are set to {})".format(handle_end)
            message = msg.format(", ".join(not_matching))
            raise ValueError(message)
Example #8
    def process(self, instance):
        # get context
        context = instance.context

        instance_data_filter = ["editorialSourceRoot", "editorialSourcePath"]

        # attribute for checking duplicate names during creation
        if not context.data.get("assetNameCheck"):
            context.data["assetNameCheck"] = list()

        # create asset_names conversion table
        if not context.data.get("assetsShared"):
            context.data["assetsShared"] = dict()

        # get timeline otio data
        timeline = instance.data["otio_timeline"]
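        # project fps from the asset database; passed on to every
        # created shot instance below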
        fps = plib.get_asset()["data"]["fps"]

        tracks = timeline.each_child(
            descended_from_type=otio.schema.track.Track)

        # get data from avalon
        asset_entity = instance.context.data["assetEntity"]
        asset_data = asset_entity["data"]
        asset_name = asset_entity["name"]

        # Timeline data.
        handle_start = int(asset_data["handleStart"])
        handle_end = int(asset_data["handleEnd"])

        for track in tracks:
            self.log.debug(f"track.name: {track.name}")
            try:
                track_start_frame = (abs(track.source_range.start_time.value))
                self.log.debug(f"track_start_frame: {track_start_frame}")
                track_start_frame -= self.timeline_frame_start
            except AttributeError:
                track_start_frame = 0

            self.log.debug(f"track_start_frame: {track_start_frame}")

            for clip in track.each_child():
                if clip.name is None:
                    continue

                # skip all generators like black empty
                if isinstance(clip.media_reference,
                              otio.schema.GeneratorReference):
                    continue

                # Transitions are ignored, because Clips have the full frame
                # range.
                if isinstance(clip, otio.schema.transition.Transition):
                    continue

                # basic unique asset name
                clip_name = os.path.splitext(clip.name)[0].lower()
                name = f"{asset_name.split('_')[0]}_{clip_name}"

                if name not in context.data["assetNameCheck"]:
                    context.data["assetNameCheck"].append(name)
                else:
                    self.log.warning(f"duplicate shot name: {name}")

                # frame ranges data
                clip_in = clip.range_in_parent().start_time.value
                clip_in += track_start_frame
                clip_out = clip.range_in_parent().end_time_inclusive().value
                clip_out += track_start_frame
                self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}")

                # add offset in case there is any
                if self.timeline_frame_offset:
                    clip_in += self.timeline_frame_offset
                    clip_out += self.timeline_frame_offset

                clip_duration = clip.duration().value
                self.log.info(f"clip duration: {clip_duration}")

                source_in = clip.trimmed_range().start_time.value
                source_out = source_in + clip_duration
                source_in_h = source_in - handle_start
                source_out_h = source_out + handle_end

                clip_in_h = clip_in - handle_start
                clip_out_h = clip_out + handle_end

                # define starting frame for future shot
                if self.custom_start_frame is not None:
                    frame_start = self.custom_start_frame
                else:
                    frame_start = clip_in

                frame_end = frame_start + (clip_duration - 1)

                # create shared new instance data
                instance_data = {
                    # shared attributes
                    "asset": name,
                    "assetShareName": name,
                    "item": clip,
                    "clipName": clip_name,

                    # parent time properties
                    "trackStartFrame": track_start_frame,
                    "handleStart": handle_start,
                    "handleEnd": handle_end,
                    "fps": fps,

                    # media source
                    "sourceIn": source_in,
                    "sourceOut": source_out,
                    "sourceInH": source_in_h,
                    "sourceOutH": source_out_h,

                    # timeline
                    "clipIn": clip_in,
                    "clipOut": clip_out,
                    "clipDuration": clip_duration,
                    "clipInH": clip_in_h,
                    "clipOutH": clip_out_h,
                    "clipDurationH": clip_duration + handle_start + handle_end,

                    # task
                    "frameStart": frame_start,
                    "frameEnd": frame_end,
                    "frameStartH": frame_start - handle_start,
                    "frameEndH": frame_end + handle_end
                }

                for data_key in instance_data_filter:
                    instance_data.update(
                        {data_key: instance.data.get(data_key)})

                # adding subsets to context as instances
                for subset, properties in self.subsets.items():
                    # add a reviewable instance for each subset
                    subset_instance_data = instance_data.copy()
                    subset_instance_data.update(properties)
                    subset_instance_data.update({
                        # unique attributes
                        "name": f"{name}_{subset}",
                        "label": f"{name} {subset} ({clip_in}-{clip_out})",
                        "subset": subset
                    })
                    # create new instance
                    _instance = instance.context.create_instance(
                        **subset_instance_data)
                    self.log.debug(
                        f"Instance: `{_instance}` | "
                        f"families: `{subset_instance_data['families']}`")

                context.data["assetsShared"][name] = {
                    "_clipIn": clip_in,
                    "_clipOut": clip_out
                }

                self.log.debug("Instance: `{}` | families: `{}`")
Example #9
    def process(self, instance):
        representation = instance.data["representations"][0]
        file_path = os.path.join(representation["stagingDir"],
                                 representation["files"])
        instance.context.data["editorialPath"] = file_path

        extension = os.path.splitext(file_path)[1][1:]
        kwargs = {}
        if extension == "edl":
            # EDL has no frame rate embedded, so an explicit frame rate
            # is needed, otherwise 24 fps is assumed.
            kwargs["rate"] = lib.get_asset()["data"]["fps"]

        timeline = otio.adapters.read_from_file(file_path, **kwargs)
        tracks = timeline.each_child(
            descended_from_type=otio.schema.track.Track)
        asset_entity = instance.context.data["assetEntity"]
        asset_name = asset_entity["name"]

        # Ask user for sequence start. Usually 10:00:00:00.
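        # 900000 frames correspond to timecode 10:00:00:00,
        # assuming a 25 fps project (10 * 3600 * 25)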
        sequence_start_frame = 900000

        # Project-specific prefix naming. This needs to be replaced with
        # configurable options to be more flexible.
        asset_name = asset_name.split("_")[0]

        instances = []
        for track in tracks:
            track_start_frame = (abs(track.source_range.start_time.value) -
                                 sequence_start_frame)
            for child in track.each_child():

                # Transitions are ignored, because Clips have the full frame
                # range.
                if isinstance(child, otio.schema.transition.Transition):
                    continue

                if child.name is None:
                    continue

                # Hardcoded to expect a shot name of "[name].[extension]"
                child_name = os.path.splitext(child.name)[0].lower()
                name = f"{asset_name}_{child_name}"

                frame_start = track_start_frame
                frame_start += child.range_in_parent().start_time.value
                frame_end = track_start_frame
                frame_end += child.range_in_parent().end_time_inclusive().value

                label = f"{name} (framerange: {frame_start}-{frame_end})"
                instances.append(
                    instance.context.create_instance(
                        **{
                            "name": name,
                            "label": label,
                            "frameStart": frame_start,
                            "frameEnd": frame_end,
                            "family": "shot",
                            "families": ["review", "ftrack"],
                            "ftrackFamily": "review",
                            "asset": name,
                            "subset": "shotMain",
                            "representations": [],
                            "source": file_path
                        }))

        visual_hierarchy = [asset_entity]
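        # Walk up the visualParent links, ending with the project entity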
        while True:
            visual_parent = io.find_one(
                {"_id": visual_hierarchy[-1]["data"]["visualParent"]})
            if visual_parent:
                visual_hierarchy.append(visual_parent)
            else:
                visual_hierarchy.append(instance.context.data["projectEntity"])
                break

        context_hierarchy = None
        for entity in visual_hierarchy:
            childs = {}
            if context_hierarchy:
                name = context_hierarchy.pop("name")
                childs = {name: context_hierarchy}
            else:
                # use a distinct name to avoid shadowing the outer `instance`
                for shot_instance in instances:
                    childs[shot_instance.data["name"]] = {
                        "childs": {},
                        "entity_type": "Shot",
                        "custom_attributes": {
                            "frameStart": shot_instance.data["frameStart"],
                            "frameEnd": shot_instance.data["frameEnd"]
                        }
                    }

            context_hierarchy = {
                "entity_type": entity["data"]["entityType"],
                "childs": childs,
                "name": entity["name"]
            }

        name = context_hierarchy.pop("name")
        context_hierarchy = {name: context_hierarchy}
        instance.context.data["hierarchyContext"] = context_hierarchy
        self.log.info(
            "Hierarchy:\n" +
            json_util.dumps(context_hierarchy, sort_keys=True, indent=4))
    def process(self, instance):
        root_dir = None
        # remove context test attribute
        if instance.context.data.get("subsetNamesCheck"):
            instance.context.data.pop("subsetNamesCheck")

        self.log.debug(f"__ instance: `{instance}`")
        # get representation with editorial file
        for representation in instance.data["representations"]:
            self.log.debug(f"__ representation: `{representation}`")
            # make editorial sequence file path
            staging_dir = representation["stagingDir"]
            file_path = os.path.join(staging_dir, str(representation["files"]))
            instance.context.data["currentFile"] = file_path

            # get video file path
            video_path = None
            basename = os.path.splitext(os.path.basename(file_path))[0]

            if self.source_dir:
                source_dir = self.source_dir.replace("\\", "/")
                if ("./" in source_dir) or ("../" in source_dir):
                    # get current working dir
                    cwd = os.getcwd()
                    # set cwd to staging dir for absolute path solving
                    os.chdir(staging_dir)
                    root_dir = os.path.abspath(source_dir)
                    # set back original cwd
                    os.chdir(cwd)
                elif "{" in source_dir:
                    root_dir = source_dir
                else:
                    root_dir = os.path.normpath(source_dir)

            if root_dir:
                # source data will need to be searched for later
                instance.data["editorialSourceRoot"] = root_dir
                instance.data["editorialSourcePath"] = None
            else:
                # source data are already found
                for f in os.listdir(staging_dir):
                    # skip files that do not share the editorial file's base name
                    if os.path.splitext(f)[0] not in basename:
                        continue
                    # skip files whose extension is not in the accepted list
                    if os.path.splitext(f)[1] not in self.extensions:
                        continue
                    video_path = os.path.join(staging_dir, f)
                    self.log.debug(f"__ video_path: `{video_path}`")
                instance.data["editorialSourceRoot"] = staging_dir
                instance.data["editorialSourcePath"] = video_path

            instance.data["stagingDir"] = staging_dir

            # get editorial sequence file into otio timeline object
            extension = os.path.splitext(file_path)[1]
            kwargs = {}
            if extension == ".edl":
                # EDL has no frame rate embedded, so an explicit frame rate
                # is needed, otherwise 24 fps is assumed.
                kwargs["rate"] = plib.get_asset()["data"]["fps"]

            instance.data["otio_timeline"] = otio.adapters.read_from_file(
                file_path, **kwargs)

            self.log.info(f"Added OTIO timeline from: `{file_path}`")