Code example #1
File: query_tests.py Project: zoombulous/openshot-qt
    def test_add_clip(self):
        """ Test the Clip.save method by adding multiple clips """

        # Import additional classes that need the app defined first
        from classes.query import Clip

        # Find number of clips in project
        num_clips = len(Clip.filter())

        # Create clip
        c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))

        # Parse JSON
        clip_data = json.loads(c.Json())

        # Insert into project data
        query_clip = Clip()
        query_clip.data = clip_data
        query_clip.save()

        self.assertTrue(query_clip)
        self.assertEqual(len(Clip.filter()), num_clips + 1)

        # Save the clip again (which should not change the total # of clips)
        query_clip.save()

        self.assertEqual(len(Clip.filter()), num_clips + 1)
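
The same query API also supports lookup and deletion. As a hedged companion sketch (not part of the project's test suite): assuming Clip.get and Clip.delete behave like the File.get and Track.delete calls that appear in the later examples, a deletion test could look like this:

    def test_delete_clip(self):
        """ Hypothetical companion test: delete a clip and verify the count """
        from classes.query import Clip

        # Find number of clips in project
        num_clips = len(Clip.filter())

        # Create and save a clip, exactly as in test_add_clip
        c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
        query_clip = Clip()
        query_clip.data = json.loads(c.Json())
        query_clip.save()
        self.assertEqual(len(Clip.filter()), num_clips + 1)

        # Look the clip up by id and delete it (assumes Clip.get/Clip.delete
        # mirror the File.get and Track.delete calls in later examples)
        Clip.get(id=query_clip.id).delete()
        self.assertEqual(len(Clip.filter()), num_clips)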
Code example #2
File: query_tests.py Project: zoombulous/openshot-qt
    @classmethod
    def setUpClass(TestQueryClass):
        """ Init unit test data """
        # Create Qt application
        TestQueryClass.app = OpenShotApp(sys.argv, mode="unittest")
        TestQueryClass.clip_ids = []
        TestQueryClass.file_ids = []
        TestQueryClass.transition_ids = []

        # Import additional classes that need the app defined first
        from classes.query import Clip, File, Transition

        # Insert some clips into the project data
        for num in range(5):
            # Create clip
            c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))

            # Parse JSON
            clip_data = json.loads(c.Json())

            # Insert into project data
            query_clip = Clip()
            query_clip.data = clip_data
            query_clip.save()

            # Keep track of the ids
            TestQueryClass.clip_ids.append(query_clip.id)

        # Insert some files into the project data
        for num in range(5):
            # Create file
            r = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)

            # Parse JSON
            file_data = json.loads(r.Json())

            # Insert into project data
            query_file = File()
            query_file.data = file_data
            query_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
            query_file.data["media_type"] = "image"
            query_file.save()

            # Keep track of the ids
            TestQueryClass.file_ids.append(query_file.id)

        # Insert some transitions into the project data
        for num in range(5):
            # Create mask object
            transition_object = openshot.Mask()
            transitions_data = json.loads(transition_object.Json())

            # Insert into project data
            query_transition = Transition()
            query_transition.data = transitions_data
            query_transition.save()

            # Keep track of the ids
            TestQueryClass.transition_ids.append(query_transition.id)
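
setUpClass leaves all of the inserted objects in the project data. A hedged tearDownClass sketch (not shown in the excerpt) could use the tracked id lists to clean up, assuming get(id=...) and delete() work on Clip, File, and Transition the same way File.get and Track.delete do in the other examples:

    @classmethod
    def tearDownClass(TestQueryClass):
        """ Hypothetical cleanup: remove everything setUpClass inserted """
        from classes.query import Clip, File, Transition

        # Delete the tracked objects (assumes get(id=...) and delete() exist
        # on all three query classes, as they do for File and Track above)
        for clip_id in TestQueryClass.clip_ids:
            Clip.get(id=clip_id).delete()
        for file_id in TestQueryClass.file_ids:
            File.get(id=file_id).delete()
        for tran_id in TestQueryClass.transition_ids:
            Transition.get(id=tran_id).delete()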
Code example #3
    def test_add_clip(self):

        # Find number of clips in project
        num_clips = len(Clip.filter())

        # Create clip
        c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
        clip_data = json.loads(c.Json())

        # Insert into project data
        query_clip = Clip()
        query_clip.data = clip_data
        query_clip.save()

        self.assertTrue(query_clip)
        self.assertEqual(len(Clip.filter()), num_clips + 1)

        # Save the clip again (which should not change the total # of clips)
        query_clip.save()
        self.assertEqual(len(Clip.filter()), num_clips + 1)
Code example #4
def import_xml():
    """Import final cut pro XML file"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get XML path
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = info.HOME_PATH
    else:
        recommended_path = os.path.dirname(recommended_path)
    file_path = QFileDialog.getOpenFileName(app.window, _("Import XML..."),
                                            recommended_path,
                                            _("Final Cut Pro (*.xml)"),
                                            _("Final Cut Pro (*.xml)"))[0]

    if not file_path or not os.path.exists(file_path):
        # User canceled dialog
        return

    # Parse XML file
    xmldoc = minidom.parse(file_path)

    # Get video tracks
    video_tracks = []
    for video_element in xmldoc.getElementsByTagName("video"):
        for video_track in video_element.getElementsByTagName("track"):
            video_tracks.append(video_track)
    audio_tracks = []
    for audio_element in xmldoc.getElementsByTagName("audio"):
        for audio_track in audio_element.getElementsByTagName("track"):
            audio_tracks.append(audio_track)

    # Loop through tracks
    track_index = 0
    for tracks in [audio_tracks, video_tracks]:
        for track_element in tracks:
            # Get clipitems on this track (if any)
            clips_on_track = track_element.getElementsByTagName("clipitem")
            if not clips_on_track:
                continue

            # Get # of tracks
            track_index += 1
            all_tracks = app.project.get("layers")
            track_number = max(
                all_tracks,
                key=itemgetter('number')).get("number") + 1000000

            # Create new track above existing layer(s)
            track = Track()
            is_locked = False
            if track_element.getElementsByTagName(
                    "locked")[0].childNodes[0].nodeValue == "TRUE":
                is_locked = True
            track.data = {
                "number": track_number,
                "y": 0,
                "label": "XML Import %s" % track_index,
                "lock": is_locked
            }
            track.save()

            # Loop through clips
            for clip_element in clips_on_track:
                # Get clip path
                xml_file_id = clip_element.getElementsByTagName(
                    "file")[0].getAttribute("id")
                clip_path = ""
                if clip_element.getElementsByTagName("pathurl"):
                    clip_path = clip_element.getElementsByTagName(
                        "pathurl")[0].childNodes[0].nodeValue
                else:
                    # Skip clipitem if no pathurl node is found. This usually
                    # happens for linked audio clips (OpenShot combines the
                    # audio into the main clip, so these can be ignored)
                    continue

                clip_path, is_modified, is_skipped = find_missing_file(
                    clip_path)
                if is_skipped:
                    continue

                # Check for this path in our existing project data
                file = File.get(path=clip_path)

                # Load filepath in libopenshot clip object (which will try multiple readers to open it)
                clip_obj = openshot.Clip(clip_path)

                if not file:
                    # Get the JSON for the clip's internal reader
                    try:
                        reader = clip_obj.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data

                        # Save file
                        file.save()
                    except Exception:
                        # Skip this clip if the file could not be read
                        # (otherwise `file` is still None and crashes below)
                        continue

                if (file.data["media_type"] == "video"
                        or file.data["media_type"] == "image"):
                    # Determine thumb path
                    thumb_path = os.path.join(info.THUMBNAIL_PATH,
                                              "%s.png" % file.data["id"])
                else:
                    # Audio file
                    thumb_path = os.path.join(info.PATH, "images",
                                              "AudioThumbnail.png")

                # Create Clip object
                clip = Clip()
                clip.data = json.loads(clip_obj.Json())
                clip.data["file_id"] = file.id
                clip.data["title"] = clip_element.getElementsByTagName(
                    "name")[0].childNodes[0].nodeValue
                clip.data["layer"] = track.data.get("number", 1000000)
                clip.data["image"] = thumb_path
                clip.data["position"] = float(
                    clip_element.getElementsByTagName("start")
                    [0].childNodes[0].nodeValue) / fps_float
                clip.data["start"] = float(
                    clip_element.getElementsByTagName("in")
                    [0].childNodes[0].nodeValue) / fps_float
                clip.data["end"] = float(
                    clip_element.getElementsByTagName("out")
                    [0].childNodes[0].nodeValue) / fps_float

                # Loop through clip's effects
                for effect_element in clip_element.getElementsByTagName(
                        "effect"):
                    effectid = effect_element.getElementsByTagName(
                        "effectid")[0].childNodes[0].nodeValue
                    keyframes = effect_element.getElementsByTagName("keyframe")
                    if effectid == "opacity":
                        clip.data["alpha"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(
                                keyframe_element.getElementsByTagName("when")
                                [0].childNodes[0].nodeValue)
                            keyframe_value = float(
                                keyframe_element.getElementsByTagName("value")
                                [0].childNodes[0].nodeValue) / 100.0
                            clip.data["alpha"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation":
                                1  # linear
                            })
                    elif effectid == "audiolevels":
                        clip.data["volume"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(
                                keyframe_element.getElementsByTagName("when")
                                [0].childNodes[0].nodeValue)
                            keyframe_value = float(
                                keyframe_element.getElementsByTagName("value")
                                [0].childNodes[0].nodeValue) / 100.0
                            clip.data["volume"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation":
                                1  # linear
                            })

                # Save clip
                clip.save()

            # Update the preview and reselect current frame in properties
            app.window.refreshFrameSignal.emit()
            app.window.propertyTableView.select_frame(
                app.window.preview_thread.player.Position())
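
import_xml treats the <start>, <in>, and <out> values as frame counts and divides them by the project frame rate to get seconds. A standalone sketch of that conversion (the helper name is hypothetical):

def frames_to_seconds(frame_value, fps_num=24, fps_den=1):
    """Hypothetical helper mirroring the `value / fps_float` math above."""
    fps_float = float(fps_num) / float(fps_den)
    return float(frame_value) / fps_float

# e.g. an FCP XML <start> of 72 at 24 fps lands at 3.0 seconds
assert frames_to_seconds(72, 24, 1) == 3.0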
Code example #5
    def accept(self):
        """ Ok button clicked """
        log.info('accept')

        # Get settings from form
        start_position = self.txtStartTime.value()
        track_num = self.cmbTrack.currentData()
        fade_value = self.cmbFade.currentData()
        fade_length = self.txtFadeLength.value()
        transition_path = self.cmbTransition.currentData()
        transition_length = self.txtTransitionLength.value()
        image_length = self.txtImageLength.value()
        zoom_value = self.cmbZoom.currentData()

        # Init position
        position = start_position

        random_transition = (transition_path == "random")

        # Get frames per second
        fps = get_app().project.get(["fps"])
        fps_float = float(fps["num"]) / float(fps["den"])

        # Loop through each file (in the current order)
        for file in self.treeFiles.timeline_model.files:
            # Create a clip
            clip = Clip()
            clip.data = {}

            if (file.data["media_type"] == "video"
                    or file.data["media_type"] == "image"):
                # Determine thumb path
                thumb_path = os.path.join(info.THUMBNAIL_PATH,
                                          "%s.png" % file.data["id"])
            else:
                # Audio file
                thumb_path = os.path.join(info.PATH, "images",
                                          "AudioThumbnail.png")

            # Get file name
            path, filename = os.path.split(file.data["path"])

            # Convert path to the correct relative path (based on this folder)
            file_path = file.absolute_path()

            # Create clip object for this file
            c = openshot.Clip(file_path)

            # Append missing attributes to Clip JSON
            new_clip = json.loads(c.Json())
            new_clip["position"] = position
            new_clip["layer"] = track_num
            new_clip["file_id"] = file.id
            new_clip["title"] = filename
            new_clip["image"] = thumb_path

            # Skip any clips that are missing a 'reader' attribute
            # TODO: Determine why this even happens, as it shouldn't be possible
            if not new_clip.get("reader"):
                continue  # Skip to next file

            # Overwrite frame rate (in case the user changed it in the File Properties)
            file_properties_fps = float(file.data["fps"]["num"]) / float(
                file.data["fps"]["den"])
            file_fps = float(new_clip["reader"]["fps"]["num"]) / float(
                new_clip["reader"]["fps"]["den"])
            fps_diff = file_fps / file_properties_fps
            new_clip["reader"]["fps"]["num"] = file.data["fps"]["num"]
            new_clip["reader"]["fps"]["den"] = file.data["fps"]["den"]
            # Scale duration / length / and end properties
            new_clip["reader"]["duration"] *= fps_diff
            new_clip["end"] *= fps_diff
            new_clip["duration"] *= fps_diff

            # Check for optional start and end attributes
            start_time = 0
            end_time = new_clip["reader"]["duration"]

            if 'start' in file.data.keys():
                start_time = file.data['start']
                new_clip["start"] = start_time
            if 'end' in file.data.keys():
                end_time = file.data['end']
                new_clip["end"] = end_time

            # Adjust clip duration, start, and end
            new_clip["duration"] = new_clip["reader"]["duration"]
            if file.data["media_type"] == "image":
                end_time = image_length
                new_clip["end"] = end_time

            # Adjust Fade of Clips (if no transition is chosen)
            if not transition_path:
                if fade_value is not None:
                    # Overlap this clip with the previous one (if any)
                    position = max(start_position,
                                   new_clip["position"] - fade_length)
                    new_clip["position"] = position

                if fade_value == 'Fade In' or fade_value == 'Fade In & Out':
                    start = openshot.Point(
                        round(start_time * fps_float) + 1, 0.0,
                        openshot.BEZIER)
                    start_object = json.loads(start.Json())
                    end = openshot.Point(
                        min(
                            round((start_time + fade_length) * fps_float) + 1,
                            round(end_time * fps_float) + 1), 1.0,
                        openshot.BEZIER)
                    end_object = json.loads(end.Json())
                    new_clip['alpha']["Points"].append(start_object)
                    new_clip['alpha']["Points"].append(end_object)

                if fade_value == 'Fade Out' or fade_value == 'Fade In & Out':
                    start = openshot.Point(
                        max(
                            round((end_time * fps_float) + 1) -
                            (round(fade_length * fps_float) + 1),
                            round(start_time * fps_float) + 1), 1.0,
                        openshot.BEZIER)
                    start_object = json.loads(start.Json())
                    end = openshot.Point(
                        round(end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                    end_object = json.loads(end.Json())
                    new_clip['alpha']["Points"].append(start_object)
                    new_clip['alpha']["Points"].append(end_object)

            # Adjust zoom amount
            if zoom_value is not None:
                # Location animation
                if zoom_value == "Random":
                    animate_start_x = uniform(-0.5, 0.5)
                    animate_end_x = uniform(-0.15, 0.15)
                    animate_start_y = uniform(-0.5, 0.5)
                    animate_end_y = uniform(-0.15, 0.15)

                    # Scale animation
                    start_scale = uniform(0.5, 1.5)
                    end_scale = uniform(0.85, 1.15)

                elif zoom_value == "Zoom In":
                    animate_start_x = 0.0
                    animate_end_x = 0.0
                    animate_start_y = 0.0
                    animate_end_y = 0.0

                    # Scale animation
                    start_scale = 1.0
                    end_scale = 1.25

                elif zoom_value == "Zoom Out":
                    animate_start_x = 0.0
                    animate_end_x = 0.0
                    animate_start_y = 0.0
                    animate_end_y = 0.0

                    # Scale animation
                    start_scale = 1.25
                    end_scale = 1.0

                # Add keyframes
                start = openshot.Point(
                    round(start_time * fps_float) + 1, start_scale,
                    openshot.BEZIER)
                start_object = json.loads(start.Json())
                end = openshot.Point(
                    round(end_time * fps_float) + 1, end_scale,
                    openshot.BEZIER)
                end_object = json.loads(end.Json())
                new_clip["gravity"] = openshot.GRAVITY_CENTER
                new_clip["scale_x"]["Points"].append(start_object)
                new_clip["scale_x"]["Points"].append(end_object)
                new_clip["scale_y"]["Points"].append(start_object)
                new_clip["scale_y"]["Points"].append(end_object)

                # Add keyframes
                start_x = openshot.Point(
                    round(start_time * fps_float) + 1, animate_start_x,
                    openshot.BEZIER)
                start_x_object = json.loads(start_x.Json())
                end_x = openshot.Point(
                    round(end_time * fps_float) + 1, animate_end_x,
                    openshot.BEZIER)
                end_x_object = json.loads(end_x.Json())
                start_y = openshot.Point(
                    round(start_time * fps_float) + 1, animate_start_y,
                    openshot.BEZIER)
                start_y_object = json.loads(start_y.Json())
                end_y = openshot.Point(
                    round(end_time * fps_float) + 1, animate_end_y,
                    openshot.BEZIER)
                end_y_object = json.loads(end_y.Json())
                new_clip["gravity"] = openshot.GRAVITY_CENTER
                new_clip["location_x"]["Points"].append(start_x_object)
                new_clip["location_x"]["Points"].append(end_x_object)
                new_clip["location_y"]["Points"].append(start_y_object)
                new_clip["location_y"]["Points"].append(end_y_object)

            if transition_path:
                # Add transition for this clip (if any)
                # Open up QtImageReader for transition Image
                if random_transition:
                    # randint is inclusive on both ends, so subtract 1
                    random_index = randint(0, len(self.transitions) - 1)
                    transition_path = self.transitions[random_index]

                # Get reader for transition
                transition_reader = openshot.QtImageReader(transition_path)

                brightness = openshot.Keyframe()
                brightness.AddPoint(1, 1.0, openshot.BEZIER)
                brightness.AddPoint(
                    round(
                        min(transition_length, end_time - start_time) *
                        fps_float) + 1, -1.0, openshot.BEZIER)
                contrast = openshot.Keyframe(3.0)

                # Create transition dictionary
                transitions_data = {
                    "layer": track_num,
                    "title": "Transition",
                    "type": "Mask",
                    "start": 0,
                    "end": min(transition_length, end_time - start_time),
                    "brightness": json.loads(brightness.Json()),
                    "contrast": json.loads(contrast.Json()),
                    "reader": json.loads(transition_reader.Json()),
                    "replace_image": False
                }

                # Overlap this clip with the previous one (if any)
                position = max(start_position, position - transition_length)
                transitions_data["position"] = position
                new_clip["position"] = position

                # Create transition
                tran = Transition()
                tran.data = transitions_data
                tran.save()

            # Save Clip
            clip.data = new_clip
            clip.save()

            # Increment position by length of clip
            position += (end_time - start_time)

        # Accept dialog
        super(AddToTimeline, self).accept()
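
The frame-rate override block above scales duration, end, and reader duration by fps_diff = file_fps / file_properties_fps. A worked sketch of why that ratio is needed (values hypothetical):

# If a 10-second 30 fps file is re-labelled as 25 fps in File Properties,
# each frame now plays for longer, so the clip stretches by 30 / 25 = 1.2
file_fps = 30.0              # fps stored in the clip's reader JSON
file_properties_fps = 25.0   # fps the user chose in File Properties
fps_diff = file_fps / file_properties_fps

duration = 10.0
print(duration * fps_diff)   # 12.0 seconds at the new frame rate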
Code example #6
    def read_legacy_project_file(self, file_path):
        """Attempt to read a legacy version 1.x openshot project file"""
        import sys, pickle
        from classes.query import File, Track, Clip, Transition
        from classes.app import get_app
        import openshot

        try:
            import json
        except ImportError:
            import simplejson as json

        # Get translation method
        _ = get_app()._tr

        # Append version info
        v = openshot.GetVersion()
        project_data = {}
        project_data["version"] = {
            "openshot-qt": info.VERSION,
            "libopenshot": v.ToString()
        }

        # Get FPS from project
        from classes.app import get_app
        fps = get_app().project.get(["fps"])
        fps_float = float(fps["num"]) / float(fps["den"])

        # Import legacy openshot classes (from version 1.X)
        from classes.legacy.openshot import classes as legacy_classes
        from classes.legacy.openshot.classes import project as legacy_project
        from classes.legacy.openshot.classes import sequences as legacy_sequences
        from classes.legacy.openshot.classes import track as legacy_track
        from classes.legacy.openshot.classes import clip as legacy_clip
        from classes.legacy.openshot.classes import keyframe as legacy_keyframe
        from classes.legacy.openshot.classes import files as legacy_files
        from classes.legacy.openshot.classes import transition as legacy_transition
        from classes.legacy.openshot.classes import effect as legacy_effect
        from classes.legacy.openshot.classes import marker as legacy_marker
        sys.modules['openshot.classes'] = legacy_classes
        sys.modules['classes.project'] = legacy_project
        sys.modules['classes.sequences'] = legacy_sequences
        sys.modules['classes.track'] = legacy_track
        sys.modules['classes.clip'] = legacy_clip
        sys.modules['classes.keyframe'] = legacy_keyframe
        sys.modules['classes.files'] = legacy_files
        sys.modules['classes.transition'] = legacy_transition
        sys.modules['classes.effect'] = legacy_effect
        sys.modules['classes.marker'] = legacy_marker

        # Keep track of files that failed to load
        failed_files = []

        with open(file_path.encode('UTF-8'), 'rb') as f:
            try:
                # Unpickle legacy openshot project file
                v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
                file_lookup = {}

                # Loop through files
                for item in v1_data.project_folder.items:
                    # Is this item a File (i.e. ignore folders)
                    if isinstance(item, legacy_files.OpenShotFile):
                        # Create file
                        try:
                            clip = openshot.Clip(item.name)
                            reader = clip.Reader()
                            file_data = json.loads(reader.Json())

                            # Determine media type
                            if file_data["has_video"] and not self.is_image(
                                    file_data):
                                file_data["media_type"] = "video"
                            elif file_data["has_video"] and self.is_image(
                                    file_data):
                                file_data["media_type"] = "image"
                            elif file_data["has_audio"] and not file_data[
                                    "has_video"]:
                                file_data["media_type"] = "audio"

                            # Save new file to the project data
                            file = File()
                            file.data = file_data
                            file.save()

                            # Keep track of new ids and old ids
                            file_lookup[item.unique_id] = file

                        except Exception:
                            # Handle exception quietly
                            msg = (
                                "%s is not a valid video, audio, or image file."
                                % item.name)
                            log.error(msg)
                            failed_files.append(item.name)

                # Delete all tracks
                track_list = copy.deepcopy(Track.filter())
                for track in track_list:
                    track.delete()

                # Create new tracks
                track_counter = 0
                for legacy_t in reversed(v1_data.sequences[0].tracks):
                    t = Track()
                    t.data = {
                        "number": track_counter,
                        "y": 0,
                        "label": legacy_t.name
                    }
                    t.save()

                    track_counter += 1

                # Loop through clips
                track_counter = 0
                for sequence in v1_data.sequences:
                    for track in reversed(sequence.tracks):
                        for clip in track.clips:
                            # Get associated file for this clip
                            if clip.file_object.unique_id in file_lookup:
                                file = file_lookup[clip.file_object.unique_id]
                            else:
                                # Skip missing file
                                log.info(
                                    "Skipping importing missing file: %s" %
                                    clip.file_object.unique_id)
                                continue

                            # Create clip
                            if (file.data["media_type"] == "video"
                                    or file.data["media_type"] == "image"):
                                # Determine thumb path
                                thumb_path = os.path.join(
                                    info.THUMBNAIL_PATH,
                                    "%s.png" % file.data["id"])
                            else:
                                # Audio file
                                thumb_path = os.path.join(
                                    info.PATH, "images", "AudioThumbnail.png")

                            # Get file name
                            path, filename = os.path.split(file.data["path"])

                            # Convert path to the correct relative path (based on this folder)
                            file_path = file.absolute_path()

                            # Create clip object for this file
                            c = openshot.Clip(file_path)

                            # Append missing attributes to Clip JSON
                            new_clip = json.loads(c.Json())
                            new_clip["file_id"] = file.id
                            new_clip["title"] = filename
                            new_clip["image"] = thumb_path

                            # Check for optional start and end attributes
                            new_clip["start"] = clip.start_time
                            new_clip["end"] = clip.end_time
                            new_clip["position"] = clip.position_on_track
                            new_clip["layer"] = track_counter

                            # Clear alpha (if needed)
                            if clip.video_fade_in or clip.video_fade_out:
                                new_clip["alpha"]["Points"] = []

                            # Video Fade IN
                            if clip.video_fade_in:
                                # Add keyframes
                                start = openshot.Point(
                                    round(clip.start_time * fps_float) + 1,
                                    0.0, openshot.BEZIER)
                                start_object = json.loads(start.Json())
                                end = openshot.Point(
                                    round((clip.start_time +
                                           clip.video_fade_in_amount) *
                                          fps_float) + 1, 1.0, openshot.BEZIER)
                                end_object = json.loads(end.Json())
                                new_clip["alpha"]["Points"].append(
                                    start_object)
                                new_clip["alpha"]["Points"].append(end_object)

                            # Video Fade OUT
                            if clip.video_fade_out:
                                # Add keyframes
                                start = openshot.Point(
                                    round((clip.end_time -
                                           clip.video_fade_out_amount) *
                                          fps_float) + 1, 1.0, openshot.BEZIER)
                                start_object = json.loads(start.Json())
                                end = openshot.Point(
                                    round(clip.end_time * fps_float) + 1, 0.0,
                                    openshot.BEZIER)
                                end_object = json.loads(end.Json())
                                new_clip["alpha"]["Points"].append(
                                    start_object)
                                new_clip["alpha"]["Points"].append(end_object)

                            # Clear Audio (if needed)
                            if clip.audio_fade_in or clip.audio_fade_out:
                                new_clip["volume"]["Points"] = []
                            else:
                                p = openshot.Point(1, clip.volume / 100.0,
                                                   openshot.BEZIER)
                                p_object = json.loads(p.Json())
                                new_clip["volume"] = {"Points": [p_object]}

                            # Audio Fade IN
                            if clip.audio_fade_in:
                                # Add keyframes
                                start = openshot.Point(
                                    round(clip.start_time * fps_float) + 1,
                                    0.0, openshot.BEZIER)
                                start_object = json.loads(start.Json())
                                end = openshot.Point(
                                    round((clip.start_time +
                                           clip.video_fade_in_amount) *
                                          fps_float) + 1, clip.volume / 100.0,
                                    openshot.BEZIER)
                                end_object = json.loads(end.Json())
                                new_clip["volume"]["Points"].append(
                                    start_object)
                                new_clip["volume"]["Points"].append(end_object)

                            # Audio Fade OUT
                            if clip.audio_fade_out:
                                # Add keyframes
                                start = openshot.Point(
                                    round((clip.end_time -
                                           clip.video_fade_out_amount) *
                                          fps_float) + 1, clip.volume / 100.0,
                                    openshot.BEZIER)
                                start_object = json.loads(start.Json())
                                end = openshot.Point(
                                    round(clip.end_time * fps_float) + 1, 0.0,
                                    openshot.BEZIER)
                                end_object = json.loads(end.Json())
                                new_clip["volume"]["Points"].append(
                                    start_object)
                                new_clip["volume"]["Points"].append(end_object)

                            # Save clip
                            clip_object = Clip()
                            clip_object.data = new_clip
                            clip_object.save()

                        # Loop through transitions
                        for trans in track.transitions:
                            # Fix default transition
                            if not trans.resource or not os.path.exists(
                                    trans.resource):
                                trans.resource = os.path.join(
                                    info.PATH, "transitions", "common",
                                    "fade.svg")

                            # Open up QtImageReader for transition Image
                            transition_reader = openshot.QtImageReader(
                                trans.resource)

                            trans_begin_value = 1.0
                            trans_end_value = -1.0
                            if trans.reverse:
                                trans_begin_value = -1.0
                                trans_end_value = 1.0

                            brightness = openshot.Keyframe()
                            brightness.AddPoint(1, trans_begin_value,
                                                openshot.BEZIER)
                            brightness.AddPoint(
                                round(trans.length * fps_float) + 1,
                                trans_end_value, openshot.BEZIER)
                            contrast = openshot.Keyframe(trans.softness * 10.0)

                            # Create transition dictionary
                            transitions_data = {
                                "id": get_app().project.generate_id(),
                                "layer": track_counter,
                                "title": "Transition",
                                "type": "Mask",
                                "position": trans.position_on_track,
                                "start": 0,
                                "end": trans.length,
                                "brightness": json.loads(brightness.Json()),
                                "contrast": json.loads(contrast.Json()),
                                "reader": json.loads(transition_reader.Json()),
                                "replace_image": False
                            }

                            # Save transition
                            t = Transition()
                            t.data = transitions_data
                            t.save()

                        # Increment track counter
                        track_counter += 1

            except Exception as ex:
                # Error parsing legacy contents
                msg = _("Failed to load project file %(path)s: %(error)s" % {
                    "path": file_path,
                    "error": ex
                })
                log.error(msg)
                raise Exception(msg)

        # Show warning if some files failed to load
        if failed_files:
            # Throw exception
            raise Exception(
                _("Failed to load the following files:\n%s") %
                ", ".join(failed_files))

        # Return mostly empty project_data dict (with just the current version #)
        log.info("Successfully loaded legacy project file: %s" % file_path)
        return project_data
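
The sys.modules assignments near the top are what let pickle.load resolve class paths that were recorded by OpenShot 1.x. A minimal, self-contained sketch of the same trick (module and class names hypothetical):

import pickle
import sys
import types

# The module as it exists today, registered under its new import path
new_mod = types.ModuleType("classes_v2")

class Marker:
    pass

new_mod.Marker = Marker
sys.modules["classes_v2"] = new_mod

# Pickles written by the old version reference "classes_v1.Marker".
# Registering the old path against the surviving module lets pickle
# resolve that reference, just like the sys.modules block above.
sys.modules["classes_v1"] = new_mod
Marker.__module__ = "classes_v1"

payload = pickle.dumps(Marker())      # stored as classes_v1.Marker
print(type(pickle.loads(payload)))    # resolved through the alias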
Code example #7
File: edl.py Project: Xen0byte/openshot-qt
def create_clip(context, track):
    """Create a new clip based on this context dict"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get clip path (and prompt user if path not found)
    clip_path, is_modified, is_skipped = find_missing_file(
        context.get("clip_path", ""))
    if is_skipped:
        return

    # Get video context
    video_ctx = context.get("AX", {}).get("V", {})
    audio_ctx = context.get("AX", {}).get("A", {})

    # Check for this path in our existing project data
    file = File.get(path=clip_path)

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip_obj = openshot.Clip(clip_path)

    if not file:
        # Get the JSON for the clip's internal reader
        try:
            reader = clip_obj.Reader()
            file_data = json.loads(reader.Json())

            # Determine media type
            if file_data["has_video"] and not is_image(file_data):
                file_data["media_type"] = "video"
            elif file_data["has_video"] and is_image(file_data):
                file_data["media_type"] = "image"
            elif file_data["has_audio"] and not file_data["has_video"]:
                file_data["media_type"] = "audio"

            # Save new file to the project data
            file = File()
            file.data = file_data

            # Save file
            file.save()
        except Exception:
            log.warning('Error building File object for %s' % clip_path,
                        exc_info=1)
            # Without a valid File record, the code below would fail on None
            return

    if (file.data["media_type"] == "video"
            or file.data["media_type"] == "image"):
        # Determine thumb path
        thumb_path = os.path.join(info.THUMBNAIL_PATH,
                                  "%s.png" % file.data["id"])
    else:
        # Audio file
        thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

    # Create Clip object
    clip = Clip()
    clip.data = json.loads(clip_obj.Json())
    clip.data["file_id"] = file.id
    clip.data["title"] = context.get("clip_path", "")
    clip.data["layer"] = track.data.get("number", 1000000)
    if video_ctx and not audio_ctx:
        # Only video
        clip.data["position"] = timecodeToSeconds(
            video_ctx.get("timeline_position", "00:00:00:00"), fps_num,
            fps_den)
        clip.data["start"] = timecodeToSeconds(
            video_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(
            video_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["has_audio"] = {
            "Points": [{
                "co": {
                    "X": 1.0,
                    "Y": 0.0  # Disable audio
                },
                "interpolation": 2
            }]
        }
    elif audio_ctx and not video_ctx:
        # Only audio
        clip.data["position"] = timecodeToSeconds(
            audio_ctx.get("timeline_position", "00:00:00:00"), fps_num,
            fps_den)
        clip.data["start"] = timecodeToSeconds(
            audio_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(
            audio_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["has_video"] = {
            "Points": [{
                "co": {
                    "X": 1.0,
                    "Y": 0.0  # Disable video
                },
                "interpolation": 2
            }]
        }
    else:
        # Both video and audio
        clip.data["position"] = timecodeToSeconds(
            video_ctx.get("timeline_position", "00:00:00:00"), fps_num,
            fps_den)
        clip.data["start"] = timecodeToSeconds(
            video_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(
            video_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)

    # Add volume keyframes
    if context.get("volume"):
        clip.data["volume"] = {"Points": []}
        for keyframe in context.get("volume", []):
            clip.data["volume"]["Points"].append({
                "co": {
                    "X":
                    round(
                        timecodeToSeconds(keyframe.get("time", 0.0), fps_num,
                                          fps_den) * fps_float),
                    "Y":
                    keyframe.get("value", 0.0)
                },
                "interpolation": 1  # linear
            })

    # Add alpha keyframes
    if context.get("opacity"):
        clip.data["alpha"] = {"Points": []}
        for keyframe in context.get("opacity", []):
            clip.data["alpha"]["Points"].append({
                "co": {
                    "X":
                    round(
                        timecodeToSeconds(keyframe.get("time", 0.0), fps_num,
                                          fps_den) * fps_float),
                    "Y":
                    keyframe.get("value", 0.0)
                },
                "interpolation": 1  # linear
            })

    # Save clip
    clip.save()
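
timecodeToSeconds is called throughout create_clip but not defined in this excerpt. A plausible sketch, assuming SMPTE-style "HH:MM:SS:FF" timecodes (the real openshot-qt helper may differ):

def timecodeToSeconds(timecode, fps_num, fps_den):
    """Hypothetical sketch: convert an "HH:MM:SS:FF" timecode to seconds."""
    fps_float = float(fps_num) / float(fps_den)
    if not isinstance(timecode, str):
        # create_clip passes a numeric 0.0 default for missing keyframe times
        return float(timecode)
    hours, minutes, seconds, frames = [int(p) for p in timecode.split(":")]
    return hours * 3600 + minutes * 60 + seconds + frames / fps_float

print(timecodeToSeconds("00:00:02:12", 24, 1))  # 2.5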
Code example #8
File: project_data.py Project: OpenShot/openshot-qt
    def read_legacy_project_file(self, file_path):
        """Attempt to read a legacy version 1.x openshot project file"""
        import sys, pickle
        from classes.query import File, Track, Clip, Transition
        from classes.app import get_app
        import openshot

        try:
            import json
        except ImportError:
            import simplejson as json

        # Get translation method
        _ = get_app()._tr

        # Append version info
        v = openshot.GetVersion()
        project_data = {}
        project_data["version"] = {"openshot-qt" : info.VERSION,
                                   "libopenshot" : v.ToString()}

        # Get FPS from project
        from classes.app import get_app
        fps = get_app().project.get(["fps"])
        fps_float = float(fps["num"]) / float(fps["den"])

        # Import legacy openshot classes (from version 1.X)
        from classes.legacy.openshot import classes as legacy_classes
        from classes.legacy.openshot.classes import project as legacy_project
        from classes.legacy.openshot.classes import sequences as legacy_sequences
        from classes.legacy.openshot.classes import track as legacy_track
        from classes.legacy.openshot.classes import clip as legacy_clip
        from classes.legacy.openshot.classes import keyframe as legacy_keyframe
        from classes.legacy.openshot.classes import files as legacy_files
        from classes.legacy.openshot.classes import transition as legacy_transition
        from classes.legacy.openshot.classes import effect as legacy_effect
        from classes.legacy.openshot.classes import marker as legacy_marker
        sys.modules['openshot.classes'] = legacy_classes
        sys.modules['classes.project'] = legacy_project
        sys.modules['classes.sequences'] = legacy_sequences
        sys.modules['classes.track'] = legacy_track
        sys.modules['classes.clip'] = legacy_clip
        sys.modules['classes.keyframe'] = legacy_keyframe
        sys.modules['classes.files'] = legacy_files
        sys.modules['classes.transition'] = legacy_transition
        sys.modules['classes.effect'] = legacy_effect
        sys.modules['classes.marker'] = legacy_marker

        # Keep track of files that failed to load
        failed_files = []

        with open(file_path.encode('UTF-8'), 'rb') as f:
            try:
                # Unpickle legacy openshot project file
                v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
                file_lookup = {}

                # Loop through files
                for item in v1_data.project_folder.items:
                    # Is this item a File (i.e. ignore folders)
                    if isinstance(item, legacy_files.OpenShotFile):
                        # Create file
                        try:
                            clip = openshot.Clip(item.name)
                            reader = clip.Reader()
                            file_data = json.loads(reader.Json(), strict=False)

                            # Determine media type
                            if file_data["has_video"] and not self.is_image(file_data):
                                file_data["media_type"] = "video"
                            elif file_data["has_video"] and self.is_image(file_data):
                                file_data["media_type"] = "image"
                            elif file_data["has_audio"] and not file_data["has_video"]:
                                file_data["media_type"] = "audio"

                            # Save new file to the project data
                            file = File()
                            file.data = file_data
                            file.save()

                            # Keep track of new ids and old ids
                            file_lookup[item.unique_id] = file

                        except Exception:
                            # Handle exception quietly
                            msg = ("%s is not a valid video, audio, or image file." % item.name)
                            log.error(msg)
                            failed_files.append(item.name)

                # Delete all tracks
                track_list = copy.deepcopy(Track.filter())
                for track in track_list:
                    track.delete()

                # Create new tracks
                track_counter = 0
                for legacy_t in reversed(v1_data.sequences[0].tracks):
                    t = Track()
                    t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                    t.save()

                    track_counter += 1

                # Loop through clips
                track_counter = 0
                for sequence in v1_data.sequences:
                    for track in reversed(sequence.tracks):
                        for clip in track.clips:
                            # Get associated file for this clip
                            if clip.file_object.unique_id in file_lookup:
                                file = file_lookup[clip.file_object.unique_id]
                            else:
                                # Skip missing file
                                log.info("Skipping importing missing file: %s" % clip.file_object.unique_id)
                                continue

                            # Create clip
                            if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                                # Determine thumb path
                                thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                            else:
                                # Audio file
                                thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                            # Get file name
                            path, filename = os.path.split(file.data["path"])

                            # Convert path to the correct relative path (based on this folder)
                            file_path = file.absolute_path()

                            # Create clip object for this file
                            c = openshot.Clip(file_path)

                            # Append missing attributes to Clip JSON
                            new_clip = json.loads(c.Json(), strict=False)
                            new_clip["file_id"] = file.id
                            new_clip["title"] = filename
                            new_clip["image"] = thumb_path

                            # Check for optional start and end attributes
                            new_clip["start"] = clip.start_time
                            new_clip["end"] = clip.end_time
                            new_clip["position"] = clip.position_on_track
                            new_clip["layer"] = track_counter

                            # Clear alpha (if needed)
                            if clip.video_fade_in or clip.video_fade_out:
                                new_clip["alpha"]["Points"] = []

                            # Video Fade IN
                            if clip.video_fade_in:
                                # Add keyframes
                                start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                                start_object = json.loads(start.Json(), strict=False)
                                end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                                end_object = json.loads(end.Json(), strict=False)
                                new_clip["alpha"]["Points"].append(start_object)
                                new_clip["alpha"]["Points"].append(end_object)

                            # Video Fade OUT
                            if clip.video_fade_out:
                                # Add keyframes
                                start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                                start_object = json.loads(start.Json(), strict=False)
                                end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                                end_object = json.loads(end.Json(), strict=False)
                                new_clip["alpha"]["Points"].append(start_object)
                                new_clip["alpha"]["Points"].append(end_object)

                            # Clear Audio (if needed)
                            if clip.audio_fade_in or clip.audio_fade_out:
                                new_clip["volume"]["Points"] = []
                            else:
                                p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER)
                                p_object = json.loads(p.Json(), strict=False)
                                new_clip["volume"] = { "Points" : [p_object]}

                            # Audio Fade IN
                            if clip.audio_fade_in:
                                # Add keyframes
                                start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                                start_object = json.loads(start.Json(), strict=False)
                                end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                                end_object = json.loads(end.Json(), strict=False)
                                new_clip["volume"]["Points"].append(start_object)
                                new_clip["volume"]["Points"].append(end_object)

                            # Audio Fade OUT
                            if clip.audio_fade_out:
                                # Add keyframes
                                start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                                start_object = json.loads(start.Json(), strict=False)
                                end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                                end_object = json.loads(end.Json(), strict=False)
                                new_clip["volume"]["Points"].append(start_object)
                                new_clip["volume"]["Points"].append(end_object)

                            # Save clip
                            clip_object = Clip()
                            clip_object.data = new_clip
                            clip_object.save()

                        # Loop through transitions
                        for trans in track.transitions:
                            # Fix default transition
                            if not trans.resource or not os.path.exists(trans.resource):
                                trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg")

                            # Open up QtImageReader for transition Image
                            transition_reader = openshot.QtImageReader(trans.resource)

                            trans_begin_value = 1.0
                            trans_end_value = -1.0
                            if trans.reverse:
                                trans_begin_value = -1.0
                                trans_end_value = 1.0

                            brightness = openshot.Keyframe()
                            brightness.AddPoint(1, trans_begin_value, openshot.BEZIER)
                            brightness.AddPoint(round(trans.length * fps_float) + 1, trans_end_value, openshot.BEZIER)
                            contrast = openshot.Keyframe(trans.softness * 10.0)

                            # Create transition dictionary
                            transitions_data = {
                                "id": get_app().project.generate_id(),
                                "layer": track_counter,
                                "title": "Transition",
                                "type": "Mask",
                                "position": trans.position_on_track,
                                "start": 0,
                                "end": trans.length,
                                "brightness": json.loads(brightness.Json(), strict=False),
                                "contrast": json.loads(contrast.Json(), strict=False),
                                "reader": json.loads(transition_reader.Json(), strict=False),
                                "replace_image": False
                            }

                            # Save transition
                            t = Transition()
                            t.data = transitions_data
                            t.save()

                        # Increment track counter
                        track_counter += 1

            except Exception as ex:
                # Error parsing legacy contents
                msg = _("Failed to load project file %(path)s: %(error)s") % {"path": file_path, "error": ex}
                log.error(msg)
                raise Exception(msg)

        # Show warning if some files failed to load
        if failed_files:
            # Throw exception
            raise Exception(_("Failed to load the following files:\n%s") % ", ".join(failed_files))

        # Return mostly empty project_data dict (with just the current version #)
        log.info("Successfully loaded legacy project file: %s" % file_path)
        return project_data
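
Every fade keyframe in this importer maps a timeline position in seconds to libopenshot's 1-based frame numbers with the same expression, round(seconds * fps) + 1. A minimal standalone sketch of that conversion (the helper name and sample frame rate are illustrative, not part of openshot-qt):

# Sketch of the seconds-to-frame mapping used by the fade keyframes above.
# libopenshot frame numbers are 1-based, so second 0.0 lands on frame 1.
def frame_for_time(seconds, fps_float):
    return round(seconds * fps_float) + 1

fps_float = 30000.0 / 1001.0                   # 29.97 fps (NTSC)
assert frame_for_time(0.0, fps_float) == 1
assert frame_for_time(1.0, fps_float) == 31    # round(29.97) + 1 = 31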
Code example #11
0
    def accept(self):
        """ Ok button clicked """
        log.info('accept')

        # Get settings from form
        start_position = self.txtStartTime.value()
        track_num = self.cmbTrack.currentData()
        fade_value = self.cmbFade.currentData()
        fade_length = self.txtFadeLength.value()
        transition_path = self.cmbTransition.currentData()
        transition_length = self.txtTransitionLength.value()
        image_length = self.txtImageLength.value()
        zoom_value = self.cmbZoom.currentData()

        # Init position
        position = start_position

        random_transition = False
        if transition_path == "random":
            random_transition = True

        # Get frames per second
        fps = get_app().project.get(["fps"])
        fps_float = float(fps["num"]) / float(fps["den"])

        # Loop through each file (in the current order)
        for file in self.treeFiles.timeline_model.files:
            # Create a clip
            clip = Clip()
            clip.data = {}

            if file.data["media_type"] in ("video", "image"):
                # Determine thumb path
                thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
            else:
                # Audio file
                thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

            # Get file name
            path, filename = os.path.split(file.data["path"])

            # Convert path to the correct relative path (based on this folder)
            file_path = file.absolute_path()

            # Create clip object for this file
            c = openshot.Clip(file_path)

            # Append missing attributes to Clip JSON
            new_clip = json.loads(c.Json())
            new_clip["position"] = position
            new_clip["layer"] = track_num
            new_clip["file_id"] = file.id
            new_clip["title"] = filename
            new_clip["image"] = thumb_path

            # Overwrite frame rate (in case the user changed it in the File Properties)
            file_properties_fps = float(file.data["fps"]["num"]) / float(file.data["fps"]["den"])
            file_fps = float(new_clip["reader"]["fps"]["num"]) / float(new_clip["reader"]["fps"]["den"])
            fps_diff = file_fps / file_properties_fps
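            # fps_diff > 1.0 means the reader reports more frames per second than
            # the user's chosen rate, so the durations below stretch by that ratio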
            new_clip["reader"]["fps"]["num"] = file.data["fps"]["num"]
            new_clip["reader"]["fps"]["den"] = file.data["fps"]["den"]
            # Scale duration / length / and end properties
            new_clip["reader"]["duration"] *= fps_diff
            new_clip["end"] *= fps_diff
            new_clip["duration"] *= fps_diff

            # Check for optional start and end attributes
            start_time = 0
            end_time = new_clip["reader"]["duration"]

            if 'start' in file.data.keys():
                start_time = file.data['start']
                new_clip["start"] = start_time
            if 'end' in file.data.keys():
                end_time = file.data['end']
                new_clip["end"] = end_time

            # Adjust clip duration, start, and end
            new_clip["duration"] = new_clip["reader"]["duration"]
            if file.data["media_type"] == "image":
                end_time = image_length
                new_clip["end"] = end_time

            # Adjust Fade of Clips (if no transition is chosen)
            if not transition_path:
                if fade_value is not None:
                    # Overlap this clip with the previous one (if any)
                    position = max(start_position, new_clip["position"] - fade_length)
                    new_clip["position"] = position

                if fade_value in ('Fade In', 'Fade In & Out'):
                    start = openshot.Point((start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                    start_object = json.loads(start.Json())
                    end = openshot.Point(min((start_time + fade_length) * fps_float, end_time * fps_float), 1.0, openshot.BEZIER)
                    end_object = json.loads(end.Json())
                    new_clip['alpha']["Points"].append(start_object)
                    new_clip['alpha']["Points"].append(end_object)

                if fade_value in ('Fade Out', 'Fade In & Out'):
                    start = openshot.Point(max((end_time * fps_float) - (fade_length * fps_float), start_time * fps_float), 1.0, openshot.BEZIER)
                    start_object = json.loads(start.Json())
                    end = openshot.Point(end_time * fps_float, 0.0, openshot.BEZIER)
                    end_object = json.loads(end.Json())
                    new_clip['alpha']["Points"].append(start_object)
                    new_clip['alpha']["Points"].append(end_object)

            # Adjust zoom amount
            if zoom_value is not None:
                # Location animation
                if zoom_value == "Random":
                    animate_start_x = uniform(-0.5, 0.5)
                    animate_end_x = uniform(-0.15, 0.15)
                    animate_start_y = uniform(-0.5, 0.5)
                    animate_end_y = uniform(-0.15, 0.15)

                    # Scale animation
                    start_scale = uniform(0.5, 1.5)
                    end_scale = uniform(0.85, 1.15)

                elif zoom_value == "Zoom In":
                    animate_start_x = 0.0
                    animate_end_x = 0.0
                    animate_start_y = 0.0
                    animate_end_y = 0.0

                    # Scale animation
                    start_scale = 1.0
                    end_scale = 1.25

                elif zoom_value == "Zoom Out":
                    animate_start_x = 0.0
                    animate_end_x = 0.0
                    animate_start_y = 0.0
                    animate_end_y = 0.0

                    # Scale animation
                    start_scale = 1.25
                    end_scale = 1.0

                # Add keyframes
                start = openshot.Point((start_time * fps_float) + 1, start_scale, openshot.BEZIER)
                start_object = json.loads(start.Json())
                end = openshot.Point(end_time * fps_float, end_scale, openshot.BEZIER)
                end_object = json.loads(end.Json())
                new_clip["gravity"] = openshot.GRAVITY_CENTER
                new_clip["scale_x"]["Points"].append(start_object)
                new_clip["scale_x"]["Points"].append(end_object)
                new_clip["scale_y"]["Points"].append(start_object)
                new_clip["scale_y"]["Points"].append(end_object)

                # Add keyframes
                start_x = openshot.Point((start_time * fps_float) + 1, animate_start_x, openshot.BEZIER)
                start_x_object = json.loads(start_x.Json())
                end_x = openshot.Point(end_time * fps_float, animate_end_x, openshot.BEZIER)
                end_x_object = json.loads(end_x.Json())
                start_y = openshot.Point((start_time * fps_float) + 1, animate_start_y, openshot.BEZIER)
                start_y_object = json.loads(start_y.Json())
                end_y = openshot.Point(end_time * fps_float, animate_end_y, openshot.BEZIER)
                end_y_object = json.loads(end_y.Json())
                new_clip["gravity"] = openshot.GRAVITY_CENTER
                new_clip["location_x"]["Points"].append(start_x_object)
                new_clip["location_x"]["Points"].append(end_x_object)
                new_clip["location_y"]["Points"].append(start_y_object)
                new_clip["location_y"]["Points"].append(end_y_object)

            if transition_path:
                # Add transition for this clip (if any)
                # Open up QtImageReader for transition Image
                if random_transition:
                    # randint is inclusive on both ends, so exclude len() itself
                    random_index = randint(0, len(self.transitions) - 1)
                    transition_path = self.transitions[random_index]

                # Get reader for transition
                transition_reader = openshot.QtImageReader(transition_path)

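                # The Mask effect animates its wipe with the brightness keyframe
                # (ramping 1.0 to -1.0 sweeps the grayscale mask across the clip);
                # contrast controls how soft the mask edge is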
                brightness = openshot.Keyframe()
                brightness.AddPoint(1, 1.0, openshot.BEZIER)
                brightness.AddPoint(min(transition_length, end_time - start_time) * fps_float, -1.0, openshot.BEZIER)
                contrast = openshot.Keyframe(3.0)

                # Create transition dictionary
                transitions_data = {
                    "layer": track_num,
                    "title": "Transition",
                    "type": "Mask",
                    "start": 0,
                    "end": min(transition_length, end_time - start_time),
                    "brightness": json.loads(brightness.Json()),
                    "contrast": json.loads(contrast.Json()),
                    "reader": json.loads(transition_reader.Json()),
                    "replace_image": False
                }

                # Overlap this clip with the previous one (if any)
                position = max(start_position, position - transition_length)
                transitions_data["position"] = position
                new_clip["position"] = position

                # Create transition
                tran = Transition()
                tran.data = transitions_data
                tran.save()

            # Save Clip
            clip.data = new_clip
            clip.save()

            # Increment position by length of clip
            position += (end_time - start_time)

        # Accept dialog
        super(AddToTimeline, self).accept()
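
The frame-rate override above rescales the reader's duration and the clip's end by the ratio between the reader's rate and the rate set in File Properties. A small sketch of that arithmetic (the function and sample values are illustrative, not openshot-qt API):

# Sketch of the duration rescaling above: if the reader reports 30 fps but
# the user set 24 fps in File Properties, the same frames take longer to
# play back, so durations stretch by 30 / 24.
def rescale_duration(duration, reader_fps, user_fps):
    fps_diff = reader_fps / user_fps
    return duration * fps_diff

assert rescale_duration(10.0, 30.0, 24.0) == 12.5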
Code example #12
0
    def setUpClass(cls):
        """ Init unit test data """
        # Create Qt application
        cls.app = QGuiApplication.instance()
        cls.clip_ids = []
        cls.file_ids = []
        cls.transition_ids = []

        clips = []

        # Insert some clips into the project data
        for num in range(5):
            # Create clip
            c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
            c.Position(num * 10.0)
            c.End(5.0)

            # Parse JSON
            clip_data = json.loads(c.Json())

            # Insert into project data
            query_clip = Clip()
            query_clip.data = clip_data
            query_clip.save()

            # Keep track of the ids
            cls.clip_ids.append(query_clip.id)
            clips.append(query_clip)

        # Insert some files into the project data
        for num in range(5):
            # Create file
            r = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100,
                                     2, 30.0)

            # Parse JSON
            file_data = json.loads(r.Json())

            # Insert into project data
            query_file = File()
            query_file.data = file_data
            query_file.data["path"] = os.path.join(info.IMAGES_PATH,
                                                   "AboutLogo.png")
            query_file.data["media_type"] = "image"
            query_file.save()

            # Keep track of the ids
            cls.file_ids.append(query_file.id)

        # Insert some transitions into the project data
        for c in clips:
            # Create mask object
            t = openshot.Mask()
            # Place over the last second of current clip
            pos = c.data.get("position", 0.0)
            start = c.data.get("start", 0.0)
            end = c.data.get("end", 0.0)
            t.Position((pos - start + end) - 1.0)
            t.End(1.0)

            # Insert into project data
            transitions_data = json.loads(t.Json())
            query_transition = Transition()
            query_transition.data = transitions_data
            query_transition.save()

            # Keep track of the ids
            cls.transition_ids.append(query_transition.id)

        # Don't keep the full query objects around
        del clips
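
The transition placement above, (pos - start + end) - 1.0, pins the one-second mask to the last second of each clip: a clip trimmed to play from start to end occupies the timeline from position to position + (end - start). A quick standalone check of that arithmetic (the names are illustrative):

# Sketch verifying the placement math in setUpClass above: a clip at
# position 10.0 trimmed to start=0.0, end=5.0 finishes at second 15.0,
# so a 1-second transition over its tail begins at 14.0.
def transition_start(position, start, end):
    return (position - start + end) - 1.0

assert transition_start(10.0, 0.0, 5.0) == 14.0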