Example #1
def _save_audio(response, data: StepData, config: dict):
    post_generate = config.get("post_generate", {})
    extension = post_generate.get("file_extension")
    if extension is not None:
        extension = data.format(extension)
    # if multiple requests were used, pick the response that contains the audio file
    if config["generate"]["type"].startswith("request_multiple"):
        audio_idx = data.format(config["generate"]["audio_idx"])
        response = response[audio_idx]

    content_type = response["headers"]["content-type"]
    audio = response["content"]

    # if the content type is JSON, the audio is embedded as a base64 string
    if content_type.startswith("application/json"):
        # extract the audio string from the JSON body
        audio = data_get_pattern(data.format(post_generate["audio_key"]), audio)
        # decode the base64-encoded audio string
        audio = base64.b64decode(audio)
    elif extension is None:
        # check if content type is an audio type
        if not content_type.startswith("audio"):
            raise InvalidContentTypeError(None, content_type, "'audio/*'")

        # derive the file extension from the MIME type
        extension = mimetypes.guess_all_extensions(content_type)[0].replace(".", "")

    audio_path = resources.new_temp_resource_path(data.data["_pipe_id"], extension)

    with open(audio_path, "wb") as fp:
        fp.write(audio)

    return audio_path
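
A minimal sketch (hypothetical byte content and key name) of the two response shapes _save_audio handles: a JSON body carrying a base64-encoded audio string, and a raw audio body whose file extension is guessed from the MIME type.

import base64

json_response = {
    "headers": {"content-type": "application/json"},
    # the audio string sits under a configurable key (post_generate["audio_key"])
    "content": {"audio": base64.b64encode(b"ID3...").decode("ascii")},
}
raw_response = {
    "headers": {"content-type": "audio/mpeg"},
    "content": b"ID3...",  # raw MP3 bytes, written to the temp file unchanged
}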
Example #2
def default(values: dict, data: StepData, config: dict):
    """Generiert eine Audiodatei mit der Python-Bibliothek gTTS.

    Wenn in der Konfiguration `sub_pairs` angegeben sind, werden diese den bisherigen `sub_pairs` hinzugefügt.
    `sub_pairs` sind bestimmte Wörter, die im Text ersetzt werden sollen.
    Beispiel: "z.B." soll vorgelesen werden als "zum Beispiel".

    :param values: Werte aus der JSON-Datei
    :param data: Daten aus der API
    :param config: Daten aus der Konfigurationsdatei
    :return:
    """
    for key in values:
        text = part.audio_parts(values[key]["parts"], data)

        sub_pairs = data.deep_format(config.get("sub_pairs", None), values=values)

        if sub_pairs:
            # use a separate loop variable so the outer `key` is not clobbered
            for sub_key in sub_pairs:
                value = data.get_data(sub_key, values)
                gtts.tokenizer.symbols.SUB_PAIRS.append((sub_key, value))

        tts = gTTS(text, lang=data.format(config["lang"]))

        file_path = resources.new_temp_resource_path(data.data["_pipe_id"], data.format(config["format"]))
        tts.save(file_path)

        values[key] = file_path
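
The substitution above relies on gTTS's tokenizer pre-processor; a standalone sketch (hypothetical output path) of what appending to SUB_PAIRS does:

import gtts.tokenizer.symbols
from gtts import gTTS

# SUB_PAIRS holds (pattern, replacement) tuples that gTTS substitutes
# before synthesis, so "z.B." is read aloud as "zum Beispiel".
gtts.tokenizer.symbols.SUB_PAIRS.append(("z.B.", "zum Beispiel"))
gTTS("Das ist z.B. ein Test.", lang="de").save("sample.mp3")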
Example #3
def pillow(values: dict, step_data: StepData, prev_paths: dict):
    """
    Creates an image with the Python library Pillow.
    Either a new image is opened or a previously created image is edited further.
    Any number of overlays can be specified in the JSON; this method executes
    all of them and places them on the image.

    :param values: image blueprint of the image to be created
    :param step_data: data from the API
    :param prev_paths: all image blueprints and therefore all paths to the images created so far
    :return: path to the created image
    :rtype: str
    """
    if values.get("path", None) is None:
        image_name = step_data.format(values["image_name"])
        source_img = Image.open(
            resources.get_resource_path(prev_paths[image_name]))
    else:
        path = step_data.format(values["path"])
        source_img = Image.open(resources.get_image_path(path))
    # transparent RGBA layer that is composited over the drawn image at the end
    img1 = Image.new("RGBA", source_img.size)
    draw = ImageDraw.Draw(source_img)

    for overlay in values["overlay"]:
        over_func = get_type_func(overlay, OVERLAY_TYPES)
        over_func(overlay, step_data, source_img, prev_paths, draw)

    file = resources.new_temp_resource_path(step_data.data["_pipe_id"], "png")
    Image.composite(img1, source_img, img1).save(file)
    return file
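
A standalone sketch of the Pillow pattern used above, without the project helpers: draw onto the source image, then composite a transparent RGBA layer over it (with an empty layer the composite simply yields the drawn image).

from PIL import Image, ImageDraw

source_img = Image.new("RGBA", (640, 360), "white")
overlay_layer = Image.new("RGBA", source_img.size)  # fully transparent
draw = ImageDraw.Draw(source_img)
draw.text((10, 10), "hello", fill="black")
Image.composite(overlay_layer, source_img, overlay_layer).save("out.png")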
Example #4
def _text_to_audio(data: StepData, values: dict, text: str, config: dict):
    """Converts the given text to an audio file with gTTS and returns its path."""
    sub_pairs = data.deep_format(config.get("sub_pairs", None), values=values)

    if sub_pairs:
        for key in sub_pairs:
            value = data.get_data(key, values)
            gtts.tokenizer.symbols.SUB_PAIRS.append((key, value))

    tts = gTTS(data.format(text, values), lang=data.format(config["lang"]))

    file_path = resources.new_temp_resource_path(data.data["_pipe_id"],
                                                 data.format(config["format"]))
    tts.save(file_path)
    return file_path
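
The gTTS call itself, stripped of the pipeline plumbing (hypothetical text and output path):

from gtts import gTTS

tts = gTTS("zum Beispiel", lang="de")
tts.save("sample.mp3")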
Example #5
def default(values: dict, data: StepData, config: dict):
    """Generiert eine Audiodatei mit der Python-Bibliothek gTTS.

    Wenn in der Konfiguration `sub_pairs` angegeben sind, werden diese den bisherigen `sub_pairs` hinzugefügt.
    `sub_pairs` sind bestimmte Wörter, die im Text ersetzt werden sollen.
    Beispiel: "z.B." soll vorgelesen werden als "zum Beispiel".

    :param values: Werte aus der JSON-Datei
    :param data: Daten aus der API
    :param config: Daten aus der Konfigurationsdatei
    :return:
    """
    for key in values:

        text = part.audio_parts(values[key]["parts"], data)

        if text[1]:  # taken when the audio consists solely of the given text
            values[key] = _text_to_audio(data, values, text[0], config)
        else:  # taken when the audio also contains static files or silent segments
            audio_list = []
            for item in values[key]["parts"]:
                if item["type"] == "text":
                    audio_list.append(
                        _text_to_audio(data, values, item["pattern"], config))
                elif item["type"] == "file":
                    audio_list.append(resources.get_audio_path(item["path"]))
                elif item["type"] == "random_text":
                    # pick one of the given patterns at random
                    rand = randint(0, len(item["pattern"]) - 1)
                    audio_list.append(
                        _text_to_audio(data, values, item["pattern"][rand],
                                       config))
                elif item["type"] == "silent":
                    # export a silent MP3 segment of the requested length
                    duration = item["duration"] * 1000  # seconds to milliseconds
                    silence = AudioSegment.silent(duration=duration)
                    silent_audio_file_path = resources.new_temp_resource_path(
                        data.data["_pipe_id"], "mp3")
                    silence.export(silent_audio_file_path, format="mp3")
                    audio_list.append(silent_audio_file_path)

            # values[key] = _audios_to_audio(audio_list, data)
            values[key] = combine_audios(audio_list, data)
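
A hypothetical "parts" list covering the four branches above; the type and field names are taken from the code:

parts = [
    {"type": "text", "pattern": "Today's weather:"},        # synthesized with gTTS
    {"type": "file", "path": "intro.mp3"},                  # static audio file
    {"type": "random_text", "pattern": ["Hello!", "Hi!"]},  # one entry picked at random
    {"type": "silent", "duration": 1},                      # seconds of silence
]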
Example #6
def combine_audios(audio_list, data: StepData):
    """
    Funktion, um mehrere Audios zu einer großen Audio zu kombinieren

    :param audio_list: Liste der Dateinamen der einzelnen Audios
    :param data: Daten der Pipeline

    :return: Pfad zu der kombinierten Audio
    """
    audios = [AudioSegment.from_mp3(file_name) for file_name in audio_list]
    combined = AudioSegment.empty()
    for audio in audios:
        combined += audio
    combined_audio_path = resources.new_temp_resource_path(
        data.data["_pipe_id"], "mp3")
    combined.export(combined_audio_path, format="mp3")
    return combined_audio_path
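
The underlying pydub operation, as a self-contained sketch with hypothetical file names:

from pydub import AudioSegment

combined = AudioSegment.empty()
for file_name in ["intro.mp3", "body.mp3"]:
    combined += AudioSegment.from_mp3(file_name)  # pydub overloads + as concatenation
combined.export("combined.mp3", format="mp3")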
Example #7
def _audios_to_audio(audios, step_data: StepData):
    """Concatenates the given audio files into one MP3 via the ffmpeg concat demuxer."""
    with open(
            resources.get_temp_resource_path("input.txt",
                                             step_data.data["_pipe_id"]),
            "w") as file:
        for i in audios:
            file.write("file 'file:" + i + "'\n")
    output = resources.new_temp_resource_path(step_data.data["_pipe_id"],
                                              "mp3")
    args1 = [
        "ffmpeg", "-loglevel", "8", "-f", "concat", "-safe", "0", "-i",
        resources.get_temp_resource_path("input.txt",
                                         step_data.data["_pipe_id"]), "-c",
        "copy", output
    ]
    subprocess.run(args1,
                   stdout=subprocess.PIPE,
                   stderr=subprocess.STDOUT,
                   check=True)
    return output
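
The same concat-demuxer call without the pipeline helpers (hypothetical paths): ffmpeg reads one "file ..." line per clip from the list file and copies the streams without re-encoding.

import subprocess

with open("input.txt", "w") as file:
    for path in ["intro.mp3", "body.mp3"]:
        file.write("file 'file:" + path + "'\n")

subprocess.run(
    ["ffmpeg", "-loglevel", "8", "-f", "concat", "-safe", "0",
     "-i", "input.txt", "-c", "copy", "output.mp3"],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)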
Example #8
def _generate(images, audios, audio_l, step_data: StepData, values: dict):
    """Concatenates the audio segments and renders the image sequence into a video with ffmpeg."""
    try:
        if step_data.get_config("h264_nvenc", False):
            # the ffmpeg child processes need the CUDA libraries for NVENC
            os.environ['LD_LIBRARY_PATH'] = "/usr/local/cuda/lib64"

        # Concatenate audio files
        if values["sequence"].get("audio_breaks", False):
            temp_audios = []
            for idx, s_audio in enumerate(audios):
                temp_audios.append(resources.new_temp_resource_path(step_data.data["_pipe_id"], "wav"))
                args = ["ffmpeg", "-loglevel", "8", "-i", s_audio, temp_audios[idx]]
                subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)

            combined_sound = AudioSegment.empty()
            for idx, i in enumerate(temp_audios):
                sound = AudioSegment.from_file(i, "wav")
                combined_sound += sound
                time_diff = audio_l[idx] - MP3(audios[idx]).info.length
                if time_diff > 0:
                    silence = AudioSegment.silent(duration=time_diff * 1000)
                    combined_sound += silence
            temp_output = resources.new_temp_resource_path(step_data.data["_pipe_id"], "wav")
            combined_sound.export(temp_output, format="wav")

            output = resources.new_temp_resource_path(step_data.data["_pipe_id"], "mp3")
            args = ["ffmpeg", "-loglevel", "8", "-i", temp_output, "-vn", "-ar", "44100", "-ac", "2", "-b:a", "192k",
                    output]
            subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)

        else:
            with open(resources.get_temp_resource_path("input.txt", step_data.data["_pipe_id"]), "w") as file:
                for i in audios:
                    file.write("file 'file:" + i + "'\n")
            output = resources.new_temp_resource_path(step_data.data["_pipe_id"], "mp3")
            args1 = ["ffmpeg", "-loglevel", "8", "-f", "concat", "-safe", "0", "-i",
                     resources.get_temp_resource_path("input.txt", step_data.data["_pipe_id"]),
                     "-c", "copy",
                     output]
            subprocess.run(args1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)

        # Generate video

        output2 = resources.get_out_path(values["out_time"], step_data.get_config("output_path", ""),
                                         step_data.get_config("job_name", ""))
        if step_data.get_config("separate_rendering", False):
            output2 = resources.get_out_path(values["out_time"], step_data.get_config("output_path", ""),
                                             step_data.get_config("job_name", "") + "_0")

        args2 = ["ffmpeg", "-loglevel", "8", "-y"]
        for i in range(0, len(images)):
            args2.extend(("-loop", "1", "-t", str(round(audio_l[i], 2)), "-i", images[i]))

        args2.extend(("-i", output, "-c:a", "copy"))

        filter = ""
        for i in range(0, len(images) - 1):
            filter += f"[{i + 1}]format=yuva444p,fade=d={values['sequence'].get('transitions', 0.8)}:t=in:alpha=1,setpts=PTS-STARTPTS+{_sum_audio_l(audio_l, i)}/TB[f{i}];"
        for j in range(0, len(images) - 1):
            if j == 0:
                filter += "[0][f0]overlay[bg1];"
            elif j == len(images) - 2:
                filter += f"[bg{j}][f{j}]overlay,format=yuv420p[v]"
            else:
                filter += f"[bg{j}][f{j}]overlay[bg{j + 1}];"
        if len(images) > 1:
            if len(images) == 2:
                filter = f"[1]format=yuva444p,fade=d={values['sequence'].get('transitions', 0.8)}:t=in:alpha=1,setpts=PTS-STARTPTS+{_sum_audio_l(audio_l, 0)}/TB[f0];[0][f0]overlay,format=yuv420p[v]"
            args2.extend(("-filter_complex", filter, "-map", "[v]", "-map", str(len(images)) + ":a"))
        else:
            args2.extend(("-pix_fmt", "yuv420p"))
        if step_data.get_config("h264_nvenc", False):
            args2.extend(("-c:v", "h264_nvenc"))

        args2.extend(("-s", "1920x1080", output2))
        subprocess.run(args2, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)

        values["sequence"] = output2

    except subprocess.CalledProcessError as e:
        raise FFmpegError(e.returncode, e.output.decode("utf-8")) from e
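
For illustration, the filter graph the loops above assemble for three images, assuming the default 0.8 s transitions; t0 and t1 stand for the cumulative audio lengths returned by _sum_audio_l:

filter_example = (
    "[1]format=yuva444p,fade=d=0.8:t=in:alpha=1,setpts=PTS-STARTPTS+t0/TB[f0];"
    "[2]format=yuva444p,fade=d=0.8:t=in:alpha=1,setpts=PTS-STARTPTS+t1/TB[f1];"
    "[0][f0]overlay[bg1];"
    "[bg1][f1]overlay,format=yuv420p[v]"
)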
Example #9
def generate_diagram(values: dict, step_data: StepData, prev_paths):
    """
    Legacy implementation of the diagrams for old job configurations
    """
    # values["data"] arrives as a string like "[1.0, 2.0]"; strip the brackets and parse
    data = step_data.format(values["data"])
    data = data[1:len(data) - 1].split(", ")
    data = list(map(float, data))
    days = []
    labels = None
    if values.get("bar_label", None) is not None:
        labels = step_data.format(values["bar_label"])
        labels = labels[1:len(labels) - 1].split(", ")
        labels = list(map(str, labels))
    if step_data.format(values.get("label_use_date", False)):
        now = datetime.now()
        for hop_value in range(len(data)):
            day = now - timedelta(days=hop_value)
            days.insert(0, day.strftime('%d.%m'))
    else:
        days = step_data.format(values["label"])
        days = days.replace("'", "")
        days = days[1:len(days) - 1].split(", ")
        days = list(map(str, days))
        if values.get("label_append", None) is not None:
            replace_cities = values["label_append"]
            for idx, city in enumerate(days):
                for replace_city in replace_cities:
                    if city == replace_city:
                        label_append_value = str(values["label_append_value"])
                        label_append_value = label_append_value.replace(
                            "_idx", str(idx))
                        days[idx] = city + " " + step_data.format(
                            label_append_value)
    plt.rcParams.update(
        {'font.size': step_data.format(values.get("label_size", 18))})
    fig = plt.figure(figsize=[
        step_data.format(values.get("plot_size_x", 20)),
        step_data.format(values.get("plot_size_y", 10))
    ])
    ax = fig.add_subplot(111)

    if step_data.format(values.get("sorted", False)):
        if labels is None:
            days, data = zip(*sorted(zip(days, data)))
        else:
            days, data, labels = zip(*sorted(zip(days, data, labels)))

    for axis in ['top', 'bottom', 'left', 'right']:
        ax.spines[axis].set_linewidth(
            step_data.format(values.get("axis_depth", 1)))

    if values.get("grid_axis", None) is not None:
        ax.grid(axis=step_data.format(values["grid_axis"]),
                color=step_data.format(values.get("grid_color", "grey")))
    if step_data.format(values["plot_type"]) == "bar_chart":
        ax.set_yticks(np.arange(len(days)))
        ax.set_yticklabels(days)
        if step_data.get_config("marked_city", None) is not None:
            for l in ax.get_yticklabels():
                if l.get_text() == step_data.format(
                        step_data.get_config("marked_city", "")):
                    l.set_fontweight(550)
        ax.invert_yaxis()  # labels from top to bottom

        bar_list = plt.barh(
            np.arange(len(days)),
            data,
            color=(step_data.format(values["label_color"].get("r", 0)),
                   step_data.format(values["label_color"].get("g", 0)),
                   step_data.format(values["label_color"].get("b", 0)),
                   step_data.format(values["label_color"].get("t", 1))))

    elif step_data.format(values["plot_type"]) == "column_chart":
        bar_list = plt.bar(
            days,
            data,
            color=(step_data.format(values["label_color"].get("r", 0)),
                   step_data.format(values["label_color"].get("g", 0)),
                   step_data.format(values["label_color"].get("b", 0)),
                   step_data.format(values["label_color"].get("t", 1))))
    else:
        raise ValueError(
            f"unsupported plot_type: {step_data.format(values['plot_type'])}")
    if step_data.format(values.get("use_extended_labels", False)):
        current_value = 0
        x_label_list = [current_value]
        max_value = max(data)
        hop_value = 10
        hop_values = values.get("extended_labels_map", None)
        if hop_values is not None:
            for entry in hop_values:
                if entry["value"] < max_value:
                    hop_value = entry["step"]
        while current_value < max_value:
            current_value = current_value + hop_value
            x_label_list.append(current_value)
        counter = 0
        counters = values.get("extended_labels_append", None)
        if counters is not None:
            for entry in counters:
                if entry["value"] < max_value:
                    counter = entry["amount"]
        while counter != 0:
            current_value = current_value + hop_value
            x_label_list.append(current_value)
            counter = counter - 1
        ax.set_xticks(x_label_list)

    for idx, b in enumerate(bar_list):
        color_not_set = True
        for entry in values["bar_colors"]["list"]:
            if data[idx] > step_data.format(entry["number"]):
                b.set_color(step_data.format(entry["color"]))
                color_not_set = False
        # fall back to the default color if no threshold matched
        if color_not_set:
            b.set_color(step_data.format(values["bar_colors"]["default"]))

    plt.xticks(rotation=step_data.format(values.get("label_rotation", 0)))
    plt.tight_layout()

    if values.get("bar_label", None) is not None:
        rects = ax.patches
        for rect, label, data, days in zip(rects, labels, data, days):
            if step_data.format(values.get("show_bar_label_sign", False)):
                if label[0] != '-' and float(label) != 0.0:
                    label = "+" + label
                if float(label) == 0.0:
                    label = "\u00B10"
            label = str(data) + " / " + label
            if step_data.get_config("marked_city", None) is not None:
                if days == step_data.format(
                        step_data.get_config("marked_city", "")):
                    ax.text(rect.get_width() + 0.4,
                            (rect.get_y() + rect.get_height() / 2) + 0.2,
                            label,
                            fontsize=step_data.format(
                                values["label_fontsize"]),
                            fontweight=550)
                else:
                    ax.text(rect.get_width() + 0.4,
                            (rect.get_y() + rect.get_height() / 2) + 0.2,
                            label,
                            fontsize=step_data.format(
                                values["label_fontsize"]))
    file = resources.new_temp_resource_path(step_data.data["_pipe_id"], "png")
    plt.savefig(file, transparent=True)
    return file
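
A minimal standalone sketch (hypothetical data) of the bar_chart branch above:

import matplotlib.pyplot as plt
import numpy as np

days = ["Berlin", "Hamburg", "München"]
data = [12.0, 7.5, 9.1]
fig, ax = plt.subplots(figsize=(20, 10))
ax.set_yticks(np.arange(len(days)))
ax.set_yticklabels(days)
ax.invert_yaxis()  # labels from top to bottom
ax.barh(np.arange(len(days)), data, color=(0, 0, 0, 1))
plt.tight_layout()
plt.savefig("chart.png", transparent=True)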
Example #10
def _generate(images, audios, audio_l, step_data: StepData, values: dict):
    try:
        if step_data.get_config("h264_nvenc", False):
            os.environ['LD_LIBRARY_PATH'] = "/usr/local/cuda/lib64"

        # Concatenate audio files

        with open(
                resources.get_temp_resource_path("input.txt",
                                                 step_data.data["_pipe_id"]),
                "w") as file:
            for i in audios:
                file.write("file 'file:" + i + "'\n")
        output = resources.new_temp_resource_path(step_data.data["_pipe_id"],
                                                  "mp3")
        args1 = [
            "ffmpeg", "-loglevel", "8", "-f", "concat", "-safe", "0", "-i",
            resources.get_temp_resource_path("input.txt",
                                             step_data.data["_pipe_id"]), "-c",
            "copy", output
        ]
        subprocess.run(args1,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT,
                       check=True)

        # Generate video

        output2 = resources.get_out_path(
            values["out_time"], step_data.get_config("output_path", ""),
            step_data.get_config("job_name", ""))
        if step_data.get_config("separate_rendering", False):
            output2 = resources.get_out_path(
                values["out_time"], step_data.get_config("output_path", ""),
                step_data.get_config("job_name", "") + "_0")

        args2 = ["ffmpeg", "-loglevel", "8", "-y"]
        for i in range(0, len(images)):
            args2.extend(("-loop", "1", "-t", str(round(audio_l[i],
                                                        2)), "-i", images[i]))

        args2.extend(("-i", output, "-c:a", "copy"))

        filter = ""
        for i in range(0, len(images) - 1):
            filter += f"[{i + 1}]format=yuva444p,fade=d={values['sequence'].get('transitions', 0.8)}:t=in:alpha=1,setpts=PTS-STARTPTS+{_sum_audio_l(audio_l, i)}/TB[f{i}];"
        for j in range(0, len(images) - 1):
            if j == 0:
                filter += "[0][f0]overlay[bg1];"
            elif j == len(images) - 2:
                filter += f"[bg{j}][f{j}]overlay,format=yuv420p[v]"
            else:
                filter += f"[bg{j}][f{j}]overlay[bg{j + 1}];"

        if len(images) > 2:
            args2.extend(("-filter_complex", filter, "-map", "[v]", "-map",
                          str(len(images)) + ":a"))
        else:
            args2.extend(("-pix_fmt", "yuv420p"))
        if step_data.get_config("h264_nvenc", False):
            args2.extend(("-c:v", "h264_nvenc"))

        args2.extend(("-s", "1920x1080", output2))
        subprocess.run(args2,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT,
                       check=True)

        values["sequence"] = output2

    except subprocess.CalledProcessError as e:
        raise FFmpegError(e.returncode, e.output.decode("utf-8")) from e
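
The per-image input arguments built above, spelled out for two images with hypothetical display durations:

images = ["img0.png", "img1.png"]
audio_l = [3.0, 2.5]  # seconds each image stays on screen
args2 = ["ffmpeg", "-loglevel", "8", "-y"]
for img, secs in zip(images, audio_l):
    # loop each still image for the duration of its audio segment
    args2.extend(("-loop", "1", "-t", str(round(secs, 2)), "-i", img))
# result: ... -loop 1 -t 3.0 -i img0.png -loop 1 -t 2.5 -i img1.png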