def _combine(sequence_out: list, step_data: StepData, values: dict):
    try:
        args = ["ffmpeg", "-loglevel", "8", "-i"]
        concat = "concat:"
        file_temp = []
        output = resources.get_temp_resource_path("file.mkv", step_data.data["_pipe_id"])

        # Rewrite each clip to an MPEG-TS intermediate so the concat protocol can join them
        for idx, file in enumerate(sequence_out):
            temp_file = resources.get_temp_resource_path(f"temp{idx}.ts", step_data.data["_pipe_id"])
            args2 = ["ffmpeg", "-loglevel", "8", "-i", file, "-c", "copy",
                     "-bsf:v", "h264_mp4toannexb", "-f", "mpegts", temp_file]
            subprocess.run(args2, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
            file_temp.append(temp_file)

        for idx, file in enumerate(file_temp):
            if idx != 0:
                concat += "|"
            concat += file

        args.extend((concat, "-codec", "copy", output))
        subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)

        new_output = resources.get_out_path(values["out_time"], step_data.get_config("output_path", ""),
                                            step_data.get_config("job_name", ""))
        args = ["ffmpeg", "-loglevel", "8", "-i", output, new_output]
        subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
        values["sequence"] = output
    except subprocess.CalledProcessError as e:
        raise FFmpegError(e.returncode, e.output.decode("utf-8")) from e

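# A minimal sketch of the ffmpeg invocations `_combine` issues, assuming two
# hypothetical input clips a.mkv and b.mkv. Each clip is first rewritten to an
# MPEG-TS intermediate so the concat protocol can join the bitstreams without
# re-encoding; the joined file is then re-encoded to the final out path:
#
#   ffmpeg -loglevel 8 -i a.mkv -c copy -bsf:v h264_mp4toannexb -f mpegts temp0.ts
#   ffmpeg -loglevel 8 -i b.mkv -c copy -bsf:v h264_mp4toannexb -f mpegts temp1.ts
#   ffmpeg -loglevel 8 -i "concat:temp0.ts|temp1.ts" -codec copy file.mkv
#   ffmpeg -loglevel 8 -i file.mkv <new_output>
#
# (<new_output> stands for whatever resources.get_out_path returns.)
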
def storing(values: dict, data: StepData):
    """Writes the API data to a JSON file after the `transform` types have run.

    The job name together with today's date is used as the file name.

    :param values: values from the JSON file
    :param data: data from the API
    """
    if values.get("storing", None):
        for value in values["storing"]:
            new_data = _remove_keys(value, data, data.get_data(value["key"], values))
            name = data.format(value["name"])

            if value.get("safe_only_on_change", True):
                try:
                    with resources.open_specific_memory_resource(
                            data.get_config("job_name"), name, False) as fp:
                        old_data = json.loads(fp.read())
                        if old_data == new_data:
                            continue
                except (FileNotFoundError, IndexError):
                    pass

            with open(resources.new_memory_resource_path(data.get_config("job_name"), name), 'w') as fp:
                json.dump(new_data, fp)

            delete_memory_files(data.get_config("job_name"), value["name"],
                                data.get_data(value.get("count", 10), values, int))

def init_pipeline(data: StepData, pipeline_id: str, step_name: str, idx=None, config=None, no_tmp_dir=False):
    # Guard against the default: keys are assigned below, so config must be a dict
    if config is None:
        config = {}

    config["job_name"] = data.get_config("job_name")

    if no_tmp_dir:
        config["attach_mode"] = "combined"
        config["output_path"] = data.get_config("output_path")
    else:
        config["attach_mode"] = "separate"
        config["output_path"] = get_relative_temp_resource_path("", data.data["_pipe_id"])

    if idx is not None:
        config["job_name"] = f"{config['job_name']}_subtask_{idx}"

    config = {**STEPS_BASE_CONFIG, **config}

    # Avoid mutual imports
    from visuanalytics.analytics.control.procedures.pipeline import Pipeline

    return Pipeline(data.data["_job_id"], pipeline_id, step_name, config,
                    attach_mode=True, no_tmp_dir=no_tmp_dir)

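# Hypothetical example of the naming and merge behavior above: a job with
# job_name "weather" and idx=2 runs its attached pipeline as
# "weather_subtask_2", and any key set here overrides the STEPS_BASE_CONFIG
# default via the {**STEPS_BASE_CONFIG, **config} merge.
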
def request_memory(values: dict, data: StepData, name: str, save_key, ignore_testing=False):
    """Reads data for a specific date from a memory file (JSON format).

    :param values: values from the JSON file
    :param data: data from the API
    :param name: test file to be loaded
    :param save_key: key under which the data is stored
    :param ignore_testing: whether the request should run even though testing is `true`
    """
    try:
        if values.get("timedelta", None) is None:
            with resources.open_specific_memory_resource(data.get_config("job_name"), values["name"],
                                                         values.get("use_last", 1)) as fp:
                data.insert_data(save_key, json.loads(fp.read()), values)
        else:
            with resources.open_memory_resource(data.get_config("job_name"), values["name"],
                                                values["timedelta"]) as fp:
                data.insert_data(save_key, json.loads(fp.read()), values)
    except (FileNotFoundError, IndexError):
        api_request(values["alternative"], data, name, save_key, ignore_testing)

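# A hypothetical `values` entry for this step; the key names mirror the
# lookups above, the concrete contents are invented:
#
#   {
#       "type": "request_memory",
#       "name": "weather_data",                      # memory file to read
#       "use_last": 1,                               # take the most recent stored file
#       "alternative": { "type": "request", ... }    # fallback API request on miss
#   }
#
# If "timedelta" is set instead of "use_last", the file for that specific day
# is opened via open_memory_resource.
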
def _copy_and_rename(src_file: str, values: dict, step_data: StepData):
    out_path = resources.path_from_root(step_data.get_config("output_path"))
    values["thumbnail"] = resources.get_out_path(values["out_time"], step_data.get_config("output_path"),
                                                 step_data.get_config("job_name"), format=".png", thumbnail=True)
    shutil.copy(src_file, out_path)
    os.rename(os.path.join(out_path, os.path.basename(src_file)), values["thumbnail"])

def precondition(values: dict, step_data: StepData):
    if values.get("precondition", None):
        if step_data.get_config("testing", False) is False:
            api_func = get_type_func(values["precondition"], Precondition_TYPES)
            api_func(values, step_data)

def request_multiple_custom(values: dict, data: StepData, name: str, save_key, ignore_testing=False):
    """Requests different data from an API.

    :param values: values from the JSON file
    :param data: data from the API
    :param name: test file to be loaded
    :param save_key: key under which the data is stored
    :param ignore_testing: whether the request should run even though testing is `true`
    """
    if data.get_config("testing", False) and not ignore_testing:
        return _load_test_data(values, data, name, save_key)

    if values.get("use_loop_as_key", False):
        data.insert_data(save_key, {}, values)
        for idx, key in enumerate(values["steps_value"]):
            api_request(values["requests"][idx], data, name, f"{save_key}|{key}", ignore_testing)
    else:
        data.insert_data(save_key, [None] * len(values["requests"]), values)
        for idx, value in enumerate(values["requests"]):
            api_request(value, data, name, f"{save_key}|{idx}", ignore_testing)

def request_multiple(values: dict, data: StepData, name: str, save_key, ignore_testing=False):
    """Requests the desired data from an API several times for a variable key.

    :param values: values from the JSON file
    :param data: data from the API
    :param name: test file to be loaded
    :param save_key: key under which the data is stored
    :param ignore_testing: whether the request should run even though testing is `true`
    """
    if data.get_config("testing", False) and not ignore_testing:
        return _load_test_data(values, data, name, save_key)

    if data.get_data(values.get("use_loop_as_key", False), values, bool):
        data.insert_data(save_key, {}, values)
        for _, key in data.loop_array(values["steps_value"], values):
            _fetch(values, data, f"{save_key}|{key}")
    else:
        data.insert_data(save_key, [None] * len(values["steps_value"]), values)
        for idx, _ in data.loop_array(values["steps_value"], values):
            _fetch(values, data, f"{save_key}|{idx}")

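# Sketch of the two result layouts the branches above produce, assuming a
# hypothetical steps_value of ["berlin", "hamburg"]:
#
#   use_loop_as_key = true   ->  save_key holds {"berlin": <response>, "hamburg": <response>}
#   use_loop_as_key = false  ->  save_key holds [<response 0>, <response 1>]
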
def __on_completion(self, values: dict, data: StepData):
    # Set state to ready
    self.__current_step = self.__steps_max

    # Set end time and log the run duration
    self.__end_time = time.time()
    completion_time = round(self.__end_time - self.__start_time, 2)
    logger.info(f"{self.__log_name} {self.id} finished in {completion_time}s")

    # Update DB logs
    self.__update_db(update_log_finish, self.__log_id, self.__log_states["finished"], completion_time)

    if self.__attach_mode:
        return

    # Check and delete video
    delete_video(self.steps_config, self.__config)

    cp_request = data.get_config("on_completion")

    # If on_completion is set in the config, send the request
    if cp_request is not None:
        try:
            logger.info("Send completion notice...")

            # Save video name and thumbnail name to config
            video_name = os.path.basename(values["sequence"])
            data.insert_data("_conf|video_path", values["sequence"], {})
            data.insert_data("_conf|video_name", video_name, {})
            data.insert_data("_conf|video_id", os.path.splitext(video_name)[0], {})

            if isinstance(values["thumbnail"], str):
                thumbnail_name = os.path.basename(values["thumbnail"])
                data.insert_data("_conf|thumbnail_path", values["thumbnail"], {})
                data.insert_data("_conf|thumbnail_name", thumbnail_name, {})
                data.insert_data("_conf|thumbnail_id", os.path.splitext(thumbnail_name)[0], {})

            # Make the request
            api_request(cp_request, data, "", "_comp", True)

            logger.info("Completion report sent out!")
        except Exception:
            logger.exception("Completion report could not be sent: ")

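# After the inserts above, an on_completion request template can reference
# these config keys (names taken from the insert_data calls; the thumbnail
# keys only exist when a thumbnail was generated):
#
#   _conf|video_path, _conf|video_name, _conf|video_id,
#   _conf|thumbnail_path, _conf|thumbnail_name, _conf|thumbnail_id
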
def link(values: dict, step_data: StepData):
    """Checks which type of video generation is configured and calls the matching type method.

    :param values: values from the JSON file
    :param step_data: data from the API
    :return: path to the output video
    :rtype: str
    """
    out_images, out_audios, out_audio_l = [], [], []
    attach_mode = step_data.get_config("attach_mode", "")
    seq_func = get_type_func(values["sequence"], SEQUENCE_TYPES)
    seq_func(values, step_data, out_images, out_audios, out_audio_l)

    if step_data.get_config("attach", None) is not None and not attach_mode:
        if not step_data.get_config("separate_rendering", False):
            for item in step_data.get_config("attach", None):
                pipeline = init_pipeline(step_data, step_data.data["_pipe_id"], item["steps"],
                                         config=item.get("config", {}), no_tmp_dir=True)
                pipeline.start()

                # Add images and audios from the pipeline
                extend_out_config(pipeline.config["sequence"], out_images, out_audios, out_audio_l)

            _generate(out_images, out_audios, out_audio_l, step_data, values)
        else:
            # Save and manipulate the out path (to save the video to the tmp dir)
            out_path = step_data.get_config("output_path")
            step_data.data["_conf"]["output_path"] = get_relative_temp_resource_path("", step_data.data["_pipe_id"])

            _generate(out_images, out_audios, out_audio_l, step_data, values)

            # Restore out_path
            step_data.data["_conf"]["output_path"] = out_path

            sequence_out = [values["sequence"]]
            for idx, item in enumerate(step_data.get_config("attach", None)):
                pipeline = init_pipeline(step_data, uuid.uuid4().hex, item["steps"], idx, item.get("config", {}))
                pipeline.start()
                sequence_out.append(pipeline.config["sequence"])

            _combine(sequence_out, step_data, values)
    else:
        if attach_mode == "combined":
            values["sequence"] = {
                "out_images": out_images,
                "out_audios": out_audios,
                "out_audio_l": out_audio_l
            }
        else:
            _generate(out_images, out_audios, out_audio_l, step_data, values)

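# Hypothetical config fragment for the attach branch above (key names taken
# from the lookups in link(), values invented):
#
#   "attach": [
#       { "steps": "weather_intro", "config": { "speech_speed": 1.1 } }
#   ],
#   "separate_rendering": false
#
# With separate_rendering false, each attached pipeline runs in combined mode
# and its images/audios are merged into this video via extend_out_config; with
# true, each attached pipeline renders its own clip and _combine() joins the
# resulting videos.
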
def thumbnail(values: dict, step_data: StepData):
    if step_data.get_config("thumbnail", False) is False:
        return

    thumbnail = values["thumbnail"]
    seq_func = get_type_func(values["thumbnail"], THUMBNAIL_TYPES)
    seq_func(values, step_data)

    if "size_x" in thumbnail and "size_y" in thumbnail:
        size_x = step_data.get_data(thumbnail["size_x"], None, numbers.Number)
        size_y = step_data.get_data(thumbnail["size_y"], None, numbers.Number)
        source_img = Image.open(values["thumbnail"])
        source_img = source_img.resize([size_x, size_y], Image.LANCZOS)
        source_img.save(values["thumbnail"])

def request(values: dict, data: StepData, name: str, save_key, ignore_testing=False):
    """Requests the desired data from an API once.

    :param values: values from the JSON file
    :param data: data from the API
    :param name: test file to be loaded
    :param save_key: key under which the data is stored
    :param ignore_testing: whether the request should run even though testing is `true`
    """
    if data.get_config("testing", False) and not ignore_testing:
        return _load_test_data(values, data, name, save_key)

    _fetch(values, data, save_key)

def _generate(images, audios, audio_l, step_data: StepData, values: dict):
    try:
        if step_data.get_config("h264_nvenc", False):
            os.environ['LD_LIBRARY_PATH'] = "/usr/local/cuda/lib64"

        # Concatenate audio files
        if values["sequence"].get("audio_breaks", False):
            temp_audios = []
            for idx, s_audio in enumerate(audios):
                temp_audios.append(resources.new_temp_resource_path(step_data.data["_pipe_id"], "wav"))
                args = ["ffmpeg", "-loglevel", "8", "-i", s_audio, temp_audios[idx]]
                subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)

            # Pad each clip with silence until it fills its display time
            combined_sound = AudioSegment.empty()
            for idx, i in enumerate(temp_audios):
                sound = AudioSegment.from_file(i, "wav")
                combined_sound += sound
                time_diff = audio_l[idx] - MP3(audios[idx]).info.length
                if time_diff > 0:
                    silence = AudioSegment.silent(duration=time_diff * 1000)
                    combined_sound += silence

            temp_output = resources.new_temp_resource_path(step_data.data["_pipe_id"], "wav")
            combined_sound.export(temp_output, format="wav")
            output = resources.new_temp_resource_path(step_data.data["_pipe_id"], "mp3")
            args = ["ffmpeg", "-loglevel", "8", "-i", temp_output, "-vn", "-ar", "44100", "-ac", "2",
                    "-b:a", "192k", output]
            subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
        else:
            with open(resources.get_temp_resource_path("input.txt", step_data.data["_pipe_id"]), "w") as file:
                for i in audios:
                    file.write("file 'file:" + i + "'\n")

            output = resources.new_temp_resource_path(step_data.data["_pipe_id"], "mp3")
            args1 = ["ffmpeg", "-loglevel", "8", "-f", "concat", "-safe", "0", "-i",
                     resources.get_temp_resource_path("input.txt", step_data.data["_pipe_id"]),
                     "-c", "copy", output]
            subprocess.run(args1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)

        # Generate video
        output2 = resources.get_out_path(values["out_time"], step_data.get_config("output_path", ""),
                                         step_data.get_config("job_name", ""))
        if step_data.get_config("separate_rendering", False):
            output2 = resources.get_out_path(values["out_time"], step_data.get_config("output_path", ""),
                                             step_data.get_config("job_name", "") + "_0")

        args2 = ["ffmpeg", "-loglevel", "8", "-y"]
        for i in range(0, len(images)):
            args2.extend(("-loop", "1", "-t", str(round(audio_l[i], 2)), "-i", images[i]))
        args2.extend(("-i", output, "-c:a", "copy"))

        # Build the crossfade filter graph: fade each image in at its offset, then chain the overlays
        filter_complex = ""
        for i in range(0, len(images) - 1):
            filter_complex += f"[{i + 1}]format=yuva444p,fade=d={values['sequence'].get('transitions', 0.8)}:t=in:alpha=1,setpts=PTS-STARTPTS+{_sum_audio_l(audio_l, i)}/TB[f{i}];"
        for j in range(0, len(images) - 1):
            if j == 0:
                filter_complex += "[0][f0]overlay[bg1];"
            elif j == len(images) - 2:
                filter_complex += f"[bg{j}][f{j}]overlay,format=yuv420p[v]"
            else:
                filter_complex += f"[bg{j}][f{j}]overlay[bg{j + 1}];"

        if len(images) > 1:
            # Two images need a single fade/overlay that ends directly in [v]
            if len(images) == 2:
                filter_complex = f"[1]format=yuva444p,fade=d={values['sequence'].get('transitions', 0.8)}:t=in:alpha=1,setpts=PTS-STARTPTS+{_sum_audio_l(audio_l, 0)}/TB[f0];[0][f0]overlay,format=yuv420p[v]"
            args2.extend(("-filter_complex", filter_complex, "-map", "[v]", "-map", str(len(images)) + ":a"))
        else:
            args2.extend(("-pix_fmt", "yuv420p"))

        if step_data.get_config("h264_nvenc", False):
            args2.extend(("-c:v", "h264_nvenc"))

        args2.extend(("-s", "1920x1080", output2))
        subprocess.run(args2, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
        values["sequence"] = output2
    except subprocess.CalledProcessError as e:
        raise FFmpegError(e.returncode, e.output.decode("utf-8")) from e

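# Shape of the filter graph the loops above emit for three images, assuming
# _sum_audio_l returns the accumulated display time and using hypothetical
# offsets of 5.0s and 9.0s with the default transition of 0.8s:
#
#   [1]format=yuva444p,fade=d=0.8:t=in:alpha=1,setpts=PTS-STARTPTS+5.0/TB[f0];
#   [2]format=yuva444p,fade=d=0.8:t=in:alpha=1,setpts=PTS-STARTPTS+9.0/TB[f1];
#   [0][f0]overlay[bg1];
#   [bg1][f1]overlay,format=yuv420p[v]
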
def generate_diagram(values: dict, step_data: StepData, prev_paths):
    """Old implementation of the diagrams, kept for old job configurations."""
    data = step_data.format(values["data"])
    data = data[1:len(data) - 1].split(", ")
    data = list(map(float, data))
    days = []
    labels = None

    if values.get("bar_label", None) is not None:
        labels = step_data.format(values["bar_label"])
        labels = labels[1:len(labels) - 1].split(", ")
        labels = list(map(str, labels))

    if step_data.format(values.get("label_use_date", False)):
        now = datetime.now()
        for hop_value in range(len(data)):
            day = now - timedelta(days=hop_value)
            days.insert(0, day.strftime('%d.%m'))
    else:
        days = step_data.format(values["label"])
        days = days.replace("'", "")
        days = days[1:len(days) - 1].split(", ")
        days = list(map(str, days))
        if values.get("label_append", None) is not None:
            replace_cities = values["label_append"]
            for idx, city in enumerate(days):
                for replace_city in replace_cities:
                    if city == replace_city:
                        label_append_value = str(values["label_append_value"])
                        label_append_value = label_append_value.replace("_idx", str(idx))
                        days[idx] = city + " " + step_data.format(label_append_value)

    plt.rcParams.update({'font.size': step_data.format(values.get("label_size", 18))})
    fig = plt.figure(figsize=[step_data.format(values.get("plot_size_x", 20)),
                              step_data.format(values.get("plot_size_y", 10))])
    ax = fig.add_subplot(111)

    if step_data.format(values.get("sorted", False)):
        if labels is None:
            days, data = zip(*sorted(zip(days, data)))
        else:
            days, data, labels = zip(*sorted(zip(days, data, labels)))

    for axis in ['top', 'bottom', 'left', 'right']:
        ax.spines[axis].set_linewidth(step_data.format(values.get("axis_depth", 1)))

    if values.get("grid_axis", None) is not None:
        ax.grid(axis=step_data.format(values["grid_axis"]),
                color=step_data.format(values.get("grid_color", "grey")))

    if step_data.format(values["plot_type"]) == "bar_chart":
        ax.set_yticks(np.arange(len(days)))
        ax.set_yticklabels(days)
        if step_data.get_config("marked_city", None) is not None:
            for tick_label in ax.get_yticklabels():
                if tick_label.get_text() == step_data.format(step_data.get_config("marked_city", "")):
                    tick_label.set_fontweight(550)
        ax.invert_yaxis()  # labels from top to bottom
        bar_list = plt.barh(np.arange(len(days)), data,
                            color=(step_data.format(values["label_color"].get("r", 0)),
                                   step_data.format(values["label_color"].get("g", 0)),
                                   step_data.format(values["label_color"].get("b", 0)),
                                   step_data.format(values["label_color"].get("t", 1))))
    elif step_data.format(values["plot_type"]) == "column_chart":
        bar_list = plt.bar(days, data,
                           color=(step_data.format(values["label_color"].get("r", 0)),
                                  step_data.format(values["label_color"].get("g", 0)),
                                  step_data.format(values["label_color"].get("b", 0)),
                                  step_data.format(values["label_color"].get("t", 1))))
    else:
        # The original bare `raise` here would itself fail ("no active exception")
        raise ValueError(f"unsupported plot_type: {step_data.format(values['plot_type'])}")

    if step_data.format(values.get("use_extended_labels", False)):
        current_value = 0
        x_label_list = [current_value]
        max_value = max(data)
        hop_value = 10
        hop_values = values.get("extended_labels_map", None)
        if hop_values is not None:
            for entry in hop_values:
                if entry["value"] < max_value:
                    hop_value = entry["step"]
        while current_value < max_value:
            current_value = current_value + hop_value
            x_label_list.append(current_value)
        counter = 0
        counters = values.get("extended_labels_append", None)
        if counters is not None:
            for entry in counters:
                if entry["value"] < max_value:
                    counter = entry["amount"]
        while counter != 0:
            current_value = current_value + hop_value
            x_label_list.append(current_value)
            counter = counter - 1
        ax.set_xticks(x_label_list)

    for idx, b in enumerate(bar_list):
        color_not_set = True
        for entry in values["bar_colors"]["list"]:
            if data[idx] > step_data.format(entry["number"]):
                b.set_color(step_data.format(entry["color"]))
                color_not_set = False
        if color_not_set:
            b.set_color(step_data.format(values["bar_colors"]["default"]))

    plt.xticks(rotation=step_data.format(values.get("label_rotation", 0)))
    plt.tight_layout()

    if values.get("bar_label", None) is not None:
        rects = ax.patches
        for rect, label, value, day in zip(rects, labels, data, days):
            if step_data.format(values.get("show_bar_label_sign", False)):
                if label[0] != '-' and float(label) != 0.0:
                    label = "+" + label
                if float(label) == 0.0:
                    label = "\u00B10"
                label = str(value) + " / " + label
            if step_data.get_config("marked_city", None) is not None:
                if day == step_data.format(step_data.get_config("marked_city", "")):
                    ax.text(rect.get_width() + 0.4, (rect.get_y() + rect.get_height() / 2) + 0.2, label,
                            fontsize=step_data.format(values["label_fontsize"]), fontweight=550)
                else:
                    ax.text(rect.get_width() + 0.4, (rect.get_y() + rect.get_height() / 2) + 0.2, label,
                            fontsize=step_data.format(values["label_fontsize"]))

    file = resources.new_temp_resource_path(step_data.data["_pipe_id"], "png")
    plt.savefig(file, transparent=True)
    return file

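# Hypothetical minimal `values` block for the bar_chart branch above; key
# names are taken from the lookups in generate_diagram, contents invented.
# Note that "data" and "label" must format to stringified lists (e.g.
# "[12.0, 7.0, 3.0]"), since the function strips the brackets and splits on
# ", ":
#
#   {
#       "type": "diagram",
#       "plot_type": "bar_chart",
#       "data": "{_req|cases}",
#       "label": "{_req|cities}",
#       "label_color": { "r": 0.2, "g": 0.4, "b": 0.8, "t": 1 },
#       "bar_colors": { "list": [ { "number": 100, "color": "red" } ],
#                       "default": "grey" }
#   }
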
def _generate(images, audios, audio_l, step_data: StepData, values: dict):
    try:
        if step_data.get_config("h264_nvenc", False):
            os.environ['LD_LIBRARY_PATH'] = "/usr/local/cuda/lib64"

        # Concatenate audio files
        with open(resources.get_temp_resource_path("input.txt", step_data.data["_pipe_id"]), "w") as file:
            for i in audios:
                file.write("file 'file:" + i + "'\n")

        output = resources.new_temp_resource_path(step_data.data["_pipe_id"], "mp3")
        args1 = ["ffmpeg", "-loglevel", "8", "-f", "concat", "-safe", "0", "-i",
                 resources.get_temp_resource_path("input.txt", step_data.data["_pipe_id"]),
                 "-c", "copy", output]
        subprocess.run(args1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)

        # Generate video
        output2 = resources.get_out_path(values["out_time"], step_data.get_config("output_path", ""),
                                         step_data.get_config("job_name", ""))
        if step_data.get_config("separate_rendering", False):
            output2 = resources.get_out_path(values["out_time"], step_data.get_config("output_path", ""),
                                             step_data.get_config("job_name", "") + "_0")

        args2 = ["ffmpeg", "-loglevel", "8", "-y"]
        for i in range(0, len(images)):
            args2.extend(("-loop", "1", "-t", str(round(audio_l[i], 2)), "-i", images[i]))
        args2.extend(("-i", output, "-c:a", "copy"))

        filter_complex = ""
        for i in range(0, len(images) - 1):
            filter_complex += f"[{i + 1}]format=yuva444p,fade=d={values['sequence'].get('transitions', 0.8)}:t=in:alpha=1,setpts=PTS-STARTPTS+{_sum_audio_l(audio_l, i)}/TB[f{i}];"
        for j in range(0, len(images) - 1):
            if j == 0:
                filter_complex += "[0][f0]overlay[bg1];"
            elif j == len(images) - 2:
                filter_complex += f"[bg{j}][f{j}]overlay,format=yuv420p[v]"
            else:
                filter_complex += f"[bg{j}][f{j}]overlay[bg{j + 1}];"

        if len(images) > 2:
            args2.extend(("-filter_complex", filter_complex, "-map", "[v]", "-map", str(len(images)) + ":a"))
        else:
            args2.extend(("-pix_fmt", "yuv420p"))

        if step_data.get_config("h264_nvenc", False):
            args2.extend(("-c:v", "h264_nvenc"))

        args2.extend(("-s", "1920x1080", output2))
        subprocess.run(args2, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
        values["sequence"] = output2
    except subprocess.CalledProcessError as e:
        raise FFmpegError(e.returncode, e.output.decode("utf-8")) from e