Example #1
def two_pass_convert(input_path: str, output_path: str,
                     video_settings: GifSettings) -> List[FfmpegTask]:
    two_pass_args = video_settings.ffmpeg_options_two_pass
    task1 = FfmpegTask(global_options=["-y"],
                       inputs={input_path: None},
                       outputs={os.devnull: two_pass_args[0]})
    task2 = FfmpegTask(global_options=["-y"],
                       inputs={input_path: None},
                       outputs={output_path: two_pass_args[1]})
    return [task1, task2]
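These two tasks follow ffmpeg's usual two-pass pattern: the first pass only gathers rate-control statistics and throws its output away (hence os.devnull), and the second pass re-encodes using those statistics to hit the requested bitrate. A minimal standalone sketch of the same idea, assuming typical -pass 1/-pass 2 bitrate options and plain subprocess execution (the real arguments come from GifSettings.ffmpeg_options_two_pass, which is not shown on this page):

import os
import subprocess

def two_pass_encode_sketch(input_path: str, output_path: str, bitrate: str = "1500k") -> None:
    # Hypothetical helper, not part of the project: a generic ffmpeg two-pass encode.
    first_pass = ["ffmpeg", "-y", "-i", input_path,
                  "-b:v", bitrate, "-pass", "1", "-an", "-f", "null", os.devnull]
    second_pass = ["ffmpeg", "-y", "-i", input_path,
                   "-b:v", bitrate, "-pass", "2", output_path]
    subprocess.run(first_pass, check=True)   # writes ffmpeg2pass-*.log statistics
    subprocess.run(second_pass, check=True)  # re-encodes using those statistics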
Example #2
 async def two_pass_convert(self, video_path: str,
                            gif_settings: GifSettings):
     # If it's too big, do a 2 pass run
     two_pass_filename = random_sandbox_video_path()
     # First pass
     two_pass_args = gif_settings.ffmpeg_options_two_pass
     task1 = FfmpegTask(global_options=["-y"],
                        inputs={video_path: None},
                        outputs={os.devnull: two_pass_args[0]})
     await self.worker.await_task(task1)
     task2 = FfmpegTask(global_options=["-y"],
                        inputs={video_path: None},
                        outputs={two_pass_filename: two_pass_args[1]})
     await self.worker.await_task(task2)
     return two_pass_filename
Example #3
 async def decompose_video(self, video_path: str, decompose_dir_path: str):
     task = FfmpegTask(
         inputs={video_path: None},
         outputs={f"{decompose_dir_path}/out%d.png": "-vf fps=5 -vsync 0"},
         global_options="-y"
     )
     await self.worker.await_task(task)
Example #4
 async def merge_messages(
         self, chat: Chat, cmd_message: Message,
         messages_to_merge: List[Message]) -> Optional[List[Message]]:
     if len(messages_to_merge) < 2:
         error_text = \
             "Merge commands require at least 2 videos to merge. " \
             "Please reply to a message, and provide telegram links to the other messages"
         return [await self.send_text_reply(chat, cmd_message, error_text)]
     num_files = len(messages_to_merge)
     filter_args = "".join(
         f"[{x}:v][{x}:a]" for x in range(num_files)
     ) + f" concat=n={num_files}:v=1:a=1 [v] [a]"
     output_args = f"-filter_complex \"{filter_args}\" -map \"[v]\" -map \"[a]\" -vsync 2"
     async with self.progress_message(chat, cmd_message, "Merging videos"):
         file_paths = await self.align_video_dimensions(
             [m.message_data.file_path for m in messages_to_merge])
         output_path = random_sandbox_video_path()
         task = FfmpegTask(
             inputs={file_path: None
                     for file_path in file_paths},
             outputs={output_path: output_args})
         await self.worker.await_task(task)
         tags = messages_to_merge[0].tags(self.database)
         tags.merge_all(
             [msg.tags(self.database) for msg in messages_to_merge[1:]])
         return [
             await self.send_video_reply(chat, cmd_message, output_path,
                                         tags)
         ]
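The filter string pairs each input's video and audio streams and feeds them to the concat filter, which emits the joined streams under the [v] and [a] labels that -map then selects. For two inputs the generated string evaluates to (illustration only, not project code):

num_files = 2
filter_args = "".join(f"[{x}:v][{x}:a]" for x in range(num_files)) \
    + f" concat=n={num_files}:v=1:a=1 [v] [a]"
assert filter_args == "[0:v][0:a][1:v][1:a] concat=n=2:v=1:a=1 [v] [a]"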
Example #5
 async def on_new_message(self, chat: Chat, message: Message):
     # If a message has text saying to rotate or flip, and is a reply to a video, then rotate or flip it
     # `rotate left`, `rotate right`, `flip horizontal`?, `rotate 90`, `rotate 180`
     text_clean = message.text.strip().lower().replace("-", "")
     if text_clean.startswith("rotate"):
         transpose = self.get_rotate_direction(
             text_clean[len("rotate"):].strip())
     elif text_clean.startswith("flip"):
         transpose = self.get_flip_direction(
             text_clean[len("flip"):].strip())
     else:
         return
     video = find_video_for_message(chat, message)
     if video is None:
         return [
             await self.send_text_reply(
                 chat, message,
                 "Cannot work out which video you want to rotate/flip.")
         ]
     if transpose is None:
         return [
             await self.send_text_reply(
                 chat, message,
                 "I do not understand this rotate/flip command.")
         ]
     async with self.progress_message(chat, message,
                                      "Rotating or flipping video.."):
         output_path = random_sandbox_video_path()
         task = FfmpegTask(inputs={video.message_data.file_path: None},
                           outputs={output_path: f"-vf \"{transpose}\""})
         await self.worker.await_task(task)
         return [
             await self.send_video_reply(chat, message, output_path,
                                         video.tags(self.database))
         ]
Example #6
 async def cut_out_video(self, video: Message, start: str, end: str) -> str:
     first_part_path = random_sandbox_video_path()
     second_part_path = random_sandbox_video_path()
     task1 = FfmpegTask(inputs={video.message_data.file_path: None},
                        outputs={first_part_path: f"-to {start}"})
     task2 = FfmpegTask(inputs={video.message_data.file_path: None},
                        outputs={second_part_path: f"-ss {end}"})
     await self.worker.await_tasks([task1, task2])
     inputs_file = random_sandbox_video_path("txt")
     with open(inputs_file, "w") as f:
         f.write(
             f"file '{first_part_path.split('/')[1]}'\nfile '{second_part_path.split('/')[1]}'"
         )
     output_path = random_sandbox_video_path()
     task_concat = FfmpegTask(inputs={inputs_file: "-safe 0 -f concat"},
                              outputs={output_path: "-c copy"})
     await self.worker.await_task(task_concat)
     return output_path
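cut_out_video renders the piece before start and the piece after end as two separate files, then stitches them with ffmpeg's concat demuxer, which reads a text file of "file '<path>'" lines and (with -c copy) joins the parts without re-encoding. A standalone sketch of that final step using subprocess, with hypothetical paths:

import subprocess

def concat_with_demuxer_sketch(part_paths: list, list_path: str, output_path: str) -> None:
    # Hypothetical helper: relative paths in the list file are resolved against
    # the directory containing the list file, which is why cut_out_video writes
    # only the file names into inputs_file.
    with open(list_path, "w") as f:
        f.write("\n".join(f"file '{p}'" for p in part_paths))
    subprocess.run(["ffmpeg", "-y", "-safe", "0", "-f", "concat", "-i", list_path,
                    "-c", "copy", output_path], check=True)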
Example #7
 async def single_pass_convert(self, video_path: str,
                               gif_settings: GifSettings):
     first_pass_filename = random_sandbox_video_path()
     # first attempt
     ffmpeg_args = gif_settings.ffmpeg_options_one_pass
     task = FfmpegTask(inputs={video_path: None},
                       outputs={first_pass_filename: ffmpeg_args})
     await self.worker.await_task(task)
     return first_pass_filename
Example #8
 async def convert_file(self, video_path: str) -> str:
     if video_path.endswith(".gif"):
         return await self.convert_video_to_telegram_gif(video_path)
     else:
         processed_path = random_sandbox_video_path()
         task = FfmpegTask(inputs={video_path: None},
                           outputs={processed_path: "-qscale 0"})
         await self.worker.await_task(task)
         return processed_path
Example #9
 async def cut_video(self, video: Message, start: Optional[str],
                     end: Optional[str]) -> str:
     new_path = random_sandbox_video_path()
     out_string = (f"-ss {start}" if start is not None else
                   "") + " " + (f"-to {end}" if end is not None else "")
     task = FfmpegTask(inputs={video.message_data.file_path: None},
                       outputs={new_path: out_string})
     await self.worker.await_task(task)
     return new_path
Example #10
def add_audio_track_task(input_path: str, output_path: str) -> FfmpegTask:
    return FfmpegTask(global_options=["-f lavfi"],
                      inputs={
                          "aevalsrc=0": None,
                          input_path: None
                      },
                      outputs={
                          output_path:
                          "-qscale:v 0 -acodec aac -map 0:0 -map 1:0 -shortest"
                      })
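This task adds a silent audio track to a video that has none: input 0 is the lavfi generator aevalsrc=0 (endless silence), input 1 is the real video, -map 0:0 and -map 1:0 pick one stream from each, and -shortest trims the silence to the video's length. Roughly the command line the task describes, sketched with subprocess (option ordering, and the added -y, are assumptions about how FfmpegTask renders its arguments):

import subprocess

def add_silent_audio_sketch(input_path: str, output_path: str) -> None:
    # Hypothetical stand-in for executing the FfmpegTask above.
    subprocess.run(["ffmpeg", "-y",
                    "-f", "lavfi", "-i", "aevalsrc=0",   # input 0: silent audio
                    "-i", input_path,                    # input 1: original video
                    "-qscale:v", "0", "-acodec", "aac",
                    "-map", "0:0", "-map", "1:0", "-shortest",
                    output_path], check=True)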
Example #11
 async def detect_crop(self, video_path: str) -> Optional[str]:
     task = FfmpegTask(inputs={video_path: None},
                       outputs={"-": "-vf cropdetect=24:16:0 -f null"})
     output, error = await self.worker.await_task(task)
     crop_match = re.compile(r"crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+")
     last_match = None
     for last_match in crop_match.finditer(error):
         pass
     if last_match is None:
         return None
     return last_match.group(0)
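cropdetect reports its suggestions on ffmpeg's stderr rather than stdout, which is why the regex scans error and keeps only the last match, the detector's final and most settled estimate. An illustrative parse of one such stderr line (the numbers are made up):

import re

sample_line = ("[Parsed_cropdetect_0 @ 0x0] x1:0 x2:639 y1:63 y2:415 "
               "w:640 h:352 x:0 y:64 pts:12345 t:4.1 crop=640:352:0:64")
match = re.search(r"crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+", sample_line)
assert match is not None and match.group(0) == "crop=640:352:0:64"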
Example #12
 async def scale_and_pad_to_dimensions(self, file_path: str,
                                       dimensions: Tuple[int, int]) -> str:
     orig_dimensions = await self.get_video_dimensions(file_path)
     if orig_dimensions == dimensions:
         return file_path
     output_path = random_sandbox_video_path()
     x, y = dimensions
     args = f"-vf \"scale={x}:{y}:force_original_aspect_ratio=decrease,pad={x}:{y}:(ow-iw)/2:(oh-ih)/2,setsar=1\""
     task = FfmpegTask(inputs={file_path: None},
                       outputs={output_path: args})
     await self.worker.await_task(task)
     return output_path
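The filter chain scales the clip to fit inside the target box without distorting it (force_original_aspect_ratio=decrease), pads the leftover area with centred black bars, and resets the sample aspect ratio. A rough sketch of the resulting geometry, ignoring ffmpeg's even-dimension rounding:

def scaled_and_padded_geometry_sketch(src, target):
    # Hypothetical helper mirroring scale=W:H:force_original_aspect_ratio=decrease
    # followed by pad=W:H:(ow-iw)/2:(oh-ih)/2.
    sw, sh = src
    tw, th = target
    factor = min(tw / sw, th / sh)
    fitted = (round(sw * factor), round(sh * factor))
    offset = ((tw - fitted[0]) // 2, (th - fitted[1]) // 2)
    return fitted, offset

# e.g. a 640x480 clip aimed at 1280x720 is scaled to 960x720 and then
# padded with 160 black pixels on the left and on the right.
print(scaled_and_padded_geometry_sketch((640, 480), (1280, 720)))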
Example #13
def video_to_video(input_path: str, output_path: str,
                   video_settings: Optional[GifSettings]) -> List[FfmpegTask]:
    if not video_settings:
        return [
            FfmpegTask(inputs={input_path: None},
                       outputs={output_path: "-qscale 0"})
        ]
    if video_settings.bitrate:
        tasks = two_pass_convert(input_path, output_path, video_settings)
    else:
        tasks = [single_pass_convert(input_path, output_path, video_settings)]
    return tasks
Example #14
 async def on_new_message(self, chat: Chat, message: Message):
     # If a message has text saying to crop, some percentages maybe?
     # And is a reply to a video, then crop it
     text_clean = message.text.lower().strip()
     if not text_clean.startswith("crop"):
         return
     video = find_video_for_message(chat, message)
     if video is None:
         return [
             await self.send_text_reply(
                 chat, message,
                 "I'm not sure which video you would like to crop.")
         ]
     crop_args = text_clean[len("crop"):].strip()
     if crop_args.lower() == "auto":
         async with self.progress_message(chat, message,
                                          "Detecting auto crop settings"):
             crop_string = await self.detect_crop(
                 video.message_data.file_path)
         if crop_string is None:
             return [
                 await self.send_text_reply(
                     chat, message, "That video could not be auto cropped.")
             ]
     else:
         crop_string = self.parse_crop_input(crop_args)
     if crop_string is None:
         return [
             await self.send_text_reply(
                 chat, message, "I don't understand this crop command. "
                 "Please specify what percentage to cut off the left, right, top, bottom. "
                 "Alternatively specify the desired percentage for the width and height. "
                 "Use the format `crop left 20% right 20% top 10%`. "
                 "If the video has black bars you wish to crop, just use `crop auto`"
             )
         ]
     output_path = random_sandbox_video_path()
     async with self.progress_message(chat, message, "Cropping video"):
         task = FfmpegTask(inputs={video.message_data.file_path: None},
                           outputs={
                               output_path:
                               f"-filter:v \"{crop_string}\" -c:a copy"
                           })
         await self.worker.await_task(task)
         return [
             await self.send_video_reply(chat, message, output_path,
                                         video.tags(self.database))
         ]
Example #15
 async def on_new_message(self, chat: Chat,
                          message: Message) -> Optional[List[Message]]:
     clean_text = message.text.strip().lower()
     if clean_text != "reverse":
         return None
     video = find_video_for_message(chat, message)
     if video is None:
         return [
             await self.send_text_reply(
                 chat, message,
                 "Please reply to the video you want to reverse")
         ]
     output_path = random_sandbox_video_path()
     reverse_task = FfmpegTask(
         inputs={video.message_data.file_path: None},
         outputs={output_path: "-vf reverse -af areverse"})
     async with self.progress_message(chat, message, "Reversing video"):
         await self.worker.await_task(reverse_task)
         return [
             await self.send_video_reply(chat, message, output_path,
                                         video.tags(self.database))
         ]
Example #16
 async def on_new_message(self, chat: Chat,
                          message: Message) -> Optional[List[Message]]:
     text_clean = message.text.lower().strip()
     if text_clean not in [
             "stabilise", "stabilize", "stab", "deshake", "unshake"
     ]:
         return
     video = find_video_for_message(chat, message)
     if video is None:
         return [
             await self.send_text_reply(
                 chat, message,
                 "I'm not sure which video you would like to stabilise.")
         ]
     output_path = random_sandbox_video_path()
     async with self.progress_message(chat, message, "Stabilising video"):
         task = FfmpegTask(inputs={video.message_data.file_path: None},
                           outputs={output_path: "-vf deshake"})
         await self.worker.await_task(task)
         return [
             await self.send_video_reply(chat, message, output_path,
                                         video.tags(self.database))
         ]
Example #17
def single_pass_convert(input_path: str, output_path: str,
                        video_settings: GifSettings) -> FfmpegTask:
    # first attempt
    ffmpeg_args = video_settings.ffmpeg_options_one_pass
    return FfmpegTask(inputs={input_path: None},
                      outputs={output_path: ffmpeg_args})