Example #1
0
def movie_diagnostics_list_duplicates(filter_mov=None):
    database = MovieDatabaseSingleton().db()
    duplicate_imdb = database.find_duplicates("imdb")
    print("scanning for duplicate movies")
    if not duplicate_imdb:
        print("found no duplicate movies")
        return
    print("found duplicate movies:")
    omdb = OMDb()
    for imdb_id in duplicate_imdb:
        dup_mov = []
        for mov in duplicate_imdb[imdb_id]:
            if filter_mov and filter_mov.lower() not in mov.lower():
                continue
            if database.is_removed(mov):
                continue
            if any(x.value.lower() in mov.lower() for x in AllowedDuplicate):
                continue
            dup_mov.append(mov)
        if len(dup_mov) > 1:
            resp = omdb.movie_search(imdb_id=imdb_id)
            if resp.title and resp.year:
                pfcs(f"g[{imdb_id}] (OMDb: b[{resp.title} - {resp.year}] )")
            else:
                print(imdb_id + ":")
            for mov in dup_mov:
                print("   " + mov)
Example #2
0
def tv_diagnostics_find_removed(filter_show=None):
    # TODO: use filter
    print("finding removed shows and episodes")
    database_ep = EpisodeDatabaseSingleton().db()
    database_show = ShowDatabaseSingleton().db()
    episode_files = [
        episode_filename
        for _, episode_filename in util_tv.list_all_episodes(use_cache=False)
    ]
    removed_episodes = [
        episode for episode in database_ep
        if episode not in episode_files and not database_ep.is_removed(episode)
    ]
    found_removed = False
    for episode in removed_episodes:
        pfcs(f"found removed episode: w[{episode}]")
        database_ep.mark_removed(episode)
        found_removed = True
    removed_shows = [
        show for show in database_show if show not in util_tv.list_all_shows()
        and not database_show.is_removed(show)
    ]
    for show_dir in removed_shows:
        pfcs(f"found removed show: w[{show_dir}]")
        database_show.mark_removed(show_dir)
        found_removed = True
    return found_removed
Example #3
0
def process_movie_dir(movie_dir_source: Path):
    name = movie_dir_source.name
    pfcs(f"processing: i[{name}] as type b[movie dir]")
    nfo_loc = find_nfo_file_in_path(movie_dir_source)
    rar_loc = find_rar_in_path(movie_dir_source)
    mkv_loc = find_mkv_in_path(movie_dir_source)
    if not rar_loc and not mkv_loc:
        pfcs(f"could e[not] find item to process in w[{movie_dir_source}]!")
        return
    if rar_loc and mkv_loc:
        pfcs(f"found e[both] rar and mkv in w[{movie_dir_source}]!")
        return
    pfcs(f"found file: i[{mkv_loc or rar_loc}]")
    dest = determine_movie_destination(name)
    pfcs(f"destination: i[{dest}]")
    if rar_loc:
        if not run.extract(rar_loc, dest, create_dirs=True):
            return  # extract failed
    if mkv_loc:
        run.move_file(mkv_loc, dest, create_dirs=True)
    if nfo_loc:
        imdb_id = util.parse_imdbid_from_file(nfo_loc)
        if imdb_id:
            print(
                f"found imdb-id: {cstr(imdb_id, 154)}, will create movie.nfo")
            util_movie.create_movie_nfo(dest, imdb_id)
    shutil.rmtree(movie_dir_source)
    print(f'removed {cstr(movie_dir_source, "orange")}')
Example #4
0
def download(item: dict, extr: bool = False, server=Server.WB1, limit=None):
    file_name = item["name"]
    repl_list = [
        (" ", r"\ "),  # TODO: use escaped for all?
        ("'", "*"),
        ("[", "*"),
        ("]", "*"),
        ("(", "*"),
        (")", "*")
    ]
    for char, repl in repl_list:
        file_name = file_name.replace(char, repl)
    print(f'downloading: {cstr(file_name, "orange")}')
    conf = config.ConfigurationManager()
    dl_dir = conf.get("path_download")
    limit_str = f"-l {limit} " if limit is not None else ""
    command = f'scp {limit_str}-r {server.value}:"~/files/{file_name}" "{dl_dir}"'
    local_command(command, hide_output=False)
    # only run extract if dest was default dl dir
    if extr:
        path_for_extract_cmd = Path(dl_dir) / file_name
        if not path_for_extract_cmd.exists():
            return
        pfcs(f"running extract command on: g[{path_for_extract_cmd}]")
        extract.extract_item(path_for_extract_cmd)
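A minimal standalone sketch of the filename-escaping step above; the helper name and the sample filename below are invented for illustration:
# Sketch only: mirrors the replacement loop in download() with a made-up filename.
repl_list = [
    (" ", r"\ "),  # escape spaces for the remote shell path
    ("'", "*"),    # awkward characters become shell wildcards
    ("[", "*"),
    ("]", "*"),
    ("(", "*"),
    (")", "*"),
]

def escape_remote_name(file_name: str) -> str:
    for char, repl in repl_list:
        file_name = file_name.replace(char, repl)
    return file_name

print(escape_remote_name("Some Movie (2020) [1080p].mkv"))
# -> Some\ Movie\ *2020*\ *1080p*.mkv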
Example #5
0
 def retrieve_info(self):
     self.log("attempting to retrieve video info using youtube-dl...")
     formats = list(YoutubeDLFormats)
     preferred = YoutubeDLFormats.preferred_format(self.url)
     if preferred is not None:
         self.log(
             fcs(f"trying preferred format first: p<{preferred.value}>",
                 format_chars=["<", ">"]))
         formats.remove(preferred)
         formats.insert(0, preferred)
     for vid_format in formats:
         self.log(f"trying format: {vid_format.value}")
         self.options["format"] = vid_format.value
         self.format = vid_format
         try:
             with youtube_dl.YoutubeDL(self.options) as ydl:
                 self.info = ydl.extract_info(self.url, download=False)
                 self.filename = self.generate_filename()
             self.log(
                 fcs(f"i[success!] generated filename: p[{self.filename}]"))
             self.log(
                 fcs(f"i<success!> using format: p<{self.format.value}>",
                     format_chars=["<", ">"]))
             return  # succeeded
         except youtube_dl.utils.DownloadError as error:
             self.log(error)
         except AttributeError:
             pass
     # did not succeed
     pfcs("e[error] could not retrieve info using youtube-dl!")
Example #6
0
def image_size(path_to_image: str) -> tuple:
    if not util.is_file(path_to_image):
        pfcs(f'image file e[{path_to_image}] does not exist!')
        return None
    with Image.open(path_to_image) as image:
        return image.size
Example #7
0
 def __init__(self, data: dict, cli_args):
     super().__init__(cli_args.verbose)
     self.raw_data = data
     self.name = data["name"]
     self.dest_path = self._parse_dest_path(data["dest"])
     self.log(f"set dest path: {self.dest_path}")
     self.filter_dict = data.get("filter", {})
     self.url = data["url"]
     self.use_title = data.get("use_title", False)
     self._force_dl = False
     if cli_args.simulate:
         self.disabled = random.randint(0, 100) > 90
     else:
         if data.get("process", False):
             self.disabled = False
         else:
             self.disabled = True
     self.skip_sub = data.get("skip_sub", False)
     self.downloaded_today = False
     self.airtimes: List[Airtime] = []
     self.set_log_prefix(f"show_{self.name}".replace(" ", "_").upper())
     self.log("init")
     pfcs(f"added show i[{self.name}]")
     for day, time in data["airtime"].items():
         self.airtimes.append(Airtime(day, time))
     if not self.disabled:
         self._validate()
     else:
         self.log(fcs("o[is disabled] will not process!"))
Example #8
0
def handle_special_episode(show_id, filename):
    pfcs(f"determined as i[SPECIAL EP]")
    tv_maze_singleton = tvmaze.TvMazeData()
    specials = tv_maze_singleton.get_json_all_special_episodes(show_id)
    for spep in specials:
        name = spep.get("name", "").split(" ")
        # TODO: match season...
        if all(x.lower() in filename.lower() for x in name):
            return spep
    return {}
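The lookup above treats a special episode as a match when every whitespace-separated token of its TVMaze name appears in the local filename; a tiny self-contained illustration with made-up data:
# Illustration of the all-tokens-in-filename check used above (made-up data).
def name_matches(episode_name: str, filename: str) -> bool:
    tokens = episode_name.split(" ")
    return all(token.lower() in filename.lower() for token in tokens)

print(name_matches("Behind the Myths", "Mythbusters.S00E01.Behind.The.Myths.720p.mkv"))  # True
print(name_matches("Duct Tape Island", "Mythbusters.S00E01.Behind.The.Myths.720p.mkv"))  # False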
Example #9
0
def load_untappd_export(file_path: Path):
    if file_path.is_file():
        with open(file_path) as export_file:
            try:
                return json.load(export_file)
            except Exception as error:
                pfcs(f"e[ERROR]: could not parse file: e[{file_path}]")
                return None
    pfcs(f"e[ERROR]: file does not exist: e[{file_path}]")
    return None
Example #10
0
 def print(self):
     _name = self.parent_name or self.path.stem
     _type_str = fcs("o[UNKN]")
     if self.is_movie:
         _type_str = fcs("b[MOVI]")
     elif self.is_tvshow:
         _type_str = fcs("p[SHOW]")
         if self.parent_is_season_dir:
             _name = self.path.stem
     pfcs(f"i<[{self.index:04d}]> [{_type_str}] {_name}", format_chars=("<", ">"))
Example #11
0
def parse_arg_date(arg_date):
    if arg_date:
        try:
            return datetime.strptime(arg_date, ARG_DATE_FMT)
        except TypeError:
            pfcs(f"could not parse --date filter: e[{arg_date}]")
            return None
        except ValueError:
            pfcs(f"wrong format for --date filter: e[{arg_date}]")
            return None
    return None
Example #12
0
def get_movie_nfo_imdb_id(movie_dir: str, debug_print=False):
    "Get the imdb-id from a movie.nfo in the movie folder location"
    mstr = __name__ + ".get_movie_nfo_imdb_id"
    if not util.is_dir(movie_dir) and exists(movie_dir):
        movie_dir = movie_path(movie_dir)
    path = Path(movie_dir) / 'movie.nfo'
    if not path.is_file():
        pfcs(f"o[{mstr}] movie.nfo file does not exist in w[{movie_dir}]",
             show=debug_print)
        return None
    with open(path, 'r') as file_item:
        return util.parse_imdbid(file_item.readline())
Example #13
0
def find_finished_downloads(extensions=util.video_extensions()):
    dst_path = nzbget_dest_path()
    if not util.is_dir(dst_path):
        pfcs(f'NZBGet destination path does not exist: e[{dst_path}]')
    found_files = []
    for dir_name, _, file_list in os.walk(dst_path):
        matched_files = [Path(dir_name) / file_item for file_item in file_list if any(
            ext in file_item for ext in extensions) and 'sample' not in file_item.lower()]
        if len(matched_files) == 1:
            found_files.append(matched_files[0])
        # TODO: handle len == 0 or len > 1
    return found_files
Example #14
0
 def print(self, only_color_info=False, short=False):
     if short:
         pfcs(f"id: i[{self.id}] {self.product_id}:o[{self.name}]")
         return
     print(f"id: {self.id} ({self.product_id})")
     if only_color_info:
         state = self.raw_data.get("state", {})
         string = " ".join(f"{x}: {state.get(x)}"
                           for x in ["bri", "hue", "sat"])
         print(string)
     else:
         print("on:", self.on)
         print(json.dumps(self.raw_data, indent=4))
     print("*" * 20)
Example #15
0
 def load_cookies_txt(self, file_path=None):
     self.init_session()
     if not file_path:
         file_path = ConfigurationManager().path("cookies_txt")
     if not Path(file_path).exists():
         file_path = Path(__file__).resolve().parent / "cookies.txt"
     if not Path(file_path).exists():
         pfcs("e[error]: could not find cookies.txt!")
         return
     # NOTE use: https://addons.mozilla.org/en-US/firefox/addon/export-cookies-txt/
     try:
         jar = MozillaCookieJar(file_path)
         jar.load(ignore_discard=True, ignore_expires=True)
         self.SESSION.cookies.update(jar)
     except LoadError as error:
         pfcs(f"w[warning] could not load cookies.txt:\n{error}")
Example #16
0
def wb_download_items(items: list,
                      indexes: str,
                      extr=False,
                      server=Server.WB1,
                      limit=None):
    "Downloads the items passed, based on indexes, to dest_dir"
    items_to_dl = filter_using_get_arg_indexes(items, indexes)
    if not items_to_dl:
        return
    print("Will download the following:")
    for item in items_to_dl:
        pfcs(f" - g[{item['name']}]")
    for item in items_to_dl:
        download(item, extr=extr, server=server, limit=limit)
Example #17
0
def scan_new_shows():
    print("searching tv location for new shows...")
    database = ShowDatabaseSingleton().db()
    shows_not_in_db = [
        show for show in util_tv.list_all_shows()
        if show not in database and not is_ds_special_dir(show)
    ]
    new = len(shows_not_in_db) > 0
    for new_show in shows_not_in_db:
        if is_ds_special_dir(new_show):
            continue
        data = process_new_show(new_show)
        database.insert(data)
        pfcs(f"added g[{new_show}] to database!")
        pfcs(f"d[{'-' * util.terminal_width()}]")
    if new:
        database.save()
    else:
        print("found no new shows")
Example #18
0
def scan_episodes():
    print("searching tv location for new episodes...")
    new = False
    database = EpisodeDatabaseSingleton().db()
    for full_path_season_dir, episode_filename in util_tv.list_all_episodes(
            use_cache=False):
        if episode_filename in database or is_ds_special_dir(episode_filename):
            continue
        new = True
        full_path_to_show = Path(full_path_season_dir).parents[0]
        data = process_new_episode(episode_filename, full_path_to_show.name)
        database.insert(data)
        pfcs(f"added g[{episode_filename}] to database!")
        pfcs(f"d[{'-' * util.terminal_width()}]")
    if new:
        database.save()
        database.export_latest_added()
    else:
        print("found no new episodes")
Example #19
0
def create_movie_nfo(movie_dir: str, imdb_id: str, debug_print=False):
    mstr = __name__ + ".create_movie_nfo"
    parsed_imdb_id = util.parse_imdbid(imdb_id)
    if not parsed_imdb_id:
        pfcs(f"o[{mstr}] could not parse imdb-id from e[{imdb_id}]",
             show=debug_print)
        return None
    imdb_id = parsed_imdb_id
    if not util.is_dir(movie_dir) and exists(movie_dir):
        movie_dir = movie_path(movie_dir)
    if not util.is_dir(movie_dir):
        pfcs(
            f"o[{mstr}] could not determine location of e[{Path(movie_dir).name}]",
            show=debug_print)
        return None
    previous_imdb = get_movie_nfo_imdb_id(movie_dir, debug_print=debug_print)
    file_loc = Path(movie_dir) / 'movie.nfo'
    with open(file_loc, 'w') as file_item:
        file_item.write(f'https://www.imdb.com/title/{imdb_id}')
    if debug_print:
        prev_str = ""
        if previous_imdb:
            prev_str = f" previous id was o[{previous_imdb}]"
        pfcs(
            f"o[{mstr}] wrote g[{imdb_id}] to movie.nfo for g[{Path(movie_dir).name}]{prev_str}"
        )
Example #20
0
def scan_movies():
    print("searching movie location for new movies...")
    database = MovieDatabaseSingleton().db()
    movies_not_in_db = [
        movie for movie in util_movie.list_all() if not database.exists(movie)
    ]
    new = False
    for new_movie in movies_not_in_db:
        if is_ds_special_dir(new_movie):
            continue
        data = process_new_movie(new_movie)
        if not data:
            continue
        database.insert(data)
        new = True
        pfcs(f"added g[{new_movie}] to database!")
        pfcs(f"d[{'-' * util.terminal_width()}]")
    if new:
        database.save()
        database.export_latest_added()
    else:
        print("found no new movies")
Example #21
0
def filter_using_get_arg_indexes(items: list, indexes: str) -> list:
    if indexes.startswith("-"):  # download last x items
        return items[int(indexes):]
    indexes_to_dl = []
    if "+" in indexes and "-" in indexes:
        pfcs("e[indexes connot contain both '-' and '+'! aborting]")
    try:
        for ix_split in indexes.split(","):
            if "-" in ix_split:
                ranges = ix_split.split("-")
                ran = [r for r in range(int(ranges[0]), int(ranges[1]) + 1)]
                indexes_to_dl.extend(ran)
            elif "+" in ix_split:
                start_num = int(ix_split.split("+")[0])
                addition = int(ix_split.split("+")[1])
                ran = [r for r in range(start_num, start_num + addition + 1)]
                indexes_to_dl.extend(ran)
            else:
                indexes_to_dl.append(int(ix_split))
        return [item for item in items if item["index"] in indexes_to_dl]
    except (ValueError, IndexError):
        print(f'{cstr("could not parse indexes, aborting!", "red")}')
        return []
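The index argument is a comma-separated list where "a-b" is an inclusive range, "a+n" means a plus the following n indexes, and a single leading "-x" selects the last x items; below is a simplified standalone re-implementation of the comma-separated forms, for illustration only:
# Simplified standalone re-implementation of the index syntax, for illustration only.
def parse_indexes(indexes: str) -> list:
    result = []
    for part in indexes.split(","):
        if "-" in part:
            start, end = part.split("-")
            result.extend(range(int(start), int(end) + 1))  # "3-5" -> 3, 4, 5
        elif "+" in part:
            start, count = part.split("+")
            result.extend(range(int(start), int(start) + int(count) + 1))  # "2+3" -> 2..5
        else:
            result.append(int(part))
    return result

print(parse_indexes("1,3-5"))  # [1, 3, 4, 5]
print(parse_indexes("2+3"))    # [2, 3, 4, 5]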
Example #22
0
def main():
    args = get_args()
    srt_filenames = []
    if args.search_subscene is not None:
        if args.verbose:
            print("searching subscene")
        subscene = SubScene(args.search_subscene, verbose=args.verbose)
        for lang in [Language.English, Language.Swedish]:
            if args.lang is not None:
                if args.lang == "en" and lang != Language.English:
                    continue
                if args.lang == "sv" and lang != Language.Swedish:
                    continue
            sub = subscene.result.get_best(lang)
            if sub:
                srt_path = sub.download_and_unzip()
                if srt_path:
                    handle_srt(srt_path)
            else:
                print(f"could not find any subs for language: {lang}")
        return 0
    if "*" in args.file:
        items = list(Path().glob(args.file))
        if items:
            pfcs(f"found i[{len(items)}] item matching i[{args.file}]")
        for num, item in enumerate(items, 1):
            if len(items) > 1:
                pfcs(f"processing item i[{num}] of {len(items)}")
            if item.suffix.endswith("srt"):
                handle_srt(item.name, auto_move=args.auto_move)
            else:
                pfcs(f"skipping item w[{item.name}], not srt")
            print_line()
        print("done!")
        sys.exit(0)
    file_path = Path(args.file)
    if not file_path.exists():
        print("passed file does not exists")
        exit()
    if file_path.suffix.endswith('zip'):
        srt_filenames = find_srt_filenames_in_zip(file_path)
        if not srt_filenames:
            print("could not find srt in zip file!")
            exit()
        for srt_filename in srt_filenames:
            command = f"unzip -oj {file_path} {srt_filename}"
            if run.local_command(command, print_info=False):
                print(f"extracted {cstr(srt_filename, 154)}!")
    elif file_path.suffix.endswith('srt'):
        srt_filenames = [file_path.name]
    else:
        print("no subtitle file to process..")
        exit()
    for srt in srt_filenames:
        handle_srt(srt, auto_move=args.auto_move)
Example #23
0
def process_new_episode(episode_filename: str, show_folder: str) -> dict:
    pfcs(f"processing o[{episode_filename}]")
    data = {
        "filename": episode_filename,
        "scanned": util.now_timestamp(),
        "removed": False,
    }
    if "mythbusters" in show_folder.lower():
        season_number, episode_number = util_tv.parse_season_episode(
            episode_filename, season_as_year=True)
    else:
        season_number, episode_number = util_tv.parse_season_episode(
            episode_filename)
    data["season_number"] = season_number
    data["episode_number"] = episode_number
    tvmaze_data = {}
    database = ShowDatabaseSingleton().db()
    if show_folder in database:
        data["tvshow"] = show_folder
        show_id = database.get(show_folder, "tvmaze")
        pfcs(
            f"searching TVMaze for i[{episode_filename}]\n -> using b[{show_folder}]"
            f" season: b[{season_number}] episode: b[{episode_number}] show-id: b[{show_id}]"
        )
        if episode_number == 0:  # specials
            tvmaze_data = handle_special_episode(show_id, episode_filename)
        else:
            tvmaze_data = tvmaze.episode_search(show_folder,
                                                season_number,
                                                episode_number,
                                                show_maze_id=show_id)
    if tvmaze_data:
        if "id" in tvmaze_data:
            data["tvmaze"] = tvmaze_data["id"]
            pfcs(f" - got tvmaze id:   g[{data['tvmaze']}]")
        if "airstamp" in tvmaze_data:
            aired_date_str = tvmaze_data["airstamp"]
            aired_timestamp = util.date_str_to_timestamp(aired_date_str)
            if aired_timestamp:  # not 0
                data["released"] = aired_timestamp
                pfcs(f" - got aired date:  g[{aired_date_str[0:10]}]")
    return data
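For reference, a matched episode produces a dict roughly shaped like the sketch below; all values are invented:
# Hypothetical shape of the dict returned for a matched episode (values invented).
example_episode_data = {
    "filename": "Some.Show.S02E05.720p.mkv",
    "scanned": 1618300000,     # util.now_timestamp()
    "removed": False,
    "season_number": 2,
    "episode_number": 5,
    "tvshow": "Some Show",     # only present when the show folder is in the database
    "tvmaze": 123456,          # only present when TVMaze returned an id
    "released": 1549500000,    # only present when TVMaze returned an airstamp
}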
Example #24
0
def process_movie_file(movie_file_path):
    movie_file_path = validate_path(movie_file_path)
    if not movie_file_path:
        return
    if not movie_file_path.is_file():
        print(f"path {movie_file_path.name} is not a file!")
        return
    pfcs(f"processing: i[{movie_file_path.name}] as type b[movie file]")
    if movie_file_path.suffix not in util.video_extensions():
        pfcs(f"could not determine destination for w[{movie_file_path.name}]")
        return
    directory = str(movie_file_path.name).replace(movie_file_path.suffix, "")
    dest = determine_movie_destination(directory)
    pfcs(f"destination: i[{dest}]")
    run.move_file(movie_file_path, dest, create_dirs=True)
Example #25
0
def process_episode(ep_path: Path):
    ep_path = validate_path(ep_path)
    if not ep_path:
        return
    dest = determine_episode_destination(ep_path.name)
    if not dest:
        pfcs(f"could not determine destination for w[{ep_path}]")
        return
    if ep_path.is_dir():
        pfcs(f"processing: i[{ep_path.name}] as type b[episode dir]")
        rar_loc = find_rar_in_path(ep_path)
        if not run.extract(rar_loc, dest, create_dirs=True):
            return  # extract failed
        return
    pfcs(f"processing: i[{ep_path.name}] as type b[episode file]")
    run.move_file(ep_path, dest, create_dirs=True)
Example #26
0
 def print_beer(self, data, cli_args):
     beer_obj = data["beer"]
     filter_name = cli_args.filter
     matched = False
     if filter_name and filter_name.lower() in beer_obj.name.lower():
         matched = True
     if filter_name and filter_name.lower() in beer_obj.brewery.lower():
         matched = True
     if filter_name and not matched:
         return False
     date_filter = parse_arg_date(cli_args.date)
     if date_filter:
         found_match = False
         for checkin_obj in data["checkins"]:
             if checkin_obj.date.date() == date_filter.date():
                 found_match = True
         if not found_match:
             return False
     print(beer_obj)
     num_checkins = len(data["checkins"])
     if not cli_args.show_all_checkins:
         pfcs(f"  Number of CheckIns: p[{num_checkins}]", end="")
     else:
         pfcs(f"  CheckIns:")
     if cli_args.show_all_checkins:
         for num, chin in enumerate(data["checkins"], 1):
             pfcs(f"   o[#{num:02d}] {chin}")
     elif num_checkins == 1:
         print(f" ({data['checkins'][0]})")
     elif num_checkins > 1:
         print(f" (most recent at {data['checkins'][-1]})")
     else:
         print()
     if cli_args.filenameify:
         pfcs(f"  y[{beer_obj.filename()}]")
     print()
     return True
Example #27
0
def extract_item(source_item_path):
    source_item_path = validate_path(source_item_path)
    if not source_item_path:
        return
    name = source_item_path.name
    if util_movie.is_movie(name):
        process_movie(source_item_path)
    elif util_tv.is_episode(name):
        process_episode(source_item_path)
    elif util_tv.is_season(name):
        if source_item_path.is_dir():
            pfcs(f"processing: i[{name}] as type b[season dir]")
            for item in source_item_path.iterdir():
                extract_item(item)
            pfcs(f"g[done!] please remove w[{name}] manually.")
    else:
        pfcs(f"could not determine type of w[{name}]")
Example #28
0
def move_nzbs_from_download():
    dest_dir = nzbget_nzb_path()
    if not util.is_dir(dest_dir):
        pfcs(f'destination dir does not exist: e[{dest_dir}]')
        return 0
    count = 0
    for dir_name, _, file_list in os.walk(user_download_dir()):
        nzb_files = [Path(
            dir_name) / file_item for file_item in file_list if file_item.endswith('.nzb')]
        if not nzb_files:
            continue
        for nzb_file in nzb_files:
            if move_file(nzb_file, dest_dir, debug_print=False):
                pfcs(f'moved i[{nzb_file.name}] to g[{dest_dir}]')
                count += 1
            else:
                pfcs(f'failed to move e[{nzb_file.name}] to w[{dest_dir}]!')
    return count
Example #29
0
def wb_scp_torrents(server=Server.WB1):
    "send torrent files to wb watch dir"
    torrent_file_list = []
    conf = config.ConfigurationManager()
    dl_paths = [
        Path.home() / "mnt" / "downloads",
        Path(conf.get("path_download"))
    ]
    for dl_path in dl_paths:
        if not dl_path.exists():
            continue
        torrent_file_list += dl_path.glob("**/*.torrent")
    for torrent_file in torrent_file_list:
        command = f'scp "{str(torrent_file)}" {server.value}:~/watch'
        pfcs(f"sending torrent: g[{torrent_file.name}]")
        if local_command(command, hide_output=True):
            try:
                torrent_file.unlink()  # remove file
                pfcs(f"removed local torrent: o[{torrent_file.name}]")
            except OSError:
                pfcs(f"failed to remove local torrent: e[{torrent_file.name}]")
Example #30
0
def image_resize(path_to_image: str, width: int, height: int, new_file_name=None) -> bool:
    if not util.is_file(path_to_image):
        pfcs(f'image file e[{path_to_image}] does not exist!')
        return False
    old_img_path = Path(path_to_image)
    if not new_file_name:
        new_file_name = old_img_path.name.replace(
            old_img_path.suffix, "") + "_resized" + old_img_path.suffix
        resized_img_path = Path(old_img_path.parent) / new_file_name
    elif isinstance(new_file_name, str):
        resized_img_path = Path(old_img_path.parent) / new_file_name
    elif isinstance(new_file_name, Path):
        resized_img_path = new_file_name
    else:
        pfcs('cannot determine output filename of '
             f'resized version of e[{path_to_image}]!')
        return False
    if util.is_file(resized_img_path):
        pfcs(f'output filename e[{resized_img_path}] already exists!')
        return False
    with Image.open(old_img_path) as image:
        resized_image = image.copy()
        resized_image.thumbnail((width, height))
        resized_image.save(resized_img_path)
        return True
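Note that Image.thumbnail preserves the aspect ratio and only shrinks, so the result fits inside (width, height) rather than matching it exactly; a quick self-contained check:
# Demonstrates that thumbnail() keeps the aspect ratio within the requested box.
from PIL import Image

img = Image.new("RGB", (800, 600))
img.thumbnail((200, 200))
print(img.size)  # (200, 150) -- scaled to fit, not stretched to 200x200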