예제 #1
0
def rename_and_move_file_to_final_dest(in_vid_path, new_vid_name, dest_path):
    """Rename the input video to new_vid_name, copy it into dest_path, then
    delete the renamed original (net effect: a rename + move).

    Raises:
        Exception: if a file named new_vid_name already exists in dest_path.
    """
    final_vid_path = dest_path + '\\' + new_vid_name
    if fsu.is_file(final_vid_path):
        raise Exception(new_vid_name + " already exists in DVD_RIP_DIR")

    # rename in place (inside the source file's own parent dir) before copying
    renamed_vid_path = fsu.get_parent_dir_from_path(in_vid_path) + '\\' + new_vid_name
    fsu.rename_file_overwrite(in_vid_path, renamed_vid_path)

    print('copying ' , renamed_vid_path, ' to ', dest_path, '...')
    fsu.copy_files_to_dest([renamed_vid_path], dest_path)
    fsu.delete_if_exists(renamed_vid_path)
예제 #2
0
def create_vid_converted_copy_of_dir(in_dir_path,
                                     dest_parent_dir_path,
                                     new_vid_type='mkv',
                                     delete_og=False):
    """Recursively copy in_dir_path under dest_parent_dir_path, converting
    contained video files to new_vid_type along the way.

    Files whose extension is in VID_FILE_TYPES_TO_BE_CONVERTED are converted
    (currently only 'mkv' is supported); all other files are copied verbatim.
    Sub-directories are processed recursively with the same settings.

    Args:
        in_dir_path: directory to copy/convert; must be an existing dir.
        dest_parent_dir_path: parent dir the converted copy is created under;
            must not be in_dir_path's own parent.
        new_vid_type: target container type; 'mkv' (default). 'mp4' is
            intentionally disabled (loses subtitle streams).
        delete_og: when True, delete each original video after converting it.

    Raises:
        Exception: on invalid inputs or an unsupported new_vid_type.
    """
    if fsu.is_dir(in_dir_path) != True:
        raise Exception("ERROR:  in_dir_path must point to dir")
    if fsu.get_parent_dir_from_path(in_dir_path) == dest_parent_dir_path:
        raise Exception(
            "ERROR:  dest_parent_dir_path cannot be the parent dir of in_dir_path"
        )

    # make new empty dir mirroring the input dir's basename
    in_path_basename = fsu.get_basename_from_path(in_dir_path)
    new_dir_path = dest_parent_dir_path + '//' + in_path_basename
    fsu.make_dir_if_not_exist(new_dir_path)

    # copy all files to new dir and recursively run this function on all contained dirs
    obj_path_l = fsu.get_abs_path_l_of_all_objects_in_dir(in_dir_path)
    print('obj_path_l:  ', obj_path_l)

    for obj_path in obj_path_l:
        if fsu.is_file(obj_path):
            # convert vid files, copy over other files
            if fsu.get_file_extension(
                    obj_path) in VID_FILE_TYPES_TO_BE_CONVERTED:
                if new_vid_type == 'mkv':
                    convert_vid_file_to_mkv(obj_path, new_dir_path)
                elif new_vid_type == 'mp4':
                    # mp4 conversion deliberately disabled; the unreachable
                    # convert_vid_file_to_mp4 call that followed this raise
                    # has been removed as dead code.
                    raise Exception(
                        "ERROR:  convert to mp4 will make you loose subs, and other stuff not set up for it, comment this out to continue"
                    )
                else:
                    raise Exception("ERROR:  Invalid new_vid_type: ",
                                    new_vid_type)

                if delete_og:
                    fsu.delete_if_exists(obj_path)

            else:
                fsu.copy_object_to_dest(obj_path, new_dir_path)

        elif fsu.is_dir(obj_path):
            # BUG FIX: the recursive call previously dropped new_vid_type and
            # delete_og, so nested directories always used the defaults.
            create_vid_converted_copy_of_dir(obj_path, new_dir_path,
                                             new_vid_type, delete_og)
        else:
            raise Exception("ERROR:  obj_path must be a file or a dir")
예제 #3
0
    def _log_small_historical_data():
        """Snapshot the current_data logs into a timestamped dir under
        historical_data, and merge newly evaluated postIds into the
        evaluated-post-ids JSON file."""
        file_system_utils.make_dir_if_not_exist(HISTORICAL_DATA_DIR_PATH)

        # make new log dir path, named by current date/time down to the minute
        now = datetime.datetime.now()
        date_time_str = now.strftime("%Y-%m-%d__%H_%M")
        new_log_dir_path = HISTORICAL_DATA_DIR_PATH + '/log__' + date_time_str

        # add new dir, delete old if exists (re-running within the same
        # minute overwrites that minute's snapshot)
        file_system_utils.delete_if_exists(new_log_dir_path)
        os.mkdir(new_log_dir_path)

        # copy data from current_data to new dir in historical_data
        copy_path_l = [
            CURRENT_DATA_DIR_PATH + '/download_log.csv',
            CURRENT_DATA_DIR_PATH + '/pool_clips_data.csv',
            CURRENT_DATA_DIR_PATH + '/LOG_FILES'
        ]
        file_system_utils.copy_objects_to_dest(copy_path_l, new_log_dir_path)

        # get list of evaluated postIds: a non-empty 'status' column marks a
        # pool clip as evaluated
        pool_evaluated_post_id_l = []
        pool_clips_data_row_dl = logger.readCSV(CURRENT_DATA_DIR_PATH +
                                                '/pool_clips_data.csv')
        for row_d in pool_clips_data_row_dl:
            if row_d['status'] != '':
                pool_evaluated_post_id_l.append(row_d['postId'])


#         print(pool_evaluated_post_id_l)#``````````````````````````````````````````````````````````````````````````

# add pool_evaluated_post_id_l to existing list of evaluated post ids
        evaluated_post_id_l = get_evaluated_post_id_l()
        #         print(evaluated_post_id_l)#`````````````````````````````````````````````````````````````````````````
        json_logger.write(pool_evaluated_post_id_l + evaluated_post_id_l,
                          EVALUATED_POST_IDS_JSON_PATH)
예제 #4
0
def download_vids(num_posts,
                  subreddit_list,
                  dl_type='overwrite',
                  QUICK_TEST=False,
                  continue_from_last_pos=False,
                  include_youtube_downloads=True,
                  start_from_pos=None):
    """Download video clips for reddit posts and log every download attempt.

    For each post: skips previously evaluated postIds, reuses clips that were
    downloaded earlier but never evaluated, skips 'self' posts, and otherwise
    downloads the video from youtube or reddit (subject to MAX_CLIP_DURATION).
    Every attempt -- success or failure -- is logged via
    dl_utils.log_attempted_download, and a download report is printed at the
    end.

    Args:
        num_posts: how many posts to fetch info for.
        subreddit_list: subreddits to pull posts from.
        dl_type: 'overwrite' deletes previously downloaded clips and the
            pool/download CSV logs before starting.
        QUICK_TEST: when True, fetch post info in quick-test mode.
        continue_from_last_pos: resume from the last processed position
            (also forces quick-test post-info fetching).
        include_youtube_downloads: when False, youtube posts are skipped.
        start_from_pos: explicit starting index into the post list, or None.
    """
    # add new dirs if don't already exist
    print(INDENT + 'Adding new dirs if needed...')
    file_system_utils.make_dir_if_not_exist(CURRENT_DATA_DIR_PATH)
    file_system_utils.make_dir_if_not_exist(DOWNLOADED_CLIPS_DIR_PATH)

    print(INDENT + 'QUICK_TEST:  ', QUICK_TEST)
    print(INDENT + 'Getting post_info_dl...')
    if QUICK_TEST == True or continue_from_last_pos == True:
        post_info_dl = get_post_info_dl.get_post_info_dl(num_posts,
                                                         subreddit_list,
                                                         quick_test=True)
    else:
        post_info_dl = get_post_info_dl.get_post_info_dl(
            num_posts, subreddit_list)

    print(INDENT + 'Getting starting pos...')
    start_pos = dl_utils.start_pos(start_from_pos, continue_from_last_pos)

    if dl_type == 'overwrite':
        print(INDENT + 'Deleting all files in %s...' %
              (DOWNLOADED_CLIPS_DIR_PATH))
        file_system_utils.delete_all_files_in_dir(DOWNLOADED_CLIPS_DIR_PATH)
        print(INDENT + 'Deleting pool_clips_data.csv and download_log.csv')
        file_system_utils.delete_if_exists(CURRENT_DATA_DIR_PATH +
                                           '/pool_clips_data.csv')
        file_system_utils.delete_if_exists(CURRENT_DATA_DIR_PATH +
                                           '/download_log.csv')

    print(INDENT + 'Getting list of previously evaluated postIds...')
    evaluated_post_id_l = historical_data.get_evaluated_post_id_l()

    print(INDENT +
          'Getting dict list of saved, previously non-evaluated clips...')
    non_eval_clip_data_d = historical_data.get_non_eval_clip_data_d()

    for post_num, post_info_d in enumerate(post_info_dl[start_pos:]):
        post_num += start_pos
        # outer while/try retries this whole post whenever internet is lost
        while (True):
            try:
                # fail_reason stays None on success; otherwise records why the
                # download was skipped/failed for the attempt log
                fail_reason = None
                dl_start_time = time.time()

                testing_utils.print_str_wo_error(
                    '\n' + INDENT +
                    "Starting on post_info_d #:  %s   title: %s    url: %s ..."
                    % (post_num, post_info_d['postTitle'],
                       post_info_d['postURL']))

                vid_save_title = dl_utils.make_vid_save_name(
                    post_num)  #'f_' + str(post_num) + '/' +
                vid_save_path = DOWNLOADED_CLIPS_DIR_PATH + '/' + vid_save_title + '.mp4'
                clip_duration = 0

                if post_info_d['postId'] in evaluated_post_id_l:
                    print(
                        INDENT +
                        'This postId has been previously evaluated, skipping...'
                    )
                    fail_reason = 'prev_eval'

                # if vid has previously been downloaded but not evaluated and was saved,
                # just rename it from where it is saved instead of re-downloading
                elif post_info_d['postId'] in non_eval_clip_data_d.keys():
                    print(
                        INDENT +
                        'Pulling from previously download...  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
                    )
                    historical_data.pull_clip(
                        non_eval_clip_data_d[post_info_d['postId']],
                        vid_save_path)
                    clip_duration = dl_utils.get_vid_length(vid_save_path)

                # keep track of how often this happens, not dealing with it now b/c it seems like too much hassle
                elif post_info_d['postType'] == 'self':
                    print(
                        INDENT +
                        "post_info_d['postType'] == self, skipping... <-- ASSUMING THIS DOSNT HAPPEN MUCH, IF YOU SEE THIS MESSAGE TOO OFTEN, FIX THIS"
                    )
                    fail_reason = 'postType==self'

                # youtube video (postType None appears to mean off-reddit /
                # youtube-hosted -- confirm against get_post_info_dl)
                elif post_info_d['postType'] == None:
                    if not include_youtube_downloads:
                        print(
                            INDENT +
                            'Youtube video, include_youtube_downloads == False so skipping...'
                        )
                        fail_reason = 'incude_youtube_downloads==False'
                    else:
                        print(INDENT + '  Trying to download youtube video...')

                        # try to get clip duration; False doubles as the
                        # "duration unknown / lookup failed" sentinel
                        print(INDENT + '    Getting clip duration...')
                        clip_duration = False
                        try:
                            clip_duration = dl_utils.get_vid_duration__youtube(
                                post_info_d['postURL'])
                        except ValueError as e:
                            print(e)
                            fail_reason = 'error: value error: ' + str(
                                e
                            )  # <----------------- this error happening too much???
                        except subprocess.CalledProcessError as e:
                            if e.output == NO_INTERNET_YT_STR:
                                raise NoInternetError(
                                    'ERROR:  No internet connection')
                            else:
                                print(
                                    "Status : FAIL", e.output
                                )  #`````````````````````````````````````````````````````````````````````````

                                print(
                                    INDENT +
                                    'Video not available, possably removed, skipping...'
                                )
                                fail_reason = 'error: youtube video unavailable, possably removed -- subprocess.CalledProcessError'
                        except custom_errors.NotYoutubeVideoError:
                            print(INDENT + 'not a youtube vid, skipping...')
                            fail_reason = 'error: url_not_youtube_vid'


#                         except RegexNotFoundError:
#                             print(INDENT + 'RegexNotFoundError on youtube vid, MIGHT be possable to fix, keep an eye on how often this happens')
#                             fail_reason = 'error: RegexNotFoundError' #vid is available, MIGHT be able to fix later, one time vid had title with weird chars but not always

# download youtube video if clip duration was obtained and is less than MAX_CLIP_DURATION
                        if clip_duration != False:
                            if clip_duration < MAX_CLIP_DURATION:
                                print(INDENT + '  Attempting Download...')
                                dl_utils.download_youtube_vid(
                                    post_info_d['postURL'], vid_save_path)
                            else:
                                print(
                                    INDENT +
                                    '  Clip too long!  Moving on to next clip...'
                                )
                                fail_reason = 'clip_too_long'

                # embedded reddit video
                elif post_info_d['postType'] == 'direct':
                    print(INDENT + 'Trying to download reddit video...')
                    print(INDENT + 'Sleeping...')
                    time.sleep(1)
                    # inner retry loop: download errors may be corrected by
                    # re-combining the failed video/audio, then break regardless
                    while True:
                        try:
                            # try to get vid duration; False means it must be
                            # checked after the download instead
                            print(INDENT + 'Trying to get clip duration...')
                            check_clip_duration_after_download = False
                            clip_duration = dl_utils.get_vid_duration__reddit(
                                post_info_d['postId'])

                            if clip_duration == False:
                                check_clip_duration_after_download = True
                            elif clip_duration > MAX_CLIP_DURATION:
                                fail_reason = 'clip_too_long'
                                break

                            dl_utils.download_reddit_vid(
                                post_info_d['postURL'],
                                DOWNLOADED_CLIPS_DIR_PATH, vid_save_title)

                            # delete video if its too long
                            if check_clip_duration_after_download == True:
                                clip_duration = dl_utils.get_vid_length(
                                    vid_save_path)
                                if clip_duration > MAX_CLIP_DURATION:
                                    print(
                                        '  Video too long, deleting video...')
                                    os.remove(vid_save_path)
                                    fail_reason = 'clip_too_long'
                            break

                        except (youtube_dl.utils.DownloadError, OSError) as e:
                            if dl_utils.str_from_list_in_error_msg(
                                    UN_BEATABLE_ERROR_STRINGS, e):
                                print(INDENT +
                                      'Hit un-beatable error skipping clip...')
                                fail_reason = 'error: reddit: un-beatable error'
                            else:
                                # assume a failed video/audio mux and try to
                                # repair it rather than re-download
                                dl_utils.correct_failed_vid_audio_combine(
                                    DOWNLOADED_CLIPS_DIR_PATH, vid_save_title)
                            break
                break
            except (NoInternetError,
                    prawcore.exceptions.RequestException) as e:
                print(INDENT +
                      'No internet connection, sleeping then trying again...')
                time.sleep(1)

        # log all data from this attepted download, even if it was not a success
        print(INDENT + 'logging attempted download...')
        dl_time = time.time() - dl_start_time
        print(INDENT + '          Download attept time: ', dl_time)
        dl_utils.log_attempted_download(vid_save_path, post_info_d,
                                        clip_duration, dl_time, fail_reason)

    dl_report.print_dl_report()
예제 #5
0
def compile_all_clips_in_dir(clips_dir_path, output_vid_path):
    """Concatenate every clip file in clips_dir_path into one video at
    output_vid_path using ffmpeg.

    Builds ffmpeg concat commands from the clip paths.  If a single command
    line would exceed MAX_CMD_CHARS, the clips are split across multiple
    commands run in parallel threads, and the partial outputs are then
    concatenated in a final pass.

    Args:
        clips_dir_path: directory containing only the clip files to compile.
        output_vid_path: path the final compiled video is renamed to.
    """

    def _try_to_rename_until_sucsessful_if_exists(src_path, dest_path):
        # Windows can still hold a lock on a freshly written file; retry the
        # rename until the PermissionError clears.
        if os.path.exists(src_path):
            if os.path.exists(dest_path):
                os.remove(dest_path)
            while (True):
                try:
                    os.rename(src_path, dest_path)
                    break
                except PermissionError:
                    print(
                        'got PermissionError while trying to rename %s --> %s, sleeping then trying again...'
                        % (src_path, dest_path))
                    time.sleep(0.5)

    def _make_input_files_str(vid_path_l):
        # build the ' -i <path> -i <path> ...' portion of the ffmpeg command
        input_files_str = ''
        for vid_path in vid_path_l:
            input_files_str += ' -i ' + vid_path
        return input_files_str

    def _make_thread_cmd_str_l(vid_paths_to_compile, save_file_name=None):
        """Return (cmd_str_l, output_path_l): one ffmpeg command per thread,
        splitting the clip list until every command fits in MAX_CMD_CHARS."""

        def __make_cmd_str(vid_paths_to_compile, output_file_path):
            # NOTE(review): the filter_complex names only streams [0] and [1]
            # while concat=n= uses the full clip count -- confirm ffmpeg
            # accepts this mapping for >2 inputs.
            input_files_str = _make_input_files_str(vid_paths_to_compile)
            num_clips_str = str(len(vid_paths_to_compile))
            cmd = 'ffmpeg' + input_files_str + ' -filter_complex "[0:v:0] [0:a:0] [1:v:0] [1:a:0] concat=n=' + num_clips_str + ':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" ' + output_file_path + ' -y'
            return cmd

        def __all_cmd_str_fit(cmd_str_l):
            # BUG FIX: 'return True' was previously inside the loop, so only
            # the FIRST command's length was ever checked; now every command
            # must fit.
            for cmd_str in cmd_str_l:
                if len(cmd_str) > MAX_CMD_CHARS:
                    return False
            return True

        def __split_list(alist, wanted_parts=1):
            # split alist into wanted_parts roughly-equal contiguous chunks
            length = len(alist)
            return [
                alist[i * length // wanted_parts:(i + 1) * length //
                      wanted_parts] for i in range(wanted_parts)
            ]

        if save_file_name == None:
            thread_output_file_path_list = ['thread_temp_1.mp4']
        else:
            thread_output_file_path_list = [save_file_name]

        cmd_str_l = [
            __make_cmd_str(vid_paths_to_compile,
                           thread_output_file_path_list[0])
        ]
        num_threads = 1

        # keep splitting the clip list across more threads until every
        # resulting command line fits within MAX_CMD_CHARS
        while (not __all_cmd_str_fit(cmd_str_l)):
            cmd_str_l = []
            num_threads += 1
            thread_output_file_path_list.append('thread_temp_' +
                                                str(num_threads) + '.mp4')

            vid_path_ll = __split_list(vid_paths_to_compile, num_threads)
            for vid_path_l_num, vid_path_l in enumerate(vid_path_ll):
                cmd_str_l.append(
                    __make_cmd_str(
                        vid_path_l,
                        thread_output_file_path_list[vid_path_l_num]))

        return cmd_str_l, thread_output_file_path_list

    # collect the paths of every file in the clips dir
    vid_filenames_to_compile = [
        f for f in listdir(clips_dir_path) if isfile(join(clips_dir_path, f))
    ]
    vid_paths_to_compile = []
    for vid_filename in vid_filenames_to_compile:
        vid_paths_to_compile.append(clips_dir_path + '/' + vid_filename)

    thread_cmd_str_l, thread_output_file_path_list = _make_thread_cmd_str_l(
        vid_paths_to_compile)

    if len(thread_cmd_str_l) == 1:
        # everything fits in one command: run it and move the result into place
        subprocess.call(thread_cmd_str_l[0], shell=True)

        _try_to_rename_until_sucsessful_if_exists(
            thread_output_file_path_list[0], output_vid_path)

    else:
        # too many clips for one command line: run the partial concats in
        # parallel threads, then concat the partial outputs in a final pass

        def run_subprocess(thread_cmd_str):
            subprocess.call(thread_cmd_str, shell=True)

        thread_l = []
        for thread_cmd_str in thread_cmd_str_l:
            print(
                thread_cmd_str
            )  #`````````````````````````````````````````````````````````````````````````````````````
            th = Thread(target=run_subprocess, args=(thread_cmd_str, ))
            thread_l.append(th)

        for thread in thread_l:
            thread.start()

        for thread in thread_l:
            thread.join()

        final_output_temp_vid_path = 'final_temp.mp4'  # probably dont need to use this, error was unrelated, but im lazy
        thread_cmd_str_l_2, thread_output_file_path_list_2 = _make_thread_cmd_str_l(
            thread_output_file_path_list, final_output_temp_vid_path)

        subprocess.call(thread_cmd_str_l_2[0], shell=True)

        file_system_utils.delete_if_exists(output_vid_path)
        _try_to_rename_until_sucsessful_if_exists(
            thread_output_file_path_list_2[0], output_vid_path)

        # delete temp thread vids
        for thread_output_file_path in thread_output_file_path_list:
            os.remove(thread_output_file_path)

    print('done with compile, num threads: ', len(thread_cmd_str_l)
          )  #``````````````````````````````````````````````````````

    print('done with compile')
예제 #6
0
def log_and_delete_current_data(delete=True):
    """Archive current_data into historical_data, preserve non-evaluated
    clips, prune the non-eval clip store to its size cap, then (optionally)
    delete the current_data dir.

    Args:
        delete: when True, delete CURRENT_DATA_DIR_PATH after archiving
            (best-effort: a PermissionError is reported, not raised).
    """

    def _log_small_historical_data():
        """Snapshot the current_data logs into a timestamped dir under
        historical_data, and merge newly evaluated postIds into the
        evaluated-post-ids JSON file."""
        file_system_utils.make_dir_if_not_exist(HISTORICAL_DATA_DIR_PATH)

        # make new log dir path, named by current date/time down to the minute
        now = datetime.datetime.now()
        date_time_str = now.strftime("%Y-%m-%d__%H_%M")
        new_log_dir_path = HISTORICAL_DATA_DIR_PATH + '/log__' + date_time_str

        # add new dir, delete old if exists
        file_system_utils.delete_if_exists(new_log_dir_path)
        os.mkdir(new_log_dir_path)

        # copy data from current_data to new dir in historical_data
        copy_path_l = [
            CURRENT_DATA_DIR_PATH + '/download_log.csv',
            CURRENT_DATA_DIR_PATH + '/pool_clips_data.csv',
            CURRENT_DATA_DIR_PATH + '/LOG_FILES'
        ]
        file_system_utils.copy_objects_to_dest(copy_path_l, new_log_dir_path)

        # get list of evaluated postIds: a non-empty 'status' marks a pool
        # clip as evaluated
        pool_evaluated_post_id_l = []
        pool_clips_data_row_dl = logger.readCSV(CURRENT_DATA_DIR_PATH +
                                                '/pool_clips_data.csv')
        for row_d in pool_clips_data_row_dl:
            if row_d['status'] != '':
                pool_evaluated_post_id_l.append(row_d['postId'])


#         print(pool_evaluated_post_id_l)#``````````````````````````````````````````````````````````````````````````

# add pool_evaluated_post_id_l to existing list of evaluated post ids
        evaluated_post_id_l = get_evaluated_post_id_l()
        #         print(evaluated_post_id_l)#`````````````````````````````````````````````````````````````````````````
        json_logger.write(pool_evaluated_post_id_l + evaluated_post_id_l,
                          EVALUATED_POST_IDS_JSON_PATH)

    def _log_non_eval_clips():
        """Copy clips that were downloaded but never evaluated into
        NON_EVAL_CLIPS_DIR_PATH and record them in the non-eval CSV."""

        def __make_og_non_eval_post_id_clip_path_dl():
            # rows with an empty 'status' are the non-evaluated clips
            new_row_dl = []
            pool_row_dl = logger.readCSV(CURRENT_DATA_DIR_PATH +
                                         '/pool_clips_data.csv')

            for pool_row_d in pool_row_dl:
                if pool_row_d['status'] == '':
                    new_row_dl.append({
                        'postId': pool_row_d['postId'],
                        'clip_path': pool_row_d['clip_path']
                    })
            return new_row_dl

        def __get_post_id_l(non_eval_clips_row_dl):
            # extract just the postIds from a row dict list
            post_id_l = []
            for row_dl in non_eval_clips_row_dl:
                post_id_l.append(row_dl['postId'])
            return post_id_l

        file_system_utils.make_dir_if_not_exist(NON_EVAL_CLIPS_DIR_PATH)
        # first run: the non-eval CSV does not exist yet
        try:
            non_eval_clips_row_dl = logger.readCSV(
                NON_EVAL_CLIPS_DATA_CSV_PATH)
        except FileNotFoundError:
            non_eval_clips_row_dl = []

        # make row_dl of postIDs and original clip paths
        og_non_eval_post_id_clip_path_dl = __make_og_non_eval_post_id_clip_path_dl(
        )

        # build final_non_eval_post_id_clip_path_dl - contains postId and new clip path that clip is about to be saved to
        # also will not include any postIds that are already logged
        final_non_eval_post_id_clip_path_dl = []
        existing_post_id_l = __get_post_id_l(non_eval_clips_row_dl)

        # new save names continue the non_eval_<N>.mp4 numbering after the
        # rows already in the CSV
        clips_added = 0
        for d in og_non_eval_post_id_clip_path_dl:
            if d['postId'] not in existing_post_id_l:
                new_save_name = 'non_eval_' + str(
                    len(non_eval_clips_row_dl) + clips_added) + '.mp4'
                final_non_eval_post_id_clip_path_dl.append({
                    'postId':
                    d['postId'],
                    'clip_path':
                    NON_EVAL_CLIPS_DIR_PATH + '/' + new_save_name
                })
                clips_added += 1

        # copy all non-evaluated clips to thier new home in non_eval_clips
        # could just rename, but this is nicer for testing
        # (og_pos scan works because both lists preserve the CSV row order)
        og_pos = 0
        for d in final_non_eval_post_id_clip_path_dl:
            while (d['postId'] !=
                   og_non_eval_post_id_clip_path_dl[og_pos]['postId']):
                og_pos += 1
            og_clip_path = og_non_eval_post_id_clip_path_dl[og_pos][
                'clip_path']
            file_system_utils.copy_files_to_dest([og_clip_path],
                                                 NON_EVAL_CLIPS_DIR_PATH)
            just_copied_clip_path = NON_EVAL_CLIPS_DIR_PATH + '/' + ntpath.basename(
                og_clip_path)
            os.rename(just_copied_clip_path, d['clip_path'])

        # add info from final_non_eval_post_id_clip_path_dl to non_eval_clips_row_dl
        for row_d in final_non_eval_post_id_clip_path_dl:
            non_eval_clips_row_dl.append(row_d)

        logger.logList(non_eval_clips_row_dl, NON_EVAL_CLIPS_DATA_CSV_PATH,
                       False, NON_EVAL_CLIPS_DATA_CSV_HEADER_LIST, 'overwrite')

    def _prune_non_eval_clips():
        """Delete oldest non-eval clips until the dir is under its size cap,
        drop their CSV rows, and renumber the survivors."""

        # remove clips until under max dir size, oldest first
        age_sorted_non_eval_clip_path_l = file_system_utils.get_file_paths_in_dir_by_age(
            NON_EVAL_CLIPS_DIR_PATH)
        deleted_clip_path_l = []
        while (file_system_utils.get_size(NON_EVAL_CLIPS_DIR_PATH) >
               MAX_NON_EVAL_CLIPS_DIR_SIZE):
            pos = len(deleted_clip_path_l)
            os.remove(age_sorted_non_eval_clip_path_l[pos])
            deleted_clip_path_l.append(
                os.path.abspath(age_sorted_non_eval_clip_path_l[pos]))

        # remove rows that go to the paths of the clips that were just deleted
        non_eval_clips_row_dl = logger.readCSV(NON_EVAL_CLIPS_DATA_CSV_PATH)

        del_row_d_l = []
        for row_d_num, row_d in enumerate(non_eval_clips_row_dl):
            if os.path.abspath(row_d['clip_path']) in deleted_clip_path_l:
                del_row_d_l.append(row_d)

        for row_d in del_row_d_l:
            non_eval_clips_row_dl.remove(row_d)

        # go back through and rename everything so that in the csv it shows up as non_eval_0, 1, 2,...
        # need this so you don't get stuff overwritten next time
        rename_clips_for_order(non_eval_clips_row_dl)

    _log_small_historical_data()
    _log_non_eval_clips()
    _prune_non_eval_clips()
    if delete:
        try:
            file_system_utils.delete_if_exists(
                CURRENT_DATA_DIR_PATH)  # I know it exists
        except PermissionError as e:
            print(
                'ERROR  got permission error, make sure you dont have stuff open in visual studio, error: ',
                str(e))
예제 #7
0
def make_code_card(kwargs, test_mode):
    """Build and return a new code-card image for the given store/template.

    kwargs must contain at least 'template_type' and 'store_name'; the full
    dict is forwarded to make_new_blank_store_template / make_new_code_card.
    In test_mode, the cached box coords entry and all intermediate template
    images are purged first so every stage is regenerated from scratch.
    """
    template_type = kwargs['template_type']
    store_name = kwargs['store_name']

    # Test-mode intermediates are always deleted so they get rebuilt fresh.
    # (Previously these two paths were also appended a second time further
    # down, producing redundant duplicate delete calls.)
    img_paths_to_delete_l = [
        get__test_mode_blank_store_template_img_path(store_name),
        get__test_mode_blank_template_img_path(template_type)
    ]
    if test_mode:
        # Drop this template's cached box coords from the json file (if it
        # exists) so they get re-measured from the normalized color template.
        if fsu.is_file(TEMPLATE_BOX_COORDS_JSON_PATH):
            dim_template_box_coords_ddd = json_logger.read(
                TEMPLATE_BOX_COORDS_JSON_PATH)

            if TEMPLATE_DIMS_STR in dim_template_box_coords_ddd:
                dim_template_box_coords_ddd.pop(TEMPLATE_DIMS_STR)

                json_logger.write(dim_template_box_coords_ddd,
                                  TEMPLATE_BOX_COORDS_JSON_PATH)

        # Also purge the non-test-mode intermediate imgs so they get re-made.
        img_paths_to_delete_l.append(
            get__normalized_color_template_img_path(template_type))
        img_paths_to_delete_l.append(
            get__blank_template_img_path(template_type))
        img_paths_to_delete_l.append(
            get__blank_store_template_img_path(store_name))

    for img_path in img_paths_to_delete_l:
        fsu.delete_if_exists(img_path)

    # get template_type_box_coords from json file
    # if the json file does not exist, it will be created
    # if the box_coords are not in the json file, they will be loaded from the normalized_color_template_img
    # if the normalized_color_template_img does not exist, it will be created from the user-made color_template_img
    print('  Getting template_type_box_coords...')
    template_type_box_coords = get_template_type_box_coords(template_type)

    for box_title, box_coords in template_type_box_coords.items():
        print(box_title + ' : ' + str(box_coords))

    # Pick the test-mode or real blank store template path.
    print('  Getting blank_store_template_img...')
    if test_mode:
        blank_store_template_img_path = get__test_mode_blank_store_template_img_path(
            store_name)
    else:
        blank_store_template_img_path = get__blank_store_template_img_path(
            store_name)

    print('blank_store_template_img_path: ', blank_store_template_img_path
          )  #```````````````````````````````````````````````````````````

    # Create the blank store template on first use; blank_template_img is
    # created in the process if it does not already exist.
    if not fsu.is_file(blank_store_template_img_path):
        print(
            '    Blank_store_template_img does not exist, creating it now...')
        make_new_blank_store_template(kwargs, template_type_box_coords,
                                      test_mode)

    blank_store_template_img = pu.open_img(blank_store_template_img_path)

    print('  Making new code_card_img...')
    return make_new_code_card(kwargs, template_type_box_coords,
                              blank_store_template_img)