def __validate_crf_and_preset_count(self, no_transcoding_mode, crf_values, presets):
    """Check that the supplied CRF value(s) and preset(s) select a valid mode.

    Returns a (valid, error_message) tuple; error_message is '' when valid.
    """
    crf_is_scalar = isinstance(crf_values, int)
    preset_is_scalar = isinstance(presets, str)
    # Both arguments left at their scalar defaults while -ntm is off:
    # there is nothing to compare.
    if crf_is_scalar and preset_is_scalar and not no_transcoding_mode:
        return (False, 'No CRF value or preset has been specified. Did you mean to use the -ntm mode?')
    multiple_crfs = is_list(crf_values) and len(crf_values) > 1
    multiple_presets = is_list(presets) and len(presets) > 1
    # Varying both dimensions at once is ambiguous — no comparison mode fits.
    if multiple_crfs and multiple_presets:
        return (False, 'More than one CRF value AND more than one preset specified. No suitable mode found.')
    return (True, '')
def __validate_crf_and_preset_count(self, crf_values, presets):
    """Check that at most one of CRF values / presets has multiple entries.

    Returns a (valid, error_message) tuple; error_message is '' when valid.
    Fixed: the original returned the error message even on success, unlike
    the sibling validator which returns (True, '') — callers that display
    the message unconditionally would print an error for valid input.
    """
    if is_list(crf_values) and len(crf_values) > 1 \
            and is_list(presets) and len(presets) > 1:
        return (False, 'More than one CRF value AND more than one preset '
                       'specified. No suitable mode found.')
    return (True, '')
def plot_heatmap(x, y, z, xaxis_label=None, yaxis_label=None,
                 mapaxis_label=None, x_sci=True, y_sci=True, name=None):
    """Render a hexbin heatmap of z over (x, y) with a colorbar and save it as a PDF.

    x, y, z: coordinate and intensity data (all three must be of the same
        list-ness, enforced by the assert below).
    mapaxis_label: label for the colorbar.
    x_sci / y_sci: use scientific notation on the respective axis.
    name: output file name passed to utils.save_pdf.
    """
    # All three inputs must agree in shape category (all lists or none).
    assert utils.is_list(x) == utils.is_list(y) == utils.is_list(z)
    fig, ax = plt.subplots()
    ax.hexbin(x, y, C=z, gridsize=30, cmap=plt.get_cmap('Blues'), bins=None)
    utils.set_sci_axis(ax, x_sci, y_sci)
    # NOTE(review): assumes the hexbin collection is child #2 of the axes —
    # fragile against matplotlib version changes; confirm.
    pcm = ax.get_children()[2]
    cb = plt.colorbar(pcm, ax=ax)
    cb.set_label(mapaxis_label)
    utils.set_axis_labels(ax, xaxis_label, yaxis_label)
    utils.finalize(ax)
    utils.save_pdf(name)
def unify(x, y, theta):
    """Unify x (a KB fact) with y (the query, alpha) under substitution theta.

    Returns the extended substitution dict, or False when unification fails.
    """
    if theta is False:
        # A failure already propagated up from a nested unification.
        return False
    if x == y:
        # Identical terms (Parent = Parent, z = z, Mary = Mary) add nothing.
        return theta
    if utils.is_variable(x):
        return unify_var(x, y, theta)
    if utils.is_variable(y):
        return unify_var(y, x, theta)
    if utils.is_compound(x) and utils.is_compound(y):
        # Unify the predicate symbols first, then the argument lists under
        # whatever substitution that produced.
        predicate_theta = unify(x.get_predicate(), y.get_predicate(), theta)
        return unify(x.get_args(), y.get_args(), predicate_theta)
    if utils.is_list(x) and utils.is_list(y) and len(x) == len(y):
        # Element-wise unification: head first, then the tails.
        head_theta = unify(x[0], y[0], theta)
        return unify(x[1:], y[1:], head_theta)
    return False
def augment_random(image, aug_types="", volume='expand'):
    """Apply randomly chosen weather augmentations to an image or image list.

    aug_types: subset of the supported augmentation names ("" means all).
    volume: 'expand' applies every augmentation in aug_types and collects all
        results; 'same' applies one randomly chosen augmentation per image.
    Returns a list of augmented images ('expand', or 'same' with a list
    input) or a single image ('same' with a single-image input).
    Raises on an unknown augmentation name or volume.
    """
    aug_types_all = [
        "add_snow", "add_rain", "add_fog", "add_sun_flare", "add_speed"
    ]
    if aug_types == "":
        aug_types = aug_types_all
    output = []
    if not (ut.is_list(aug_types)):
        raise Exception(err_aug_list_type)
    if volume == 'expand':
        for aug_type in aug_types:
            if not (aug_type in aug_types_all):
                raise Exception(err_aug_type)
            # Dispatch by name through the module namespace instead of
            # eval(): same behaviour (names are validated against the
            # whitelist above), but no arbitrary-code-execution surface.
            result = globals()[aug_type](image)
            if (ut.is_list(result)):
                output += result
            else:
                output.append(result)
    elif volume == 'same':
        ut.verify_image(image)
        for aug_type in aug_types:
            if not (aug_type in aug_types_all):
                raise Exception(err_aug_type)
        if (ut.is_list(image)):
            for img in image:
                # random.randint keeps the original RNG consumption pattern.
                selected_aug = aug_types[random.randint(0, len(aug_types) - 1)]
                output.append(globals()[selected_aug](img))
        else:
            selected_aug = aug_types[random.randint(0, len(aug_types) - 1)]
            output = globals()[selected_aug](image)
    else:
        raise Exception(err_aug_volume)
    return output
def correct_exposure(image):
    """Exposure-correct a single image or every image in a list.

    Returns a single processed image, or a list of processed images when a
    list was given.
    """
    ut.verify_image(image)
    if not ut.is_list(image):
        return exposure_process(image)
    return [exposure_process(img) for img in image]
def train_on(self, filename, line_function, evaluate_function=None, header=True):
    """Train the model on `filename`, running a greedy hyperparameter search
    first if any parameter was given as a list of candidate values.

    line_function: converts a raw data line into a VW example.
    evaluate_function: scores prediction results; required when a
        hyperparameter search is triggered.
    header: whether the data file has a header row.
    Returns self (trained, with the best found parameter values applied).
    NOTE: Python 2 code (`dict.iteritems`).
    """
    # Any list-valued param (other than quadratic/cubic, which are
    # legitimately lists of interaction specs) triggers a hypersearch.
    hyperparams = [k for (k, p) in self.params.iteritems()
                   if is_list(p) and k not in ['quadratic', 'cubic']]
    if len(hyperparams):
        if evaluate_function is None:
            raise ValueError("evaluate_function must be defined in order to hypersearch.")
        # 80/20 train/validation split built with shell head/tail on a
        # shuffled copy of the input file.
        num_lines = sum(1 for line in open(filename))
        train = int(math.ceil(num_lines * 0.8))
        test = int(math.floor(num_lines * 0.2))
        train_file = filename + '_vp_hypersearch_train'
        test_file = filename + '_vp_hypersearch_validate'
        filename = shuffle_file(filename, header=header)
        os.system('head -n {} {} > {}'.format(train, filename, train_file))
        os.system('tail -n {} {} > {}'.format(test, filename, test_file))
        pos = 0
        # Greedy coordinate search: tune one hyperparameter at a time.
        for hyperparam in hyperparams:
            pos += 1
            if len(self.params[hyperparam]) == 2:
                # A two-element list is a (min, max) range; use a log-spaced
                # sweep when it spans more than two orders of magnitude.
                hypermin, hypermax = self.params[hyperparam]
                if hypermax / float(hypermin) > 100:
                    param_range = [10 ** x
                                   for x in range(int(math.log10(hypermin)),
                                                  int(math.log10(hypermax)) + 1)]
                else:
                    param_range = range(int(hypermin), int(hypermax) + 1)
            else:
                # Longer lists are taken as explicit candidate values.
                param_range = self.params[hyperparam]
            best_value = None
            best_metric = None
            model = deepcopy(self)
            model.params['quiet'] = True
            model.params['debug'] = False
            # Pin the not-yet-tuned hyperparameters to their range midpoint.
            for other_hyperparam in hyperparams[pos:]:
                average = (model.params[other_hyperparam][0]
                           + model.params[other_hyperparam][1]) / 2.0
                model.params[other_hyperparam] = average
            for value in param_range:
                print('Trying {} as value for {}...'.format(value, hyperparam))
                model.params[hyperparam] = value
                model = model._run_train(train_file,
                                         line_function=line_function,
                                         evaluate_function=None,
                                         header=header)
                results = model.predict_on(test_file)
                eval_metric = evaluate_function(results)
                print('...{}'.format(eval_metric))
                # NOTE(review): lower-is-better is assumed here; the original
                # TODO suggests `>` may be wanted for higher-is-better
                # metrics — confirm with the metric used.
                if best_metric is None or eval_metric < best_metric:  # TODO: >
                    best_metric = eval_metric
                    best_value = value
            print('Best value for {} was {}!'.format(hyperparam, best_value))
            self.params[hyperparam] = best_value
        self.line_function = line_function
        self.evaluate_function = evaluate_function
        self.header = header
        # Clean up the split/shuffled temp files.
        safe_remove(train_file)
        safe_remove(test_file)
        safe_remove(filename)
        return self
    else:
        # No list-valued params: plain single training run.
        return self._run_train(filename, line_function, evaluate_function, header)
def _apply_sun_flare(img, imshape, angle, flare_center, no_of_flare_circles,
                     src_radius, src_color):
    """Resolve per-image flare parameters and render the flare onto img."""
    # Random angle when unspecified (-1); avoid exactly pi/2, which the
    # flare-line helper cannot handle.
    if (angle == -1):
        angle_t = random.uniform(0, 2 * math.pi)
        if angle_t == math.pi / 2:
            angle_t = 0
    else:
        angle_t = angle
    # Random flare centre in the top half of the frame when unspecified.
    if flare_center == -1:
        flare_center_t = (random.randint(0, imshape[1]),
                          random.randint(0, imshape[0] // 2))
    else:
        flare_center_t = flare_center
    x, y = add_sun_flare_line(flare_center_t, angle_t, imshape)
    return add_sun_process(img, no_of_flare_circles, flare_center_t,
                           src_radius, x, y, src_color)


def add_sun_flare(image, flare_center=-1, angle=-1, no_of_flare_circles=8,
                  src_radius=400, src_color=(255, 255, 255)):
    """Add a sun-flare effect to an image or to each image in a list.

    flare_center / angle: -1 means choose randomly per image.
    no_of_flare_circles: number of flare circles, must be in [0, 20].
    Returns a single image or a list, matching the input.
    Refactored: the list and single-image branches were full duplicates;
    the per-image work now lives in _apply_sun_flare.
    """
    ut.verify_image(image)
    if (angle != -1):
        angle = angle % (2 * math.pi)
    if not (no_of_flare_circles >= 0 and no_of_flare_circles <= 20):
        raise Exception(err_flare_circle_count)
    if (ut.is_list(image)):
        # As in the original, all list entries share the first image's shape.
        imshape = image[0].shape
        return [_apply_sun_flare(img, imshape, angle, flare_center,
                                 no_of_flare_circles, src_radius, src_color)
                for img in image]
    return _apply_sun_flare(image, image.shape, angle, flare_center,
                            no_of_flare_circles, src_radius, src_color)
def plot_bar(x, y, hue=None, hue_count=1, line_values=None, line_label=None,
             legend=True, xaxis_label=None, yaxis_label=None, xticks=None,
             y_lim=None, x_sci=True, y_sci=True, fig_size=None, y_err=None,
             name=None):
    """Draw a bar chart, optionally with error bars and an overlay line,
    then save the figure via utils.save_fig.

    line_values: one value per distinct x; drawn as a line over the bars.
    y_err: per-bar error values for errorbar markers.
    NOTE(review): the `hue` parameter is currently unused.
    """
    fig, ax = plt.subplots()
    if fig_size and isinstance(fig_size, list) and len(fig_size) > 0:
        if len(fig_size) == 1:
            fig.set_figwidth(fig_size[0])
        else:
            fig.set_figwidth(fig_size[0])
            fig.set_figheight(fig_size[1])
    bar_width = 0.2
    # Shift bars right by one bar width and centre tick positions on them.
    new_x = [x_ + bar_width for x_ in x]
    ticks_x = [x_ + 0.5*bar_width for x_ in new_x]
    if y_err:
        ax.errorbar(ticks_x, y, yerr=y_err, fmt='o', ecolor='r', capthick=1,
                    elinewidth=1)
    plt.bar(new_x, y, width=bar_width)
    if line_values and utils.is_list(line_values):
        x_set = set(x)
        # Only draw the overlay when it has one value per distinct x.
        if len(line_values) == len(x_set):
            if line_label:
                ax.plot(ax.get_xticks(), line_values, label=line_label)
                # Labelled overlay line adds a legend column.
                hue_count += 1
            else:
                ax.plot(ax.get_xticks(), line_values)
    if xticks and isinstance(xticks, list):
        plt.xticks(ticks_x, xticks)
    if y_lim and isinstance(y_lim, list) and len(y_lim) > 0:
        if len(y_lim) == 1:
            plt.ylim(ymin=y_lim[0])
        else:
            plt.ylim(ymin=y_lim[0])
            plt.ylim(ymax=y_lim[1])
    if legend:
        utils.set_legend(ncol=hue_count)
    utils.set_sci_axis(ax, x_sci, y_sci)
    utils.set_axis_labels(ax, xaxis_label, yaxis_label)
    utils.finalize(ax, x_grid=False)
    utils.save_fig(name)
def _path(self, toparent=None):
    """
    Get the path to this ObjectContainer.

    @param toparent: the _parent object up to which the path is relative.
    Default value is None, which gives the fully qualified path.
    @return: The path to the ObjectContainer from toparent
    """
    if self == toparent:
        return ""
    elif self._parent and self._parent != toparent:
        # Return the path with a list index if this element sits inside a
        # list. Fixed: was `self.parent._get(...)` — every other access in
        # this method uses the `_parent` attribute; a bare `parent` name
        # would raise AttributeError unless a property aliases it.
        if utils.is_list(self._parent._get(self._name)):
            return self._parent._path(
                toparent) + "." + "%s[%s]" % (self._name, self.get_index())
        else:
            return self._parent._path(toparent) + "." + self._name
    else:
        return self._name
def add_rain(image, slant=-1, drop_length=20, drop_width=1,
             drop_color=(200, 200, 200), rain_type='None'):
    """Add a rain effect to an image or to each image in a list.

    slant: drop slant in [-20, 20], or -1 for a random slant.
    drop_length: in [0, 100]; drop_width: in [1, 5].
    drop_color: (200,200,200) is a shade of gray.
    rain_type: passed through to generate_random_lines.
    Returns a single image or a list, matching the input.
    """
    ut.verify_image(image)
    slant_extreme = slant
    if not (ut.is_numeric(slant_extreme) and
            (slant_extreme >= -20 and slant_extreme <= 20)
            or slant_extreme == -1):
        raise Exception(err_rain_slant)
    if not (ut.is_numeric(drop_width) and drop_width >= 1 and drop_width <= 5):
        raise Exception(err_rain_width)
    if not (ut.is_numeric(drop_length) and drop_length >= 0
            and drop_length <= 100):
        raise Exception(err_rain_length)
    if (ut.is_list(image)):
        image_RGB = []
        image_list = image
        # All list entries share the first image's shape.
        imshape = image[0].shape
        if slant_extreme == -1:
            slant = np.random.randint(
                -10, 10)  ##generate random slant if no slant value is given
        rain_drops, drop_length = generate_random_lines(
            imshape, slant, drop_length, rain_type)
        for img in image_list:
            # NOTE(review): rain_process receives slant_extreme (still -1
            # when the slant was randomized), not the randomized `slant`
            # used to generate the drops — confirm this is intentional.
            output = rain_process(img, slant_extreme, drop_length, drop_color,
                                  drop_width, rain_drops)
            image_RGB.append(output)
    else:
        imshape = image.shape
        if slant_extreme == -1:
            slant = np.random.randint(
                -10, 10)  ##generate random slant if no slant value is given
        rain_drops, drop_length = generate_random_lines(
            imshape, slant, drop_length, rain_type)
        output = rain_process(image, slant_extreme, drop_length, drop_color,
                              drop_width, rain_drops)
        image_RGB = output
    return image_RGB
def run_(model, train_filename=None, predict_filename=None,
         train_line_function=None, predict_line_function=None,
         evaluate_function=None, split=0.8, header=True, quiet=False,
         multicore=False):
    """Train `model` on train_filename and predict on predict_filename,
    splitting a single file into train/test portions when both names match.

    Returns the prediction results. NOTE: Python 2 code (print statement).
    """
    # Multicore runs may hand in a list of model shards; use the first.
    if is_list(model):
        model = model[0]
    # Same file for train and predict: carve it into a train/test split.
    if train_filename == predict_filename:
        train_filename, predict_filename = test_train_split(train_filename,
                                                            train_pct=split,
                                                            header=header)
    results = (model.train_on(train_filename,
                              line_function=train_line_function,
                              evaluate_function=evaluate_function,
                              header=header)
               .predict_on(predict_filename,
                           line_function=predict_line_function,
                           header=header))
    if not quiet and multicore:
        print 'Shuffling...'
    # NOTE(review): after the split above, train_filename no longer equals
    # predict_filename, so this cleanup branch only fires if
    # test_train_split returned identical names — confirm the intended
    # condition (it looks like it should remove the split temp files).
    if train_filename == predict_filename:
        safe_remove(train_filename)
        safe_remove(predict_filename)
    safe_remove(model.get_cache_file())
    safe_remove(model.get_model_file())
    return results
def add_snow(image, snow_coeff=-1):
    """Overlay a snow effect on an image or on each image in a list.

    snow_coeff in [0, 1] controls the intensity; -1 picks a random value.
    Returns a single image or a list, matching the input.
    """
    ut.verify_image(image)
    if snow_coeff == -1:
        snow_coeff = random.uniform(0, 1)
    elif snow_coeff < 0.0 or snow_coeff > 1.0:
        raise Exception(err_snow_coeff)
    # Rescale the [0, 1] coefficient into the brightness range expected by
    # snow_process.
    snow_coeff = snow_coeff * (255 / 2) + 255 / 3
    if ut.is_list(image):
        return [snow_process(img, snow_coeff) for img in image]
    return snow_process(image, snow_coeff)
def add_fog(image, fog_coeff=-1):
    """Add a fog/haze effect to an image or to each image in a list.

    fog_coeff in [0, 1] controls the density; -1 picks a random density per
    image. Returns a single image or a list, matching the input.
    """
    ut.verify_image(image)
    if fog_coeff != -1:
        if fog_coeff < 0.0 or fog_coeff > 1.0:
            raise Exception(err_fog_coeff)

    def _fog_one(img, imshape):
        # Resolve per-image density, then paint blurred haze patches at
        # every generated coordinate and soften the whole frame.
        coeff = random.uniform(0.3, 1) if fog_coeff == -1 else fog_coeff
        hw = int(imshape[1] // 3 * coeff)
        haze_list = generate_random_blur_coordinates(imshape, hw)
        for haze_points in haze_list:
            img = add_blur(img, haze_points[0], haze_points[1], hw, coeff)
        return cv2.blur(img, (hw // 10, hw // 10))

    if ut.is_list(image):
        # All list entries share the first image's shape.
        shape = image[0].shape
        return [_fog_one(img, shape) for img in image]
    return _fog_one(image, image.shape)
def add_speed(image, speed_coeff=-1):
    """Apply a motion-blur "speed" effect to an image or list of images.

    speed_coeff in [0, 1] scales the blur strength; -1 picks a random value
    per image. Returns a single image or a list, matching the input.
    """
    ut.verify_image(image)
    if speed_coeff != -1:
        if speed_coeff < 0.0 or speed_coeff > 1.0:
            raise Exception(err_speed_coeff)

    def _kernel_count():
        # Blur kernel count scales with the coefficient; random when -1.
        coeff = random.uniform(0, 1) if speed_coeff == -1 else speed_coeff
        return int(15 * coeff)

    if ut.is_list(image):
        return [apply_motion_blur(img, _kernel_count()) for img in image]
    return apply_motion_blur(image, _kernel_count())
def plot_line(xs, ys, line_labels=None, xaxis_label=None, yaxis_label=None,
              xticks=None, vlines=None, vlines_kwargs=None, hlines=None,
              hlines_kwargs=None, x_sci=True, y_sci=True, y_lim=None,
              x_lim=None, legend_top=True, ls_cycle=False, marker_size=0,
              x_grid=True, y_grid=True, fig_size=None, name=None,
              draw_arrow=False):
    """Plot one or several lines and save the figure via utils.save_fig.

    xs/ys may be single sequences or lists of sequences; line_labels must be
    a list exactly when xs/ys are lists of sequences.
    vlines/hlines: x/y positions for reference lines, styled with
    vlines_kwargs/hlines_kwargs under the alternative palette.
    NOTE(review): the `draw_arrow` parameter is currently unused.
    """
    # Either all of xs/ys/line_labels are "multiple" or none are.
    multiple_x = utils.is_list_of_list(xs)
    multiple_y = utils.is_list_of_list(ys)
    multiple_line_label = utils.is_list(line_labels)
    assert multiple_x == multiple_y == multiple_line_label
    fig, ax = plt.subplots()
    if fig_size and isinstance(fig_size, list) and len(fig_size) > 0:
        if len(fig_size) == 1:
            fig.set_figwidth(fig_size[0])
        else:
            fig.set_figwidth(fig_size[0])
            fig.set_figheight(fig_size[1])
    # Cyclers yield a distinct line/marker style per plotted series.
    ls_cycler = utils.get_line_styles_cycler(ls_cycle)
    ms_cycler = utils.get_marker_styles_cycler(marker_size > 0)
    if multiple_x:
        for x, y, line_label in zip(xs, ys, line_labels):
            ax.plot(x, y, label=line_label, ls=next(ls_cycler),
                    marker=next(ms_cycler), markersize=marker_size)
    else:
        ax.plot(xs, ys, label=line_labels)
    if xticks and isinstance(xticks, list):
        x = xs[0] if multiple_x else xs
        plt.xticks(x, xticks)
    if y_lim and isinstance(y_lim, list) and len(y_lim) > 0:
        if len(y_lim) == 1:
            plt.ylim(ymin=y_lim[0])
        else:
            plt.ylim(ymin=y_lim[0])
            plt.ylim(ymax=y_lim[1])
    if x_lim and isinstance(x_lim, list) and len(x_lim) > 0:
        if len(x_lim) == 1:
            plt.xlim(xmin=x_lim[0])
        else:
            plt.xlim(xmin=x_lim[0])
            plt.xlim(xmax=x_lim[1])
    ncol = len(xs) if multiple_x else 1
    utils.set_legend(legend_top, ncol)
    utils.set_sci_axis(ax, x_sci, y_sci)
    utils.set_axis_labels(ax, xaxis_label, yaxis_label)
    # Optional vertical/horizontal reference lines in the alternative palette.
    vlines = vlines or []
    for xvline in vlines:
        with ALTERNATIVE_PALETTE:
            plt.axvline(x=xvline, **vlines_kwargs)
    hlines = hlines or []
    for yhline in hlines:
        with ALTERNATIVE_PALETTE:
            plt.axhline(y=yhline, **hlines_kwargs)
    utils.finalize(ax, x_grid=x_grid, y_grid=y_grid)
    utils.save_fig(name)
def main():
    """Entry point: parse CLI arguments, then run the requested comparison —
    CRF comparison, preset comparison, or metrics-only (-ntm) mode."""
    if len(sys.argv) == 1:
        line()
        print(
            "To see more details about the available arguments, enter 'python main.py -h'"
        )
        line()
    parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
    # Original video path.
    parser.add_argument(
        '-ovp',
        '--original-video-path',
        type=str,
        required=True,
        help='Enter the path of the original '
        'video. A relative or absolute path can be specified. '
        'If the path contains a space, it must be surrounded in double quotes.\n'
        'Example: -ovp "C:/Users/H/Desktop/file 1.mp4"')
    # Set the number of threads to be used when computing VMAF.
    parser.add_argument(
        '--threads',
        type=str,
        default=str(os.cpu_count()),
        help='Set the number of threads to be used when computing VMAF.')
    # Which video encoder to use.
    parser.add_argument(
        '-e',
        '--video-encoder',
        type=str,
        default='x264',
        choices=['x264', 'x265', 'av1'],
        help='Specify the encoder to use (default: x264).\nExample: -e x265')
    # Set AV1 speed/quality ratio
    parser.add_argument(
        '--cpu-used',
        type=int,
        default=1,
        choices=range(1, 9),
        help=
        'Only applicable if choosing the AV1 encoder. Set speed/quality ratio. Value Range: 1-8\n'
        'Lower values mean slower encoding but better quality, and vice-versa.'
    )
    # CRF value(s).
    parser.add_argument('-crf',
                        '--crf-value',
                        type=int,
                        nargs='+',
                        choices=range(0, 52),
                        default=23,
                        help='Specify the CRF value(s) to use.',
                        metavar='CRF_VALUE(s)')
    # Preset(s).
    parser.add_argument('-p',
                        '--preset',
                        type=str,
                        nargs='+',
                        choices=[
                            'veryslow', 'slower', 'slow', 'medium', 'fast',
                            'faster', 'veryfast', 'superfast', 'ultrafast'
                        ],
                        default='medium',
                        help='Specify the preset(s) to use.',
                        metavar='PRESET(s)')
    # The time interval to use when creating the overview video.
    parser.add_argument(
        '-i',
        '--interval',
        type=int,
        choices=range(1, 600),
        default=0,
        help=
        'Create a lossless overview video by grabbing a <cliplength> seconds long segment '
        'every <interval> seconds from the original video and use this overview video '
        'as the "original" video that the transcodes are compared with.\nExample: -i 30',
        metavar='<an integer between 1 and 600>')
    # The length of each clip.
    parser.add_argument(
        '-cl',
        '--clip-length',
        type=int,
        choices=range(1, 60),
        default=1,
        help=
        'Defines the length of the clips. Only applies when used with -i > 0. Default: 1.\n'
        'Example: -cl 10',
        metavar='<an integer between 1 and 60>')
    # Use only the first x seconds of the original video.
    parser.add_argument(
        '-t',
        '--encode-length',
        type=str,
        help=
        'Create a lossless version of the original video that is just the first x seconds of the '
        'video, use the cut version as the reference and for all encodes. '
        'You cannot use this option in conjunction with the -i or -cl arguments.'
        'Example: -t 60')
    # Enable phone model?
    parser.add_argument('-pm',
                        '--phone-model',
                        action='store_true',
                        help='Enable VMAF phone model.')
    # Number of decimal places to use for the data.
    parser.add_argument(
        '-dp',
        '--decimal-places',
        type=int,
        default=2,
        help='The number of decimal places to use for the data '
        'in the table (default: 2).\nExample: -dp 3')
    # Calculate SSIM?
    parser.add_argument('-ssim',
                        '--calculate-ssim',
                        action='store_true',
                        help='Calculate SSIM in addition to VMAF.')
    # Calculate PSNR?
    parser.add_argument('-psnr',
                        '--calculate-psnr',
                        action='store_true',
                        help='Calculate PSNR in addition to VMAF.')
    # No transcoding mode.
    parser.add_argument(
        '-ntm',
        '--no-transcoding-mode',
        action='store_true',
        help=
        'Use this mode if you\'ve already transcoded a video and would like its VMAF and '
        '(optionally) the SSIM and PSNR to be calculated.\n'
        'Example: -ntm -tvp transcoded.mp4 -ovp original.mp4 -ssim -psnr')
    # Transcoded video path (only applicable when using the -ntm mode).
    parser.add_argument(
        '-tvp',
        '--transcoded-video-path',
        help=
        'The path of the transcoded video (only applicable when using the -ntm mode).'
    )
    args = parser.parse_args()
    # PSNR is blocked pending a libvmaf fix upstream.
    if args.calculate_psnr:
        exit_program(
            'PSNR calculation is currently unavailable due to a change that was made in libvmaf v2.0.0.\n'
            'Visit https://github.com/Netflix/vmaf/issues/787 for more information.\n'
            'You can re-run your command without the psnr argument, but PSNR values will not be calculated.'
        )
    args_validator = ArgumentsValidator()
    validation_result, validation_errors = args_validator.validate(args)
    if not validation_result:
        for error in validation_errors:
            print(f'Error: {error}')
        exit_program('Argument validation failed.')
    decimal_places = args.decimal_places
    original_video_path = args.original_video_path
    filename = Path(original_video_path).name
    output_folder = f'((unknown))'
    os.makedirs(output_folder, exist_ok=True)
    # this includes the dot eg '.mp4'
    output_ext = Path(original_video_path).suffix
    # The M4V container does not support the H.265 codec.
    if output_ext == '.m4v' and args.video_encoder == 'x265':
        output_ext = '.mp4'
    # Use class VideoInfoProvider to get the framerate, bitrate and duration
    provider = VideoInfoProvider(original_video_path)
    fps = provider.get_framerate_fraction()
    fps_float = provider.get_framerate_float()
    original_bitrate = provider.get_bitrate()
    line()
    print(
        'Video Quality Metrics\nGitHub.com/BassThatHertz/video-quality-metrics'
    )
    line()
    print('Here\'s some information about the original video:')
    print(f'Filename: (unknown)')
    print(f'Bitrate: {original_bitrate}')
    print(f'Framerate: {fps} ({fps_float}) FPS')
    line()
    table = PrettyTable()
    table_column_names = ['Encoding Time (s)', 'Size', 'Bitrate', 'VMAF']
    if args.calculate_ssim:
        table_column_names.append('SSIM')
    if args.calculate_psnr:
        table_column_names.append('PSNR')
    # No transcoding happens in -ntm mode, so drop the encoding-time column.
    if args.no_transcoding_mode:
        del table_column_names[0]
    if args.interval > 0:
        # Build a shorter "overview" video to compare against instead of the
        # full original.
        clip_length = str(args.clip_length)
        result, concatenated_video = create_movie_overview(
            original_video_path, output_folder, args.interval, clip_length)
        if result:
            original_video_path = concatenated_video
        else:
            exit_program(
                'Something went wrong when trying to create the overview video.'
            )
    factory = FfmpegProcessFactory()
    timer = Timer()
    if not args.no_transcoding_mode:
        # args.crf_value is a list when more than one CRF value is specified.
        if is_list(args.crf_value) and len(args.crf_value) > 1:
            print('CRF comparison mode activated.')
            crf_values = args.crf_value
            crf_values_string = ', '.join(str(crf) for crf in crf_values)
            preset = args.preset[0] if is_list(args.preset) else args.preset
            print(
                f'CRF values {crf_values_string} will be compared and the {preset} preset will be used.'
            )
            video_encoder = args.video_encoder
            # Cannot use os.path.join for output_folder as this gives an error like the following:
            # No such file or directory: '(2.mkv)\\Presets comparison at CRF 23/Raw JSON Data/superfast.json'
            output_folder = f'((unknown))/CRF comparison at preset {preset}'
            os.makedirs(output_folder, exist_ok=True)
            # The comparison table will be in the following path:
            comparison_table = os.path.join(output_folder, 'Table.txt')
            # Add a CRF column.
            table_column_names.insert(0, 'CRF')
            # Set the names of the columns
            table.field_names = table_column_names
            # The user only wants to transcode the first x seconds of the video.
            if args.encode_length and args.interval == 0:
                original_video_path = cut_video(filename, args, output_ext,
                                                output_folder,
                                                comparison_table)
            # Transcode the video with each CRF value.
            for crf in crf_values:
                transcode_output_path = os.path.join(
                    output_folder, f'CRF {crf}{output_ext}')
                graph_filename = f'CRF {crf} at preset {preset}'
                arguments = EncodingArguments()
                arguments.infile = str(original_video_path)
                arguments.encoder = Encoder[video_encoder]
                if video_encoder == 'av1':
                    arguments.av1_compression = str(args.cpu_used)
                arguments.crf = str(crf)
                arguments.preset = preset
                arguments.outfile = transcode_output_path
                process = factory.create_process(arguments)
                print(f'Transcoding the video with CRF {crf}...')
                timer.start()
                process.run()
                time_rounded = timer.end(decimal_places)
                print('Done!')
                transcode_size = os.path.getsize(
                    transcode_output_path) / 1_000_000
                transcoded_bitrate = provider.get_bitrate(
                    transcode_output_path)
                size_rounded = force_decimal_places(
                    round(transcode_size, decimal_places), decimal_places)
                data_for_current_row = [
                    f'{size_rounded} MB', transcoded_bitrate
                ]
                os.makedirs(os.path.join(output_folder, 'Raw JSON Data'),
                            exist_ok=True)
                # os.path.join doesn't work with libvmaf's log_path option so we're manually defining the path with
                # slashes.
                json_file_path = f'{output_folder}/Raw JSON Data/CRF {crf}.json'
                run_libvmaf(transcode_output_path, args, json_file_path, fps,
                            original_video_path, factory)
                create_table_plot_metrics(comparison_table, json_file_path,
                                          args, decimal_places,
                                          data_for_current_row,
                                          graph_filename, table,
                                          output_folder, time_rounded, crf)
            with open(comparison_table, 'a') as f:
                f.write(f'\nFile Transcoded: (unknown)')
                f.write(f'\nBitrate: {original_bitrate}')
                f.write(f'\nEncoder used for the transcodes: {video_encoder}')
                f.write(f'\nPreset used for the transcodes: {preset}')
        # args.preset is a list when more than one preset is specified.
        elif is_list(args.preset):
            print('Presets comparison mode activated.')
            chosen_presets = args.preset
            presets_string = ', '.join(chosen_presets)
            crf = args.crf_value[0] if is_list(
                args.crf_value) else args.crf_value
            video_encoder = args.video_encoder
            print(
                f'Presets {presets_string} will be compared at a CRF of {crf}.'
            )
            # Cannot use os.path.join for output_folder as this gives an error like the following:
            # No such file or directory: '(2.mkv)\\Presets comparison at CRF 23/Raw JSON Data/superfast.json'
            output_folder = f'((unknown))/Presets comparison at CRF {crf}'
            os.makedirs(output_folder, exist_ok=True)
            comparison_table = os.path.join(output_folder, 'Table.txt')
            table_column_names.insert(0, 'Preset')
            # Set the names of the columns
            table.field_names = table_column_names
            # The user only wants to transcode the first x seconds of the video.
            if args.encode_length:
                original_video_path = cut_video(filename, args, output_ext,
                                                output_folder,
                                                comparison_table)
            # Transcode the video with each preset.
            for preset in chosen_presets:
                transcode_output_path = os.path.join(
                    output_folder, f'{preset}{output_ext}')
                graph_filename = f"Preset '{preset}'"
                arguments = EncodingArguments()
                arguments.infile = original_video_path
                arguments.encoder = Encoder[video_encoder]
                arguments.crf = str(crf)
                arguments.preset = preset
                arguments.outfile = transcode_output_path
                process = factory.create_process(arguments)
                line()
                print(f'Transcoding the video with preset {preset}...')
                timer.start()
                process.run()
                time_rounded = timer.end(decimal_places)
                print('Done!')
                transcode_size = os.path.getsize(
                    transcode_output_path) / 1_000_000
                transcoded_bitrate = provider.get_bitrate(
                    transcode_output_path)
                size_rounded = force_decimal_places(
                    round(transcode_size, decimal_places), decimal_places)
                data_for_current_row = [
                    f'{size_rounded} MB', transcoded_bitrate
                ]
                os.makedirs(os.path.join(output_folder, 'Raw JSON Data'),
                            exist_ok=True)
                # os.path.join doesn't work with libvmaf's log_path option so we're manually defining the path with
                # slashes.
                json_file_path = f'{output_folder}/Raw JSON Data/{preset}.json'
                run_libvmaf(transcode_output_path, args, json_file_path, fps,
                            original_video_path, factory)
                create_table_plot_metrics(comparison_table, json_file_path,
                                          args, decimal_places,
                                          data_for_current_row,
                                          graph_filename, table,
                                          output_folder, time_rounded, preset)
            with open(comparison_table, 'a') as f:
                f.write(f'\nFile Transcoded: (unknown)')
                f.write(f'\nBitrate: {original_bitrate}')
                f.write(f'\nEncoder used for the transcodes: {video_encoder}')
                f.write(f'\nCRF value used for the transcodes: {crf}')
    # -ntm argument was specified.
    else:
        line()
        output_folder = f'((unknown))'
        os.makedirs(output_folder, exist_ok=True)
        comparison_table = os.path.join(output_folder, 'Table.txt')
        table.field_names = table_column_names
        # os.path.join doesn't work with libvmaf's log_path option so we're manually defining the path with slashes.
        json_file_path = f'{output_folder}/QualityMetrics.json'
        # Run libvmaf to get the quality metric(s).
        run_libvmaf(args.transcoded_video_path, args, json_file_path, fps,
                    original_video_path, factory)
        transcode_size = os.path.getsize(
            args.transcoded_video_path) / 1_000_000
        size_rounded = force_decimal_places(
            round(transcode_size, decimal_places), decimal_places)
        transcoded_bitrate = provider.get_bitrate(args.transcoded_video_path)
        data_for_current_row = [f'{size_rounded} MB', transcoded_bitrate]
        graph_filename = 'The variation of the quality of the transcoded video throughout the video'
        # Create the table and plot the metrics if -dqm was not specified.
        # NOTE(review): this call omits the comparison_table first argument
        # that the other create_table_plot_metrics calls pass — confirm the
        # function signature handles both forms.
        create_table_plot_metrics(json_file_path, args, decimal_places,
                                  data_for_current_row, graph_filename,
                                  table, output_folder, time_rounded=None,
                                  crf_or_preset=None)
    line()
    print(f'All done! Check out the ((unknown)) folder.')
def vw_base_command(self, base):
    """Build the vw command line: start from `base` (a list of command
    fragments) and append one flag per configured parameter, returning the
    fragments joined into a single command string.

    Flag order is significant only for readability; each fragment already
    contains its own value (e.g. '-b 18').
    """
    l = base
    if self.params.get('bits') is not None:
        l.append('-b ' + str(int(self.params['bits'])))
    if self.params.get('learning_rate') is not None:
        l.append('--learning_rate ' + str(float(self.params['learning_rate'])))
    if self.params.get('l1') is not None:
        l.append('--l1 ' + str(float(self.params['l1'])))
    if self.params.get('l2') is not None:
        l.append('--l2 ' + str(float(self.params['l2'])))
    if self.params.get('initial_t') is not None:
        l.append('--initial_t ' + str(float(self.params['initial_t'])))
    if self.params.get('binary'):
        l.append('--binary')
    if self.params.get('link') is not None:
        l.append('--link ' + str(self.params['link']))
    # quadratic/cubic accept a single interaction spec or a list of specs;
    # a list emits one flag per spec.
    if self.params.get('quadratic') is not None:
        l.append(' '.join(['-q ' + str(s) for s in self.params['quadratic']])
                 if is_list(self.params['quadratic'])
                 else '-q ' + str(self.params['quadratic']))
    if self.params.get('cubic') is not None:
        l.append(' '.join(['--cubic ' + str(s) for s in self.params['cubic']])
                 if is_list(self.params['cubic'])
                 else '--cubic ' + str(self.params['cubic']))
    if self.params.get('power_t') is not None:
        l.append('--power_t ' + str(float(self.params['power_t'])))
    if self.params.get('loss') is not None:
        l.append('--loss_function ' + str(self.params['loss']))
    if self.params.get('decay_learning_rate') is not None:
        l.append('--decay_learning_rate ' + str(float(self.params['decay_learning_rate'])))
    if self.params.get('lda') is not None:
        l.append('--lda ' + str(int(self.params['lda'])))
    if self.params.get('lda_D') is not None:
        l.append('--lda_D ' + str(int(self.params['lda_D'])))
    if self.params.get('lda_rho') is not None:
        l.append('--lda_rho ' + str(float(self.params['lda_rho'])))
    if self.params.get('lda_alpha') is not None:
        l.append('--lda_alpha ' + str(float(self.params['lda_alpha'])))
    if self.params.get('minibatch') is not None:
        l.append('--minibatch ' + str(int(self.params['minibatch'])))
    if self.params.get('oaa') is not None:
        l.append('--oaa ' + str(int(self.params['oaa'])))
    if self.params.get('unique_id') is not None:
        l.append('--unique_id ' + str(int(self.params['unique_id'])))
    if self.params.get('total') is not None:
        l.append('--total ' + str(int(self.params['total'])))
    if self.params.get('node') is not None:
        l.append('--node ' + str(int(self.params['node'])))
    if self.params.get('threads'):
        l.append('--threads')
    if self.params.get('span_server') is not None:
        l.append('--span_server ' + str(self.params['span_server']))
    if self.params.get('mem') is not None:
        l.append('--mem ' + str(int(self.params['mem'])))
    if self.params.get('audit'):
        l.append('--audit')
    if self.params.get('bfgs'):
        l.append('--bfgs')
    # NOTE(review): 'termination' uses a truthiness check unlike the
    # `is not None` pattern above — a value of 0 would be skipped; confirm.
    if self.params.get('termination'):
        l.append('--termination ' + str(int(self.params['termination'])))
    if self.params.get('adaptive'):
        l.append('--adaptive')
    if self.params.get('nn') is not None:
        l.append('--nn ' + str(int(self.params['nn'])))
    if self.params.get('rank') is not None:
        l.append('--rank ' + str(int(self.params['rank'])))
    if self.params.get('lrq') is not None:
        l.append('--lrq ' + str(int(self.params['lrq'])))
    if self.params.get('lrqdropout'):
        l.append('--lrqdropout')
    if self.params.get('holdout_off'):
        l.append('--holdout_off')
    if self.params.get('quiet'):
        l.append('--quiet')
    # NOTE(review): `base` is mutated in place via append — callers passing
    # a shared list will see it grow.
    return ' '.join(l)
def plot_scatter(xs, ys, line_labels=None, xaxis_label=None, yaxis_label=None,
                 xticks=None, vlines=None, vlines_kwargs=None, hlines=None,
                 hlines_kwargs=None, x_sci=True, y_sci=True, y_lim=None,
                 legend_top=True, fig_size=None, ls_cycle=False, name=None):
    """Draw one or several scatter series and save the figure.

    xs/ys may be single sequences or lists of sequences; line_labels must be
    a list exactly when xs/ys are lists of sequences. Each series gets its
    own rainbow colour.
    NOTE(review): the `ls_cycle` parameter is currently unused.
    """
    # Either all of xs/ys/line_labels are "multiple" or none are.
    multiple_x = utils.is_list_of_list(xs)
    multiple_y = utils.is_list_of_list(ys)
    multiple_line_label = utils.is_list(line_labels)
    assert multiple_x == multiple_y == multiple_line_label
    fig, ax = plt.subplots()
    if fig_size and isinstance(fig_size, list) and len(fig_size) > 0:
        if len(fig_size) == 1:
            fig.set_figwidth(fig_size[0])
        else:
            fig.set_figwidth(fig_size[0])
            fig.set_figheight(fig_size[1])
    # One rainbow colour per series.
    colors = iter(cm.rainbow(np.linspace(0, 1, len(ys))))
    if multiple_x:
        for x, y, line_label in zip(xs, ys, line_labels):
            ax.scatter(x, y, label=line_label, c=next(colors))
    else:
        ax.scatter(xs, ys, label=line_labels, c=next(colors))
    if xticks and isinstance(xticks, list):
        x = xs[0] if multiple_x else xs
        plt.xticks(x, xticks)
    if y_lim and isinstance(y_lim, list) and len(y_lim) > 0:
        if len(y_lim) == 1:
            plt.ylim(ymin=y_lim[0])
        else:
            plt.ylim(ymin=y_lim[0])
            plt.ylim(ymax=y_lim[1])
    plt.xlim(xmin=0)
    ncol = len(xs) if multiple_x else 1
    utils.set_legend(legend_top, ncol)
    utils.set_sci_axis(ax, x_sci, y_sci)
    utils.set_axis_labels(ax, xaxis_label, yaxis_label)
    # Optional vertical/horizontal reference lines in the alternative palette.
    vlines = vlines or []
    for xvline in vlines:
        with ALTERNATIVE_PALETTE:
            plt.axvline(x=xvline, **vlines_kwargs)
    hlines = hlines or []
    for yhline in hlines:
        with ALTERNATIVE_PALETTE:
            plt.axhline(y=yhline, **hlines_kwargs)
    utils.finalize(ax)
    utils.save_fig(name)
original_video_path = concatenated_video else: exit_program('Something went wrong when trying to create the overview video.') # The -ntm argument was not specified. if not args.no_transcoding_mode: vmaf_scores = [] if video_encoder == 'x264': crf = '23' elif video_encoder == 'x265': crf = '28' elif video_encoder == 'libaom-av1': crf = '32' # CRF comparison mode. if is_list(args.crf) and len(args.crf) > 1: log.info('CRF comparison mode activated.') crf_values = args.crf crf_values_string = ', '.join(str(crf) for crf in crf_values) preset = args.preset[0] if is_list(args.preset) else args.preset log.info(f'CRF values {crf_values_string} will be compared and the {preset} preset will be used.') line() prev_output_folder, comparison_table, output_ext = create_output_folder_initialise_table('CRF') # The user only wants to transcode the first x seconds of the video. if args.encode_length: original_video_path = cut_video(filename, args, output_ext, prev_output_folder, comparison_table) for crf in crf_values: log.info(f'| CRF {crf} |')