def stats():
    """Collect decode-time stats for every factor combination and dump to JSON."""
    # Configure objects
    config = util.Config('config.json')

    # Base object
    video_seg = util.VideoSegment(config=config)
    video_seg.project = 'ffmpeg'
    video_seg.segment_base = 'segment'

    # Axes to iterate over
    decoders = ['ffmpeg', 'mp4client']
    q_factors = ['rate', 'qp']
    multithreads = ['single']

    times = dict()
    for decoder, name, fmt, factor, thread in product(
            decoders, config.videos_list, config.tile_list,
            q_factors, multithreads):
        video_seg.decoder = decoder
        video_seg.name = name
        video_seg.fmt = fmt
        video_seg.factor = factor
        video_seg.thread = thread
        video_seg.dectime_base = f'dectime_{video_seg.decoder}'
        video_seg.quality_list = getattr(config, f'{video_seg.factor}_list')

        for video_seg.quality in video_seg.quality_list:
            # NOTE(review): 'times' is rebound on every pass, so only the last
            # result reaches save_json — presumably collect_data accumulates
            # internally via video_seg; confirm.
            times = util.collect_data(video_seg=video_seg)

    util.save_json(times, 'times.json')
def hist():
    """
    Build one histogram series per evaluated factor.

    quality: 2000 kbps, 24000 kbps
    fmt: 1x1, 3x2, 6x4
    video: om_nom and rollercoaster
    Total: 2 x 3 x 2 = 12 histograms
    :return:
    """
    config = util.Config('Config.json')
    dectime = util.load_json('times.json')

    factors = (['om_nom', 'rollercoaster'],
               ['1x1', '3x2', '6x4'],
               [2000000, 24000000])

    # One list of per-chunk decode times ('ut') per (name, fmt, quality).
    times = []
    for name, fmt, quality in product(*factors):
        m, n = map(int, fmt.split('x'))
        series = []
        for tile, chunk in product(range(1, m * n + 1),
                                   range(1, config.duration + 1)):
            series.append(
                dectime['ffmpeg'][name][fmt]['rate'][str(quality)]
                [str(tile)][str(chunk)]['single']['times']['ut'])
        times.append(series)
def encode():
    """Encode every (video, tile format, quality) combination and segment it."""
    # Configure objects
    config = util.Config('config.json')
    sl = util.check_system()['sl']

    # Video object with its main folders
    video = util.VideoParams(config=config,
                             yuv=f'..{sl}yuv-full',
                             hevc_base='hevc',
                             mp4_base='mp4',
                             segment_base='segment',
                             dectime_base='dectime')

    # Basic configuration
    video.encoder = 'ffmpeg'
    video.project = 'ffmpeg_crf_18videos_60s'
    video.factor = 'crf'

    # Iterate over 3 factors: video (complexity), tile format, quality
    for video.name in config.videos_list:
        for video.tile_format in config.tile_list:
            for video.quality in getattr(config, f'{video.factor}_list'):
                util.encode(video)
                # util.encapsule(video)
                # util.extract_tile(video)
                util.make_segments(video)
def hist():
    """
    Plot decode times per evaluated factor.

    quality: 2000 kbps, 24000 kbps
    fmt: 1x1, 3x2, 6x4
    video: om_nom and rollercoaster
    Total: 2 x 3 x 2 = 12 histograms
    :return:
    """
    config = util.Config('Config.json')
    dectime = util.load_json('times.json')

    for name in config.videos_list:
        for fmt in config.tile_list:
            m, n = map(int, fmt.split('x'))
            for quality in config.rate_list:
                times = []
                sizes = []
                for tile in range(1, m * n + 1):
                    for chunk in range(1, config.duration + 1):
                        record = (dectime['ffmpeg'][name][fmt]['rate']
                                  [str(quality)][str(tile)][str(chunk)]
                                  ['single'])
                        times.append(record['times']['ut'])
                        sizes.append(record['size'])
                plt.close()
                # plt.hist(times, bins=20)
                plt.plot(times)
                plt.show()
                # os.makedirs('hist', exist_ok=True)
                # plt.savefig(f'hist{sl}{name}_{fmt}_rate{quality}')
                print('ok')
def encode():
    """Encode the selected videos over every tile format and quality value."""
    config = util.Config('config.json')
    sl = util.check_system()['sl']

    # NOTE(review): the project name says "qp" while factor is 'crf' — confirm.
    video = util.VideoParams(config=config,
                             yuv=f'..{sl}yuv-full',
                             hevc_base='hevc',
                             mp4_base='mp4',
                             segment_base='segment',
                             dectime_base='dectime',
                             project='ffmpeg-60s-qp',
                             encoder='ffmpeg',
                             factor='crf')

    # for video.name in config.videos_list:
    for video.name in ['om_nom', 'rollercoaster']:
        for video.tile_format in config.tile_list:
            for video.quality in getattr(config, f'{video.factor}_list'):
                util.encode(video)
def decode():
    """Decode every segment for each decoder/thread/factor combination."""
    # Configure objects
    config = util.Config('config.json')

    # "video" object with its main folders
    # NOTE(review): 'sl' is not defined in this function — presumably a
    # module-level path-separator global; confirm.
    video = util.VideoParams(config=config, yuv=f'..{sl}yuv-10s')
    video.project = 'ffmpeg'

    decoders = ['ffmpeg', 'mp4client']
    threads = ['single']  # 'single' or 'multi'
    factor = ['rate', 'qp']

    combos = itertools.product(decoders, config.videos_list,
                               config.tile_list, factor, threads)
    for (video.decoder, video.name, video.tile_format,
         video.factor, video.threads) in combos:
        video.dectime_base = f'dectime_{video.decoder}'
        video.quality_list = getattr(config, f'{video.factor}_list')
        for video.quality in video.quality_list:
            util.decode(video=video)
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import proto_train, proto_eval, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/fgvc_fewshot', args=args)
config = util.Config(args, name)

train_loader = dataloader.meta_train_dataloader(data_path=pm.support,
                                                shots=config.shots,
                                                way=config.way)

# Input resolution follows the backbone choice.
res = 224 if args.resnet else 84
oid_path = '../../dataset/oid_fewshot/res_%d' % (res)
oid_loader = dataloader.oid_dataloader(oid_path, args.batch_size)

model = networks.Proto_PN_less_annot(num_part=args.num_part,
                                     way=config.way,
                                     shots=config.shots,
                                     resnet=args.resnet)
model.cuda()
import sys import torch import numpy as np from functools import partial sys.path.append('../../') from utils import proto_train,proto_eval,networks,dataloader,util args,name = util.train_parser() pm = util.Path_Manager('../../dataset/cub_fewshot',args=args) config = util.Config(args,name, train_annot='bbx') train_loader = dataloader.meta_train_dataloader(data_path=pm.support, shots=config.shots, way=config.way, annot=config.train_annot, annot_path=pm.annot_path) model = networks.Proto_bbN(num_part=args.num_part, way=config.way, shots=config.shots, resnet=args.resnet) model.cuda() train_func = partial(proto_train.bbN_train,train_loader=train_loader,alpha=args.alpha) eval_func = proto_eval.default_eval tm = util.Train_Manager(args,pm,config,
#!/bin/env python3
"""Configure input/output folders and the video list for a qp-factor run."""
from utils import util
import itertools

sl = util.check_system()['sl']
config = util.Config('config.json', factor='qp')

i_folder = f'..{sl}yuv-full'
# Bug fix: the project name was interpolated as a (undefined) Python
# expression inside the f-string, raising NameError at import time;
# it is a literal folder name.
o_folder = f'results{sl}ffmpeg_4videos_1x1_compare-quality'

# Source videos: filename, start timestamp inside the master file, and group.
videos_list = dict(
    om_nom={
        "filename": "om_nom_4320x2160_30.yuv",
        "time": "0:10",
        "group": "0"
    },
    elephants={
        "filename": "elephants_4320x2160_30.yuv",
        "time": "1:00",
        "group": "1"
    },
    ski={
        "filename": "ski_4320x2160_30.yuv",
        "time": "0:40",
        "group": "2"
    },
    rollercoaster={
        "filename": "rollercoaster_4320x2160_30.yuv",
        "time": "1:30",
        "group": "3"
    })
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import dynamic_train, dynamic_eval, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)

# Stage-2: 20-shot episodes with part annotation for train and eval.
config = util.Config(args=args,
                     name=name,
                     suffix='stage_2',
                     shots=[20],
                     train_annot='part',
                     eval_annot='part')

train_loader = dataloader.meta_train_dataloader(data_path=pm.support,
                                                way=config.way,
                                                shots=config.shots,
                                                annot=config.train_annot,
                                                annot_path=pm.annot_path)
num_class = len(train_loader.dataset.classes)

model = networks.Dynamic_PN_gt(num_class=num_class,
                               num_part=args.num_part,
                               way=config.way,
                               shots=config.shots,
                               resnet=args.resnet)
def graph4():
    """
    Compare, tile by tile, bitrate and decode time across qualities.
    :return:
    """
    config = util.Config('Config.json')
    dectime = util.load_json('times.json')
    dirname = 'graph4'
    os.makedirs(f'{dirname}', exist_ok=True)

    def chunk_record(name, fmt, quality, tile, chunk):
        # One chunk entry in the dectime tree (rate-controlled, single-thread).
        return (dectime['ffmpeg'][name][fmt]['rate'][str(quality)]
                [str(tile)][str(chunk)]['single'])

    for fmt in config.tile_list:
        m, n = map(int, fmt.split('x'))
        for tile in range(1, m * n + 1):
            times = util.AutoDict()
            sizes = util.AutoDict()
            times_a_ld, times_a_hd = [], []
            sizes_a_ld, sizes_a_hd = [], []
            times_b_ld, times_b_hd = [], []
            sizes_b_ld, sizes_b_hd = [], []

            for chunk in range(1, config.duration + 1):
                rec = chunk_record('om_nom', fmt, 2000000, tile, chunk)
                times_a_ld.append(rec['times']['ut'])
                sizes_a_ld.append(rec['size'])
                rec = chunk_record('om_nom', fmt, 24000000, tile, chunk)
                times_a_hd.append(rec['times']['ut'])
                sizes_a_hd.append(rec['size'])
                rec = chunk_record('rollercoaster', fmt, 2000000, tile, chunk)
                times_b_ld.append(rec['times']['ut'])
                sizes_b_ld.append(rec['size'])
                rec = chunk_record('rollercoaster', fmt, 24000000, tile, chunk)
                times_b_hd.append(rec['times']['ut'])
                sizes_b_hd.append(rec['size'])

            plt.close()

            # Figure 1: histogram (top) and CDF (bottom) of decode times.
            fig, ax = plt.subplots(2, 1, figsize=(10, 6), dpi=100)
            ax[0].hist(times_a_ld, bins=10, histtype='step',
                       label=f'Om_non_{fmt}_rate2000000')
            ax[0].hist(times_a_hd, bins=10, histtype='step',
                       label=f'Om_non_{fmt}_rate24000000')
            ax[0].hist(times_b_ld, bins=10, histtype='step',
                       label=f'rollercoaster_{fmt}_rate2000000')
            ax[0].hist(times_b_hd, bins=10, histtype='step',
                       label=f'rollercoaster_{fmt}_rate24000000')
            ax[0].legend(loc='upper left', ncol=1, bbox_to_anchor=(1.01, 1.0))
            ax[0].set_title(f'Tile {tile}')
            ax[0].set_xlabel('Times')
            ax[0].set_ylabel("Occurrence")

            ax[1].hist(times_a_ld, bins=10, density=True, cumulative=True,
                       histtype='step', label=f'Om_non_{fmt}_rate2000000')
            ax[1].hist(times_a_hd, bins=10, density=True, cumulative=True,
                       histtype='step', label=f'Om_non_{fmt}_rate24000000')
            ax[1].hist(times_b_ld, bins=10, density=True, cumulative=True,
                       histtype='step', label=f'rollercoaster_{fmt}_rate2000000')
            ax[1].hist(times_b_hd, bins=10, density=True, cumulative=True,
                       histtype='step', label=f'rollercoaster_{fmt}_rate24000000')
            ax[1].legend(loc='upper left', ncol=1, bbox_to_anchor=(1.01, 1.0))
            ax[1].set_xlabel('Times')
            ax[1].set_ylabel("CDF")

            plt.tight_layout()
            plt.savefig(f'{dirname}{sl}hist_{fmt}_tile{tile}')
            # plt.show()
            print(f'hist_{fmt}_tile{tile}')

            plt.close()

            # Figure 2: per-chunk decode-time bars (top) and sizes (bottom).
            fig, ax = plt.subplots(2, 1, figsize=(8, 6), dpi=100)
            ax[0].bar(np.array(range(len(times_a_ld))) - 0.3, times_a_ld,
                      width=0.2, label=f'om_nom-{fmt}-rate{2000000}')
            ax[0].bar(np.array(range(len(times_a_hd))) - 0.1, times_a_hd,
                      width=0.2, label=f'om_nom-{fmt}-rate{24000000}')
            ax[0].bar(np.array(range(len(times_b_ld))) + 0.1, times_b_ld,
                      width=0.2, label=f'rollercoaster-{fmt}-rate{2000000}')
            ax[0].bar(np.array(range(len(times_b_hd))) + 0.3, times_b_hd,
                      width=0.2, label=f'rollercoaster-{fmt}-rate{24000000}')
            ax[0].set_title(f'Tile {tile} - Atrasos')
            ax[0].set_ylabel("Time")

            ax[1].plot(sizes_a_ld, label=f'om_nom-{fmt}-rate{2000000}')
            ax[1].plot(sizes_a_hd, label=f'om_nom-{fmt}-rate{24000000}')
            ax[1].plot(sizes_b_ld, label=f'rollercoaster-{fmt}-rate{2000000}')
            ax[1].plot(sizes_b_hd, label=f'rollercoaster-{fmt}-rate{24000000}')
            ax[1].set_title(f'Tile {tile} - Taxas')
            ax[1].set_xlabel("Chunk")
            ax[1].set_ylabel("Time")

            ax[0].legend(loc='upper left', ncol=1, bbox_to_anchor=(1.01, 1.0))
            ax[1].legend(loc='upper left', ncol=1, bbox_to_anchor=(1.01, 1.0))
            plt.tight_layout()
            plt.savefig(f'{dirname}{sl}graph_{fmt}_tile{tile}')
            # plt.show()
            print(f'graph_{fmt}_tile{tile}')
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import dynamic_train, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args=args, name=name, suffix='stage_1')

train_loader = dataloader.normal_train_dataloader(data_path=pm.support,
                                                  batch_size=args.batch_size)
num_class = len(train_loader.dataset.classes)

model = networks.Dynamic(num_class=num_class, resnet=args.resnet)
model.cuda()

train_func = partial(dynamic_train.train_stage_1, train_loader=train_loader)

tm = util.Train_Manager(args, pm, config, train_func=train_func)
tm.train(model)
import sys

import torch
import torch.nn as nn
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import transfer_train, transfer_eval, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args=args, name=name, suffix='cub')

train_loader = dataloader.normal_train_dataloader(data_path=pm.test_refer,
                                                  batch_size=args.batch_size)
num_class = len(train_loader.dataset.classes)

model = networks.Transfer_PN(num_part=args.num_part, resnet=args.resnet)
model.cuda()
model.load_state_dict(torch.load(args.load_path))
# Replace the classifier head so it matches the fine-tune class count.
model.linear_classifier = nn.Linear(model.dim, num_class).cuda()

train_func = partial(transfer_train.default_train, train_loader=train_loader)

tm = util.TM_transfer_PN_finetune(args, pm, config, train_func=train_func)
tm.train(model)

transfer_eval.eval_test(model, pm, config)
def graph1() -> None:
    """
    chunks X dec_time (seconds) and chunks X file_size (Bytes)

    Saves one figure per (video, factor, quality, thread, tile format)
    under 'graph1'.
    :return:
    """
    dirname = 'graph1'
    config = util.Config('config.json')
    dectime = util.load_json('times.json')
    # decoders = ['ffmpeg', 'mp4client']
    factors = ['rate']
    threads = ['single']

    # for decoder in decoders:
    for name in config.videos_list:
        for factor in factors:
            for quality in getattr(config, f'{factor}_list'):
                quality = np.array(quality)
                for thread in threads:
                    for fmt in config.tile_list:
                        m, n = map(int, fmt.split('x'))
                        plt.close()
                        fig, ax = plt.subplots(1, 2, figsize=(18, 6))
                        for tile in range(1, m * n + 1):
                            size = []
                            time_ffmpeg = []
                            for chunk in range(1, config.duration + 1):
                                record = (dectime['ffmpeg'][name][fmt][factor]
                                          [str(quality)][str(tile)]
                                          [str(chunk)][thread])
                                size.append(record['size'])
                                time_ffmpeg.append(record['times']['ut'])
                            ax[0].plot(time_ffmpeg,
                                       label=f'ffmpeg_tile={tile}_ffmpeg')
                            ax[1].plot(size, label=f'tile={tile}')

                        # Bug fix: was `factor in 'rate'`, a substring test that
                        # also matches '', 'ate', 'rat'; an equality test is meant.
                        # Per-tile rate = total rate divided by the tile count.
                        quality_ind = quality
                        if factor == 'rate':
                            quality_ind = int(quality / (m * n))

                        ax[0].set_xlabel('Chunks')
                        ax[1].set_xlabel('Chunks')
                        ax[0].set_ylabel('Time')
                        ax[1].set_ylabel('Rate')
                        ax[0].set_title(
                            f'ffmpeg - {name} - Times by chunks, tile={fmt}, {factor}={quality_ind}'
                        )
                        ax[1].set_title(
                            f'{name} - Rates by chunks, tile={fmt}, {factor}={quality_ind}'
                        )
                        # ax[0].set_ylim(bottom=0)
                        ax[1].set_ylim(bottom=0)
                        ax[1].legend(loc='upper left', ncol=2,
                                     bbox_to_anchor=(1.01, 1.0))
                        plt.tight_layout()
                        os.makedirs(dirname, exist_ok=True)
                        print(
                            f'Salvando {dirname}{sl}{name}_{fmt}_{factor}={quality_ind}.'
                        )
                        fig.savefig(
                            f'{dirname}{sl}{name}_{fmt}_{factor}={quality_ind}'
                        )
                        # fig.show()
import subprocess

from utils import util

config = util.Config('config.json')
sl = util.check_system()['sl']
video = util.VideoParams(config)
config.duration = '60'

original = f'..{sl}original'
yuv_forders_10s = f'..{sl}yuv-10s'
yuv_forders_60s = f'..{sl}yuv-full'
util.makedir(f'{yuv_forders_10s}')
util.makedir(f'{yuv_forders_60s}')

scale = config.scale
fps = config.fps

for name in config.videos_list:
    start_time = config.videos_list[name]['time']
    out_name = f'{name}_{scale}_{fps}.yuv'
    in_name = f'{original}{sl}{name}.mp4'

    par_in = f'-y -hide_banner -v quiet -ss {start_time} -i {in_name}'

    # 10-second excerpt.
    par_out_10s = f'-t 10 -r {fps} -vf scale={scale} -map 0:0 ..{sl}yuv-10s{sl}{out_name}'
    command = f'ffmpeg {par_in} {par_out_10s}'
    print(command)
    subprocess.run(command, shell=True, stderr=subprocess.STDOUT)

    # NOTE(review): the 60-second parameters are built, but no ffmpeg call for
    # them is visible in this chunk — confirm it exists further down.
    par_out_60s = f'-t 60 -r {fps} -vf scale={scale} -map 0:0 ..{sl}yuv-full{sl}{out_name}'
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import transfer_train, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args=args, name=name, suffix='base', train_annot='part')

train_loader = dataloader.normal_train_dataloader(data_path=pm.support,
                                                  batch_size=args.batch_size,
                                                  annot=config.train_annot,
                                                  annot_path=pm.annot_path)

model = networks.Transfer_PN(num_part=args.num_part, resnet=args.resnet)
model.cuda()

train_func = partial(transfer_train.PN_train,
                     train_loader=train_loader,
                     alpha=args.alpha)

tm = util.Train_Manager(args, pm, config, train_func=train_func)
tm.train(model)
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import proto_train, proto_eval, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args, name,
                     shots=[5, 5, 10],
                     train_annot='bbx',
                     eval_annot='bbx')

train_loader = dataloader.meta_train_dataloader(data_path=pm.support,
                                                shots=config.shots,
                                                way=config.way,
                                                annot=config.train_annot,
                                                annot_path=pm.annot_path)

model = networks.Proto_FSL(way=config.way,
                           shots=config.shots,
                           resnet=args.resnet)
model.cuda()

train_func = partial(proto_train.default_train, train_loader=train_loader)
eval_func = proto_eval.default_eval
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import dynamic_train, dynamic_eval, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args=args, name=name, suffix='stage_2', shots=[20])

train_loader = dataloader.meta_train_dataloader(data_path=pm.support,
                                                way=config.way,
                                                shots=config.shots)
num_class = len(train_loader.dataset.classes)

model = networks.Dynamic_PN(num_class=num_class,
                            num_part=args.num_part,
                            way=config.way,
                            shots=config.shots,
                            resnet=args.resnet)
model.cuda()
# Warm-start stage 2 from the checkpoint given on the command line.
model.load_state_dict(torch.load(args.load_path))

train_func = partial(dynamic_train.train_stage_2, train_loader=train_loader)
eval_func = dynamic_eval.default_eval
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import transfer_train, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args=args, name=name, suffix='base')

train_loader = dataloader.normal_train_dataloader(data_path=pm.support,
                                                  batch_size=args.batch_size)

model = networks.Transfer(resnet=args.resnet)
model.cuda()

train_func = partial(transfer_train.default_train, train_loader=train_loader)

tm = util.Train_Manager(args, pm, config, train_func=train_func)
tm.train(model)
def graph3() -> None:
    """
    bar fmt X average_dec_time (seconds) and fmt X average_rate (Bytes)

    One grouped-bar figure per (video, factor, thread) saved under 'graph3'.
    :return: None
    """
    dirname = 'graph3'
    config = util.Config('config.json')
    dectime = util.load_json('times.json')
    # decoders = ['ffmpeg', 'mp4client']
    factors = ['rate']
    threads = ['single']

    # for decoder in decoders:
    for name in config.videos_list:
        for factor in factors:
            for thread in threads:
                df = pd.DataFrame()
                plt.close()
                fig, ax = plt.subplots(2, 1, figsize=(8, 5))
                quality_list = getattr(config, f'{factor}_list')
                offset = 0
                for quality in quality_list:
                    average_size = []
                    std_size = []
                    average_time = []
                    std_time = []
                    width = 0.8 / len(quality_list)
                    start_position = (0.8 - width) / 2
                    for fmt in config.tile_list:
                        m, n = map(int, fmt.split('x'))
                        size = []
                        time = []
                        for tile in range(1, m * n + 1):
                            for chunk in range(1, config.duration + 1):
                                record = (dectime['ffmpeg'][name][fmt][factor]
                                          [str(quality)][str(tile)]
                                          [str(chunk)][thread])
                                size.append(record['size'])
                                time.append(record['times']['ut'])
                        average_size.append(np.average(size))
                        std_size.append(np.std(size))
                        average_time.append(np.average(time))
                        std_time.append(np.std(time))
                    # Shift each quality's bar group so groups sit side by side.
                    x = (np.array(range(1, len(average_time) + 1))
                         - start_position + offset)
                    offset += width
                    ax[0].bar(x, average_time, width=width, yerr=std_time,
                              label=f'rate_total={quality}')
                    ax[1].bar(x, average_size, width=width, yerr=std_size,
                              label=f'rate_total={quality}')
                    df[f'times_{name}_{quality}'] = average_time

                # Bug fix: set_xticks must run before set_xticklabels —
                # the original order attached the labels to the default ticks.
                ticks = np.array(range(1, len(config.tile_list) + 1))
                ax[0].set_xticks(ticks)
                ax[0].set_xticklabels(config.tile_list)
                ax[1].set_xticks(ticks)
                ax[1].set_xticklabels(config.tile_list)
                ax[0].set_xlabel('Tile')
                ax[1].set_xlabel('Tile')
                ax[0].set_ylabel('Average Time')
                ax[1].set_ylabel('Average Rate')
                ax[0].set_title(f'{name} - Times by tiles, {factor}')
                ax[1].set_title(f'{name} - Rates by tiles, {factor}')
                ax[0].set_ylim(bottom=0)
                ax[1].set_ylim(bottom=0)
                ax[0].legend(loc='upper left', ncol=1,
                             bbox_to_anchor=(1.01, 1.0))
                ax[1].legend(loc='upper left', ncol=1,
                             bbox_to_anchor=(1.01, 1.0))
                plt.tight_layout()
                os.makedirs(dirname, exist_ok=True)
                print(f'Salvando {dirname}{sl}{name}_{factor}.')
                fig.savefig(f'{dirname}{sl}{name}_{factor}')
                # plt.show()
#!/bin/env python3 from utils import util cfg = util.Config('config.json', factor='crf') sl = cfg.sl cfg.videos_list = { "ball": {}, "elephants": {}, "lions": {}, "manhattan": {}, "om_nom": {}, "pluto": {}, "ski": {}, "super_mario": {} } project = (f'ffmpeg_{cfg.factor}_{len(cfg.videos_list)}videos_' f'{cfg.duration}s') yuv_input = f'..{sl}yuv-full' server = False if server: gpds = f'{sl}mnt{sl}ssd{sl}henrique{sl}' else: gpds = '' output = f'{gpds}results{sl}{project}' def main():
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import dynamic_train, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args=args,
                     name=name,
                     suffix='stage_1',
                     train_annot='part')

train_loader = dataloader.normal_train_dataloader(data_path=pm.support,
                                                  batch_size=args.batch_size,
                                                  annot=config.train_annot,
                                                  annot_path=pm.annot_path)
num_class = len(train_loader.dataset.classes)

model = networks.Dynamic_PN_gt(num_class=num_class,
                               num_part=args.num_part,
                               resnet=args.resnet)
model.cuda()

train_func = partial(dynamic_train.train_stage_1, train_loader=train_loader)
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import proto_train, proto_eval, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args, name,
                     train_annot='part',
                     suffix="percent_%d-bz_%d" % (args.percent, args.batch_size))

train_loader = dataloader.proto_train_less_annot_dataloader(
    data_path=pm.support,
    shots=config.shots,
    way=config.way,
    annot_path=pm.annot_path,
    percent=args.percent,
    batch_size=args.batch_size)

model = networks.Proto_PN_less_annot(num_part=args.num_part,
                                     way=config.way,
                                     shots=config.shots,
                                     resnet=args.resnet)
model.cuda()
import sys

import torch
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import proto_train, proto_eval, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args, name, train_annot='part')

train_loader = dataloader.meta_train_dataloader(data_path=pm.support,
                                                shots=config.shots,
                                                way=config.way,
                                                annot=config.train_annot,
                                                annot_path=pm.annot_path)

model = networks.Proto_MT(num_part=args.num_part,
                          way=config.way,
                          shots=config.shots,
                          resnet=args.resnet)
model.cuda()

train_func = partial(proto_train.PN_train,
                     train_loader=train_loader,
                     alpha=args.alpha)
eval_func = proto_eval.default_eval
import sys

import torch
import torch.nn as nn
import numpy as np
from functools import partial

sys.path.append('../../')
from utils import transfer_train, transfer_eval, networks, dataloader, util

args, name = util.train_parser()
pm = util.Path_Manager('../../dataset/cub_fewshot', args=args)
config = util.Config(args=args,
                     name=name,
                     suffix='cub',
                     train_annot='part',
                     eval_annot='part')

train_loader = dataloader.normal_train_dataloader(data_path=pm.test_refer,
                                                  batch_size=args.batch_size,
                                                  annot=config.train_annot,
                                                  annot_path=pm.annot_path)
num_class = len(train_loader.dataset.classes)

model = networks.Transfer_PN_gt(num_part=args.num_part, resnet=args.resnet)
model.cuda()
model.load_state_dict(torch.load(args.load_path))
# Swap in a fresh classifier head sized for the fine-tune classes.
model.linear_classifier = nn.Linear(model.dim, num_class).cuda()

train_func = partial(transfer_train.default_train, train_loader=train_loader)