def check_downloaded(p):
    """Fetch and unpack a published pre-trained experiment archive, if any.

    Builds the archive name from the experiment parameters in ``p`` and, when
    that name is one of the known published models and the corresponding
    checkpoint is not already on disk, downloads the ``.tgz`` and extracts it
    into ``Path.exp_dir()``.

    Returns:
        bool: True when an archive was downloaded and extracted, else False.
    """
    # Known published archives (adjacent string literals concatenate to one name).
    available = {
        'pascal_mnet_edge_semseg_human_parts_normals_sal_'
        'arch-mnetv2_pretr-imagenet_trBatch-16_lr-0.001_epochs-130_trNorm_poly_seenc_sedec_edge_w-0.95_130'
    }

    archive_name = p['exp_folder_name'] + '_' + p['tasks_name'] + '_' + p['exp_name'] + '_' + str(p['resume_epoch'])
    archive_path = os.path.join(Path.exp_dir(), archive_name + '.tgz')

    def _report(count, block_size, total_size):
        # Progress callback for urlretrieve: overwrite one console line.
        sys.stdout.write('\r>> %s %.1f%%' % (archive_path, float(count * block_size) / float(total_size) * 100.0))
        sys.stdout.flush()

    if archive_name not in available:
        return False

    # Nothing to do when the resumed checkpoint already exists locally.
    checkpoint_path = os.path.join(p['save_dir'], 'models',
                                   'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')
    if os.path.isfile(checkpoint_path):
        return False

    url = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/astmt/{}.tgz'.format(archive_name)
    urllib.request.urlretrieve(url, archive_path, _report)

    # extract file
    print('\nExtracting tar file')
    previous_cwd = os.getcwd()
    tar = tarfile.open(archive_path)
    os.chdir(Path.exp_dir())
    tar.extractall()
    tar.close()
    os.chdir(previous_cwd)
    print('Done!')
    return True
def main():
    """Gather PASCAL-Context results for every 'edge*' experiment folder."""
    exp_root_dir = os.path.join(Path.exp_dir(), 'pascal_se')

    edge_dirs = glob.glob(os.path.join(exp_root_dir, 'edge*'))

    p = {}
    for edge_dir in edge_dirs:
        # glob already returns paths rooted at exp_root_dir; the original
        # os.path.join(exp_root_dir, edge_dir) duplicated the prefix whenever
        # exp_root_dir was relative (it was a no-op only for absolute roots).
        p['save_dir_root'] = edge_dir
        # sync_and_evaluate_subfolders(p, 'NYUD')
        gather_results(p, 'PASCALContext')
def main():
    """Evaluate albedo results of every FSV experiment sub-folder."""
    from fblib.util.mypath import Path
    database = 'FSV'
    save_dir = os.path.join(Path.exp_dir(), 'fsv_se/albedo')

    # Evaluate all sub-folders. os.path.basename is used instead of
    # x.split('/')[-1], which is non-portable and breaks on trailing slashes.
    exp_names = [os.path.basename(x) for x in glob.glob(os.path.join(save_dir, '*'))]

    for exp_name in exp_names:
        # Only folders that already contain albedo results are evaluated.
        if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'albedo')):
            print('Evaluating: {}'.format(exp_name))
            try:
                eval_and_store_albedo(database, save_dir, exp_name)
            except FileNotFoundError:
                # Best-effort: partial result folders are skipped, not fatal.
                print('Results of {} are not ready'.format(exp_name))
def main():
    """Evaluate human-parts results of every PASCAL-Context experiment sub-folder."""
    from fblib.util.mypath import Path
    database = 'PASCALContext'
    save_dir = os.path.join(Path.exp_dir(), 'pascal_se/edge_semseg_human_parts_normals_sal')

    # Evaluate all sub-folders. os.path.basename is used instead of
    # x.split('/')[-1], which is non-portable and breaks on trailing slashes.
    exp_names = [os.path.basename(x) for x in glob.glob(os.path.join(save_dir, '*'))]

    for exp_name in exp_names:
        # Only folders that already contain human_parts results are evaluated.
        if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'human_parts')):
            print('Evaluating: {}'.format(exp_name))
            try:
                eval_and_store_human_parts(database, save_dir, exp_name)
            except FileNotFoundError:
                # Best-effort: partial result folders are skipped, not fatal.
                print('Results of {} are not ready'.format(exp_name))
def create_config():
    """Build the experiment configuration (an ``edict``) from CLI arguments.

    Parses the five-element ``active_tasks`` flag vector into per-task
    booleans, copies training/modulation/discriminator hyper-parameters into
    the config, derives experiment/save-dir names via ``get_exp_name``, and
    fills per-task constants (output channels, loss multipliers, interpolation
    flags). Raises ValueError when no task is selected.
    """
    cfg = edict()
    args = parse_args()

    # Parse tasks: active_tasks is a fixed-order 5-vector
    # [edge, semseg, human_parts, normals, sal].
    assert (len(args.active_tasks) == 5)
    args.do_edge = args.active_tasks[0]
    args.do_semseg = args.active_tasks[1]
    args.do_human_parts = args.active_tasks[2]
    args.do_normals = args.active_tasks[3]
    args.do_sal = args.active_tasks[4]

    print('\nThis script was run with the following parameters:')
    for x in vars(args):
        print('{}: {}'.format(x, str(getattr(args, x))))

    cfg.resume_epoch = args.resume_epoch

    cfg.DO_EDGE = args.do_edge
    cfg.DO_SEMSEG = args.do_semseg
    cfg.DO_HUMAN_PARTS = args.do_human_parts
    cfg.DO_NORMALS = args.do_normals
    cfg.DO_SAL = args.do_sal

    if not cfg.DO_EDGE and not cfg.DO_SEMSEG and not cfg.DO_HUMAN_PARTS and not cfg.DO_NORMALS and not cfg.DO_SAL:
        raise ValueError("Select at least one task")

    # Generic training hyper-parameters, copied verbatim from the parser.
    cfg['arch'] = args.arch
    cfg['pretr'] = args.pretr
    cfg['trBatch'] = args.trBatch
    cfg['lr'] = args.lr
    cfg['lr_dec'] = args.lr_dec
    cfg['wd'] = args.wd
    cfg['cls'] = args.cls
    cfg['epochs'] = args.epochs
    cfg['stride'] = args.stride
    cfg['trNorm'] = args.trNorm
    cfg['dec_w'] = args.dec_w

    # Set Modulation (Squeeze and Exciation, Residual Adapters) parameters
    cfg['seenc'] = args.seenc
    cfg['sedec'] = args.sedec
    cfg['adapters'] = args.adapt

    # Per-task normalization is tied to SE-decoder modulation.
    if cfg['sedec']:
        cfg['norm_per_task'] = True
    else:
        cfg['norm_per_task'] = False

    # The CLI passes the literal string 'None' to disable the discriminator.
    if args.dscr == 'None':
        args.dscr = None

    cfg['dscr_type'] = args.dscr
    cfg['lr_dscr'] = args.lr_dscr
    cfg['dscr_w'] = args.dscr_w
    cfg['dscrd'] = args.dscrd
    cfg['dscrk'] = args.dscrk

    task_args, name_args = get_exp_name(args)

    cfg['exp_folder_name'] = 'pascal_resnet'
    cfg['exp_name'] = "_".join(name_args)
    cfg['tasks_name'] = "_".join(task_args)
    cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'], cfg['tasks_name'])

    # Dataset selection: VOC12(+SBD) or full PASCAL-Context.
    if args.onlyVOC:
        cfg['train_db_name'] = ['VOC12', 'SBD']
        cfg['test_db_name'] = 'VOC12'
        cfg['infer_db_names'] = ['VOC12', ]
    else:
        cfg['train_db_name'] = ['PASCALContext', ]
        cfg['test_db_name'] = 'PASCALContext'
        cfg['infer_db_names'] = ['PASCALContext', ]

    # Which tasks?
    cfg.TASKS = edict()
    cfg.TASKS.NAMES = []
    cfg.TASKS.NUM_OUTPUT = {}  # How many outputs per task?
    cfg.TASKS.TB_MIN = {}      # Tensorboard visualization range (min)
    cfg.TASKS.TB_MAX = {}      # Tensorboard visualization range (max)
    cfg.TASKS.LOSS_MULT = {}   # Per-task loss weighting
    cfg.TASKS.FLAGVALS = {'image': cv2.INTER_CUBIC}  # Interpolation during training transforms
    cfg.TASKS.INFER_FLAGVALS = {}                    # Interpolation when resizing predictions

    if cfg.DO_EDGE:
        # Edge Detection
        print('Adding task: Edge Detection')
        tmp = 'edge'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 1
        cfg.TASKS.TB_MIN[tmp] = 0
        cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp]
        cfg.TASKS.LOSS_MULT[tmp] = 50
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR

        # Add task-specific parameters from parser
        cfg['edge_w'] = args.edge_w
        cfg['eval_edge'] = False

    if cfg.DO_SEMSEG:
        # Semantic Segmentation (21 PASCAL classes incl. background)
        print('Adding task: Semantic Segmentation')
        tmp = 'semseg'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 21
        cfg.TASKS.TB_MIN[tmp] = 0
        cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
        cfg.TASKS.LOSS_MULT[tmp] = 1
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST

    if cfg.DO_HUMAN_PARTS:
        # Human Parts Segmentation
        print('Adding task: Human Part Segmentation')
        tmp = 'human_parts'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 7
        cfg.TASKS.TB_MIN[tmp] = 0
        cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
        cfg.TASKS.LOSS_MULT[tmp] = 2
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST

    if cfg.DO_NORMALS:
        # Surface Normals estimation (3-channel regression in [-1, 1])
        print('Adding task: Normals')
        tmp = 'normals'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 3
        cfg.TASKS.TB_MIN[tmp] = -1
        cfg.TASKS.TB_MAX[tmp] = 1
        cfg.TASKS.LOSS_MULT[tmp] = 10
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_CUBIC
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
        cfg['normloss'] = 1  # Hard-coded L1 loss for normals

    if cfg.DO_SAL:
        # Saliency Estimation
        print('Adding task: Saliency')
        tmp = 'sal'
        cfg.TASKS.NAMES.append(tmp)
        cfg.TASKS.NUM_OUTPUT[tmp] = 1
        cfg.TASKS.TB_MIN[tmp] = 0
        cfg.TASKS.TB_MAX[tmp] = 1
        cfg.TASKS.LOSS_MULT[tmp] = 5
        cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
        cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR

    # Task-branch learning-rate multiplier defaults to the number of tasks.
    cfg['lr_tsk'] = len(cfg.TASKS.NAMES) if args.lr_tsk < 0 else args.lr_tsk

    cfg.NETWORK = edict()

    # Visualize the network on Tensorboard / pdf?
    cfg.NETWORK.VIS_NET = False

    cfg.TRAIN = edict()
    cfg.TRAIN.SCALE = (512, 512)
    cfg.TRAIN.MOMENTUM = 0.9
    cfg.TRAIN.TENS_VIS = True
    cfg.TRAIN.TENS_VIS_INTER = 1000
    cfg.TRAIN.TEMP_LOSS_INTER = 1000

    cfg.TEST = edict()

    # See evolution of the test set when training?
    cfg.TEST.USE_TEST = True
    cfg.TEST.TEST_INTER = 10
    cfg.TEST.SCALE = (512, 512)

    cfg.SEED = 0
    cfg.EVALUATE = True
    cfg.DEBUG = False

    # Overfit mode redirects output to a throwaway 'test' experiment folder.
    cfg['overfit'] = args.overfit
    if cfg['overfit']:
        cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'])
        cfg['exp_name'] = 'test'

    cfg['save_dir'] = os.path.join(cfg['save_dir_root'], cfg['exp_name'])

    return cfg
def parse_folder(exp_root=None, exp_group='pascal_se', tasks=None, db_name='PASCALContext', query='*', dic=None):
    """Recursively scan experiment folders and print per-task performance.

    Walks ``exp_root/exp_group``; for each sub-directory containing a
    ``Results_<db_name>`` folder, reads the per-task ``*.json`` metric files,
    prints a one-line summary and records it in ``dic`` (keyed by folder
    name). Directories that only contain ``models`` are considered not ready
    and are skipped; other directories are recursed into. Prints the best
    value seen per task at the end of each level.

    Args:
        exp_root: Root of all experiments; defaults to ``Path.exp_dir()``
            (resolved at call time, not at import time).
        exp_group: Sub-folder of ``exp_root`` to scan.
        tasks: Task names to report; defaults to the standard six tasks.
        db_name: Dataset name used in the ``Results_<db_name>`` folder.
        query: Glob prefix for the metric json files.
        dic: Optional dict to accumulate results into; a fresh dict is
            created per call when omitted (the original mutable ``dic={}``
            default was shared across calls).
    """
    if exp_root is None:
        exp_root = Path.exp_dir()
    if tasks is None:
        tasks = ['edge', 'semseg', 'human_parts', 'normals', 'sal', 'depth']
    if dic is None:
        dic = {}

    # json key holding each task's headline metric.
    metric_key = {'edge': 'ods_f', 'semseg': 'mIoU', 'human_parts': 'mIoU',
                  'sal': 'mIoU', 'normals': 'mean', 'depth': 'rmse', 'albedo': 'rmse'}
    # Error metrics: lower is better, reported unscaled; others are percentages.
    lower_better = {'normals', 'depth', 'albedo'}

    exp_group_dir = os.path.join(exp_root, exp_group)
    dirs = sorted(os.listdir(exp_group_dir))

    # Initialize best-so-far: 0 for scores to maximize, 100 for errors to minimize.
    best_perf = {task: (100 if task in lower_better else 0) for task in tasks}

    # Examine all subdirectories
    for d in dirs:
        dir_in = os.path.join(exp_group_dir, d)

        # No dir or dir without subdirs
        if not os.path.isdir(dir_in) or not exists_dir(dir_in):
            continue

        # If results folder in dir, print results
        if ('Results_' + db_name) in os.listdir(dir_in):
            perf = {}
            task_counter = 0

            for task in tasks:
                fnames = glob.glob(dir_in + '/Results_' + db_name + '/' + query + task + '.json')
                if not fnames:
                    perf[task] = -1  # sentinel: no result file for this task
                    continue
                task_counter += 1

                with open(fnames[0], 'r') as f:
                    data = json.load(f)

                value = data[metric_key[task]]
                if task in lower_better:
                    perf[task] = value
                    best_perf[task] = min(best_perf[task], perf[task])
                else:
                    perf[task] = 100 * value
                    best_perf[task] = max(best_perf[task], perf[task])

            perf_str = "".join(task + ' ' + '%06.3f' % perf[task] + ' ' for task in tasks)
            if task_counter > 0:
                print('{}: {}'.format(perf_str, d))
                dic[d] = perf
        elif 'models' in os.listdir(dir_in):
            # Results are not ready yet
            continue
        else:
            # Examine subdirectories recursively
            print('\n\n{}\n'.format(d))
            parse_folder(exp_group=os.path.join(exp_group, d), tasks=tasks, query=query, db_name=db_name, dic=dic)

    print(best_perf)
def main():
    """Train (or evaluate) an ImageNet classifier selected by ``args.arch``.

    Builds the model, wraps it in DataParallel across ``args.n_gpu`` GPUs,
    optionally resumes from a checkpoint, then runs the standard
    train/validate loop, checkpointing after every epoch and tracking the
    best top-1 precision.
    """
    args = parse_args()

    best_prec1 = 0

    # Group-norm variants are kept in a separate '<arch>-GN' save directory.
    if not args.group_norm:
        save_dir = os.path.join(Path.exp_dir(), 'imagenet', args.arch)
    else:
        save_dir = os.path.join(Path.exp_dir(), 'imagenet', args.arch + '-GN')

    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    # NOTE(review): the log file handle is only closed at the very end of
    # training; the early-return evaluate path below leaves it open.
    log = open(os.path.join(save_dir, '{}.{}.log'.format(args.arch, args.prefix)), 'w')

    # create model
    print_log("=> creating model '{}'".format(args.arch), log)

    # Input resolution for training crops and validation center crops.
    resol = 224
    # NOTE(review): an unrecognized args.arch leaves `model` unbound and
    # raises NameError at the first use below — confirm the parser restricts
    # choices to this set.
    if args.arch == 'res26':
        model = resnet.resnet26(pretrained=False, group_norm=args.group_norm)
    elif args.arch == 'res50':
        model = resnet.resnet50(pretrained=False, group_norm=args.group_norm)
    elif args.arch == 'res101':
        model = resnet.resnet101(pretrained=False, group_norm=args.group_norm)
    elif args.arch == 'x50':
        model = resnext.resnext50_32x4d(pretrained=False)
    elif args.arch == 'x101':
        model = resnext.resnext101_32x4d(pretrained=False)
    elif args.arch == 'res26-se':
        model = se_resnet.se_resnet26(num_classes=1000)
    elif args.arch == 'res50-se':
        model = se_resnet.se_resnet50(num_classes=1000)
    elif args.arch == 'res101-se':
        model = se_resnet.se_resnet101(num_classes=1000)
    elif args.arch == 'mobilenet-v2':
        model = mobilenet_v2.mobilenet_v2(pretrained=False, n_class=1000, last_channel=2048)

    print_log("=> Model : {}".format(model), log)
    print_log("=> parameter : {}".format(args), log)

    # For alexnet/vgg only the feature extractor is data-parallel (classifier
    # stays on one GPU); all other archs are wrapped whole.
    if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
        model.features = torch.nn.DataParallel(model.features, device_ids=list(range(args.n_gpu)))
        model.cuda()
    else:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.n_gpu))).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print_log("=> loading checkpoint '{}'".format(args.resume), log)
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
        else:
            raise ValueError("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(resol),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=None)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(resol),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)

    if args.evaluate:
        # NOTE(review): this call passes 3 args while the in-loop call below
        # passes (val_loader, model, criterion, log, args) — confirm validate
        # has defaults for log/args.
        validate(val_loader, model, criterion)
        return

    filename = os.path.join(save_dir, 'checkpoint.{}.{}.pth.tar'.format(args.arch, args.prefix))
    bestname = os.path.join(save_dir, 'best.{}.{}.pth.tar'.format(args.arch, args.prefix))

    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, args)

        # Estimate remaining wall-clock time from the last epoch's duration.
        need_hour, need_mins, need_secs = convert_secs2time(epoch_time.val * (args.epochs - epoch))
        need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
        print_log(' [{:s}] :: {:3d}/{:3d} ----- [{:s}] {:s} LR={:}'.format(
            args.arch, epoch, args.epochs, time_string(), need_time, lr), log)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, log, args)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, log, args)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
                'args': copy.deepcopy(args),
            }, is_best, filename, bestname)

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    log.close()