def test_best(main_dict, reset=None):
    _, val_set = load_trainval(main_dict)
    history = ms.load_history(main_dict)

    # Load the cached best-model predictions; recompute them if missing.
    try:
        pred_annList = ms.load_best_annList(main_dict)
    except Exception:
        model = ms.load_best_model(main_dict)
        pred_annList = dataset2annList(model, val_set,
                                       predict_method="BestDice",
                                       n_val=None)
        ms.save_pkl(main_dict["path_best_annList"], pred_annList)

    gt_annDict = load_gtAnnDict(main_dict)
    results = get_perCategoryResults(gt_annDict, pred_annList)

    result_dict = results["result_dict"]
    result_dict["Model"] = main_dict["model_name"]
    result_dict["epoch"] = history["best_model"]["epoch"]

    # Append the best-model row to the baseline rows and print the table.
    result_list = test_baselines(main_dict)
    result_list += [result_dict]
    print(pd.DataFrame(result_list))
def test_load(main_dict, metric_name, predict_proposal=None):
    if predict_proposal is None:
        predict_proposal = ""

    results = glob.glob(main_dict["path_save"] +
                        "/test_{}{}_[0-9]*.json".format(predict_proposal,
                                                        metric_name))

    # Map epoch -> result file, parsing the epoch from "..._<epoch>.json".
    results_dict = {}
    for r in results:
        epoch = int(os.path.basename(r).replace(".json", "").split("_")[-1])
        results_dict[epoch] = r

    if len(results_dict) == 0:
        return "empty"

    best = max(results_dict.keys())
    fname = results_dict[best]
    result = ms.load_json(fname)
    # ms.save_json(fname.replace("None", main_dict["metric_name"]), result)

    history = ms.load_history(main_dict)
    if history is None:
        return "{:.2f}".format(result[metric_name])

    # An asterisk flags a score whose epoch is not the best epoch on record.
    best_epoch = history["best_model"]["epoch"]
    if best_epoch == best:
        return "{:.2f} - ({}) {}".format(result[metric_name], best,
                                         predict_proposal)
    return "{:.2f}* - ({}) {}".format(result[metric_name], best,
                                      predict_proposal)
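# A minimal, self-contained sketch of the filename convention test_load()
# relies on: each result file ends in "_<epoch>.json", so the integer suffix
# is parsed out and the newest epoch wins. The filenames below are made up
# for illustration.
#
# import os
#
# fnames = ["test_mAP25_3.json", "test_mAP25_12.json", "test_mAP25_7.json"]
# by_epoch = {int(os.path.basename(f).replace(".json", "").split("_")[-1]): f
#             for f in fnames}
# assert by_epoch[max(by_epoch)] == "test_mAP25_12.json"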
def test_run(main_dict, metric_name, save, reset, predict_proposal=None):
    if predict_proposal is None:
        predict_proposal = ""

    history = ms.load_history(main_dict)
    if history is None:
        best_epoch = 0
    else:
        best_epoch = history["best_model"]["epoch"]

    fname = main_dict["path_save"] + "/test_{}{}_{}.json".format(
        predict_proposal, metric_name, best_epoch)

    print("Testing: {} - {} - {} - {} - best epoch: {}".format(
        main_dict["dataset_name"], main_dict["config_name"],
        main_dict["loss_name"], metric_name, best_epoch))

    # Compute the score and cache it, keyed by the best epoch.
    if not os.path.exists(fname) or reset == "reset":
        with torch.no_grad():
            score = ms.val_test(main_dict, metric_name=metric_name,
                                n_workers=1)
        ms.save_json(fname, score)
    else:
        score = ms.load_json(fname)

    return score[metric_name]
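# test_run() follows a compute-or-load caching idiom: the score lives in a
# JSON file keyed by the best epoch, so a new best epoch naturally
# invalidates the cache. A generic sketch of the same idiom, where
# compute_score is a hypothetical stand-in for ms.val_test:
import json


def cached_score(fname, compute_score, reset=False):
    # Recompute when asked to reset or when no cached file exists.
    if reset or not os.path.exists(fname):
        score = compute_score()
        with open(fname, "w") as f:
            json.dump(score, f)
        return score
    # Otherwise reuse the cached result.
    with open(fname) as f:
        return json.load(f)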
def train(exp_dict):
    history = ms.load_history(exp_dict)

    if 'only_supervised' in exp_dict:
        tgt_trainloader_supervised, tgt_testloader_supervised = \
            ms.get_tgt_loader_supervised(exp_dict)

        # Load models
        tgt_model, tgt_opt, tgt_scheduler, _, _, _ = \
            ms.load_model_tgt(exp_dict)

        fit_source_supervised(tgt_model, tgt_opt, tgt_scheduler,
                              tgt_trainloader_supervised, exp_dict)

        tgt_acc = test.validate(tgt_model, tgt_model,
                                tgt_trainloader_supervised,
                                tgt_testloader_supervised)
        print("{} TEST Accuracy Supervised =========== {:.2%}\n".format(
            exp_dict["tgt_dataset"], tgt_acc))

        ms.save_model_tgt(exp_dict, history, tgt_model, tgt_opt)
    else:
        # Source
        src_trainloader, src_valloader = ms.load_src_loaders(exp_dict)

        ####################### 1. Train source model
        src_model, src_opt, src_scheduler = ms.load_model_src(exp_dict)

        # Train Source
        if exp_dict["reset_src"]:
            history = fit_source(src_model, src_opt, src_scheduler,
                                 src_trainloader, history, exp_dict)

        # Test Source
        src_acc = test.validate(src_model, src_model, src_trainloader,
                                src_valloader)
        print("{} TEST Accuracy = {:.2%}\n".format(exp_dict["src_dataset"],
                                                   src_acc))
        history["src_acc"] = src_acc
        ms.save_model_src(exp_dict, history, src_model, src_opt)

        ####################### 2. Train target model
        tgt_trainloader, tgt_valloader = ms.load_tgt_loaders(exp_dict)

        # Load models and initialize the target net from the source weights
        (tgt_model, tgt_opt, tgt_scheduler,
         disc_model, disc_opt, disc_scheduler) = ms.load_model_tgt(exp_dict)
        tgt_model.load_state_dict(src_model.state_dict())

        if exp_dict["reset_tgt"]:
            history = fit_target(src_model, tgt_model, tgt_opt, tgt_scheduler,
                                 disc_model, disc_opt, disc_scheduler,
                                 src_trainloader, tgt_trainloader,
                                 tgt_valloader, history, exp_dict)
def train(exp_dict):
    history = ms.load_history(exp_dict)

    # Source
    src_trainloader, src_valloader = ms.load_src_loaders(exp_dict)

    ####################### 1. Train source model
    src_model, src_opt = ms.load_model_src(exp_dict)

    # Train Source
    history = fit_source(src_model, src_opt, src_trainloader, history,
                         exp_dict)

    # Test Source
    src_acc = test.validate(src_model, src_model, src_trainloader,
                            src_valloader)
    print("{} TEST Accuracy = {:.2%}\n".format(exp_dict["src_dataset"],
                                               src_acc))
    history["src_acc"] = src_acc
    ms.save_model_src(exp_dict, history, src_model, src_opt)

    ####################### 2. Train target model
    tgt_trainloader, tgt_valloader = ms.load_tgt_loaders(exp_dict)

    # Load models and initialize the target net from the source weights
    tgt_model, tgt_opt, disc_model, disc_opt = ms.load_model_tgt(exp_dict)
    tgt_model.load_state_dict(src_model.state_dict())

    history = fit_target(src_model, tgt_model, tgt_opt, disc_model, disc_opt,
                         src_trainloader, tgt_trainloader, tgt_valloader,
                         history, exp_dict)
    ms.save_model_tgt(exp_dict, history, tgt_model, tgt_opt, disc_model,
                      disc_opt)

    exp_dict["reset_src"] = 0
    exp_dict["reset_tgt"] = 0
    ms.test_latest_model(exp_dict)
          figsize=(5, 4),
          legend_type="line",
          yscale="linear",
          subplots=(1, 1),
          shareRowLabel=True)

for exp_name in args.expList:
    exp_dict = experiments.get_experiment_dict(args, exp_name)
    exp_dict["reset_src"] = args.reset_src
    exp_dict["reset_tgt"] = args.reset_tgt

    # Set seeds for reproducibility
    np.random.seed(10)
    torch.manual_seed(10)
    torch.cuda.manual_seed_all(10)

    history = ms.load_history(exp_dict)

    # Main options
    if args.mode == "test_model":
        results[exp_name] = ms.test_latest_model(exp_dict, verbose=0)
    elif args.mode == "train":
        train.train(exp_dict)
    elif args.mode == "copy_models":
        results[exp_name] = ms.copy_models(exp_dict,
                                           path_dst="{}/".format(exp_name))

    # MISC
def test_model(main_dict, reset=None):
    # pointDict = load_LCFCNPoints(main_dict)
    _, val_set = load_trainval(main_dict)
    model = ms.load_best_model(main_dict)
    gt_annDict = load_gtAnnDict(main_dict)
    import ipdb; ipdb.set_trace()  # breakpoint 887ad390 //

    # Inspect a few batches interactively: compare the model's predictions
    # against the BestObjectness and UpperBound baselines.
    if 1:
        b_list = [23]
        for i in b_list:
            batch = ms.get_batch(val_set, [i])
            annList_ub = pointList2UpperBoundMask(batch["lcfcn_pointList"],
                                                  batch)["annList"]
            annList_bo = pointList2BestObjectness(batch["lcfcn_pointList"],
                                                  batch)["annList"]
            annList = model.predict(batch,
                                    predict_method="BestDice")["annList"]
            results = get_perSizeResults(gt_annDict, annList)

            print(i, "Counts:", batch["counts"].item(),
                  " - BestObjectness:", len(annList_bo),
                  " - Model:", len(annList),
                  " - UpperBound", len(annList_ub))
            print(i,
                  get_perSizeResults(gt_annDict, annList_bo,
                                     pred_images_only=1)["result_dict"]["0.25"],
                  get_perSizeResults(gt_annDict, annList,
                                     pred_images_only=1)["result_dict"]["0.25"],
                  get_perSizeResults(gt_annDict, annList_ub,
                                     pred_images_only=1)["result_dict"]["0.25"])
            import ipdb; ipdb.set_trace()  # breakpoint 98d0193a //

            image_points = ms.get_image(batch["images"], batch["points"],
                                        enlarge=1, denorm=1)
            ms.images(image_points, annList2mask(annList)["mask"],
                      win="model prediction")
            ms.images(batch["images"], annList2mask(annList_bo)["mask"],
                      win="2", denorm=1)
            ms.images(batch["images"], annList2mask(annList_ub)["mask"],
                      win="3", denorm=1)
            ms.images(batch["images"], batch["points"], win="4",
                      enlarge=1, denorm=1)
            ms.images(batch["images"],
                      model.predict(batch, predict_method="points")["blobs"],
                      win="5", enlarge=1, denorm=1)
            ms.images(batch["images"],
                      pointList2points(batch["lcfcn_pointList"])["mask"],
                      win="predicted_points", enlarge=1, denorm=1)

    fname = main_dict["path_baselines"].replace("baselines",
                                                main_dict["model_name"])
    if reset == "reset":
        _, val_set = load_trainval(main_dict)
        history = ms.load_history(main_dict)
        import ipdb; ipdb.set_trace()  # breakpoint a769ce6e //

        model = ms.load_best_model(main_dict)
        pred_annList = dataset2annList(model, val_set,
                                       predict_method="BestDice",
                                       n_val=None)
        pred_annList_up = load_predAnnList(main_dict,
                                           predict_method="UpperBoundMask")
        # Overwrites the line above; kept both for reference.
        pred_annList_up = load_predAnnList(main_dict,
                                           predict_method="UpperBound")
        gt_annDict = load_gtAnnDict(main_dict)
        results = get_perSizeResults(gt_annDict, pred_annList)

        result_dict = results["result_dict"]
        result_dict["Model"] = main_dict["model_name"]
        result_list = [result_dict]
        ms.save_pkl(fname, result_list)
    else:
        result_list = ms.load_pkl(fname)

    return result_list
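# Sketch of how the result rows produced above get tabulated. Assumption
# (based on the keys accessed in test_model and test_best): each row is a
# flat dict with a "Model" field plus scores keyed by IoU threshold. The
# numbers here are placeholders, not real results.
import pandas as pd

result_list = [
    {"Model": "BestObjectness", "0.25": 41.2, "0.5": 30.8, "0.75": 12.1},
    {"Model": "BestDice",       "0.25": 52.6, "0.5": 38.9, "0.75": 17.4},
]
print(pd.DataFrame(result_list).set_index("Model"))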
def train(exp_dict):
    history = ms.load_history(exp_dict)

    # Simone:
    data_transform = transforms.Compose([
        transforms.Resize((exp_dict['image_size'], exp_dict['image_size']),
                          interpolation=1),
        transforms.ToTensor()
    ])

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    # Source loaders (train_folds, people_per_batch and images_per_person
    # are expected to be defined at module level).
    # src_trainloader, src_valloader = ms.load_src_loaders(exp_dict)
    src_trainloader = get_coxs2v_trainset(
        exp_dict["still_dir"],
        exp_dict["video1_dir"],
        exp_dict["video1_pairs"],
        train_folds,
        exp_dict["cross_validation_num_fold"],
        data_transform,
        people_per_batch,
        images_per_person,
        video_only=True,
        samples_division_list=[0.6, 0.4],
        div_idx=0)
    src_valloader = get_coxs2v_trainset(
        exp_dict["still_dir"],
        exp_dict["video1_dir"],
        exp_dict["video1_pairs"],
        train_folds,
        exp_dict["cross_validation_num_fold"],
        data_transform,
        people_per_batch,
        images_per_person,
        video_only=True,
        samples_division_list=[0.6, 0.4],
        div_idx=1)

    ####################### 1. Train source model
    src_model, src_opt = ms.load_model_src(exp_dict)

    # Train Source
    history = fit_source(src_model, src_opt, src_trainloader, history,
                         exp_dict)

    # Test Source
    src_acc = test.validate(src_model, src_model, src_trainloader,
                            src_valloader)
    print("{} TEST Accuracy = {:.2%}\n".format(exp_dict["src_dataset"],
                                               src_acc))
    history["src_acc"] = src_acc
    ms.save_model_src(exp_dict, history, src_model, src_opt)

    ####################### 2. Train target model
    # tgt_trainloader, tgt_valloader = ms.load_tgt_loaders(exp_dict)
    tgt_trainloader = get_coxs2v_trainset(
        exp_dict["still_dir"],
        exp_dict["video2_dir"],
        exp_dict["video2_pairs"],
        train_folds,
        exp_dict["cross_validation_num_fold"],
        data_transform,
        people_per_batch,
        images_per_person,
        video_only=True,
        samples_division_list=[0.6, 0.4],
        div_idx=0)
    tgt_valloader = get_coxs2v_trainset(
        exp_dict["still_dir"],
        exp_dict["video2_dir"],
        exp_dict["video2_pairs"],
        train_folds,
        exp_dict["cross_validation_num_fold"],
        data_transform,
        people_per_batch,
        images_per_person,
        video_only=True,
        samples_division_list=[0.6, 0.4],
        div_idx=1)

    # Load models and initialize the target net from the source weights
    tgt_model, tgt_opt, disc_model, disc_opt = ms.load_model_tgt(exp_dict)
    tgt_model.load_state_dict(src_model.state_dict())

    history = fit_target(src_model, tgt_model, tgt_opt, disc_model, disc_opt,
                         src_trainloader, tgt_trainloader, tgt_valloader,
                         history, exp_dict)
    ms.save_model_tgt(exp_dict, history, tgt_model, tgt_opt, disc_model,
                      disc_opt)

    exp_dict["reset_src"] = 0
    exp_dict["reset_tgt"] = 0
    ms.test_latest_model(exp_dict)
def debug(main_dict):
    """Interactive debugging scratchpad: the calls below are meant to be run
    selectively from the ipdb breakpoints, not executed top to bottom."""
    # ud.debug_sheep(main_dict)
    loss_dict = main_dict["loss_dict"]
    metric_dict = main_dict["metric_dict"]
    metric_name = main_dict["metric_name"]
    metric_class = main_dict["metric_dict"][metric_name]
    loss_name = main_dict["loss_name"]
    batch_size = main_dict["batch_size"]

    ms.print_welcome(main_dict)
    train_set, val_set = ms.load_trainval(main_dict)
    # test_set = ms.load_test(main_dict)
    # batch = ms.get_batch(test_set, indices=[509])
    # batch = ms.get_batch(val_set, indices=[0, 4, 9])
    # b2 = um.get_batch(val_set, indices=[4])
    # ms.fitBatch(model, batch, loss_name="image_loss", opt=opt, epochs=100)
    # batch_train = ms.get_batch(val_set, indices=[15])
    # batch = ms.get_batch(val_set, indices=[15])
    # tr_batch = ms.get_batch(val_set, indices=[2])
    # batch = ms.get_batch(val_set, indices=[1, 2, 3, 12, 13, 14, 16, 17, 67, 68, 70])
    # batch = ms.get_batch(val_set, indices=[300])
    # ms.images(batch["images"], batch["points"], denorm=1, enlarge=1)
    # for i in range(len(val_set)):
    #     batch = ms.get_batch(val_set, indices=[i])
    #     sharp_proposals = prp.Sharp_class(batch)
    # pointList = bu.mask2pointList(batch["points"])["pointList"]
    # propDict = bu.pointList2propDict(pointList, sharp_proposals, thresh=0.5)
    # for i in range(len(train_set)):
    #     print(i)
    #     sharp_proposals = prp.Sharp_class(ms.get_batch(train_set, indices=[i]))
    # d2c.pascal2cocoformat(main_dict)
    # history = ms.load_history(main_dict)
    # print(pd.DataFrame(history["val"]))
    # print(pd.DataFrame(history["train"])[loss_name])

    model, opt, _ = ms.init_model_and_opt(main_dict)
    import ipdb; ipdb.set_trace()  # breakpoint b87b640d //

    batch = ms.get_batch(val_set, indices=[1])
    ms.visBlobs(model, batch, predict_method="BestDice")
    import ipdb; ipdb.set_trace()  # breakpoint a18a7b92 //

    plants.save_test_to_h5(main_dict)
    model = ms.load_best_model(main_dict)
    if 1:
        import train
        train.validation_phase_mIoU(ms.load_history(main_dict), main_dict,
                                    model, val_set, "BestDice", 0)

    test_set = ms.load_test(main_dict)
    batch = ms.get_batch(test_set, indices=[4])
    batch = ms.get_batch(val_set, indices=[4])
    ms.images(batch["images"],
              model.predict(batch, predict_method="BestDice", use_trans=1,
                            sim_func=au.compute_dice)["blobs"],
              denorm=1)
    val_dict, pred_annList = au.validate(model, val_set,
                                         predict_method="BestDice",
                                         n_val=None, return_annList=True)
    model = ms.load_lcfcn(train_set, mode="lcfcn")
    val_dict, pred_annList = au.validate(model, val_set,
                                         predict_method="BestDice",
                                         n_val=None, return_annList=True)
    model = ms.load_best_model(main_dict)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name], opt=opt,
                epochs=10000, visualize=True)
    import ipdb; ipdb.set_trace()  # breakpoint 4e08c360 //

    if os.path.exists(main_dict["path_history"]):
        history = ms.load_history(main_dict)
        print("# Trained Images:", len(history["trained_batch_names"]),
              "/", len(train_set))
        print("# Epoch:", history["epoch"])
        # print(pd.DataFrame(history["val"]))
        # val_names = [ms.extract_fname(fname).replace(".jpg", "")
        #              for fname in val_set.img_names]
        # assert np.in1d(history["trained_batch_names"], val_names).sum() == 0
        import ipdb; ipdb.set_trace()  # breakpoint ef2ce16b //

    model = ms.load_best_model(main_dict)
    ms.visBlobs(model, batch, predict_method="BestDice")
    ms.images(batch["images"],
              au.annList2mask(model.predict(
                  batch, predict_method="loc")["annList"])["mask"],
              enlarge=1, denorm=1)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name], opt=opt,
                epochs=10000, visualize=True)
    ms.images(model, ms.get_batch(val_set, indices=[0]))

    model = ms.load_best_model(main_dict)
    model.extract_proposalMasks(ms.get_batch(train_set, indices=[1]))
    mask = model.visualize(ms.get_batch(val_set, indices=[1]))
    img = ms.f2l(ms.t2n(ms.denormalize(batch["images"]))).squeeze()
    segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
    results = model.predict(batch, "ewr")
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name], opt=opt,
                epochs=10000, visualize=True)

    model = ms.load_best_model(main_dict)
    model.visualize(ms.get_batch(val_set, indices=[0]))
    ms.visBlobs(model, ms.get_batch(val_set, indices=[0]), with_void=True)
    ms.images(ms.gray2cmap(model(batch["images"].cuda())["mask"].squeeze()))
    h, w = batch["images"].shape[-2:]
    ms.images(ms.gray2cmap(deconvolve(
        ms.t2n(model(batch["images"].cuda())["cam"]),
        kernel(46, 65, sigma=1.5))))

    model = ms.load_latest_model(main_dict)
    opt = ms.create_opt(model, main_dict)
    val_dict, pred_annList = au.validate(model, val_set,
                                         predict_method="BestDice",
                                         n_val=None, return_annList=True)
    ms.visBlobs(model, batch)
    ms.visPretty(model, batch, alpha=0.0)

    if ms.model_exists(main_dict) and main_dict["reset"] != "reset":
        model = ms.load_latest_model(main_dict)
        opt = ms.create_opt(model, main_dict)
        history = ms.load_history(main_dict)
        import ipdb; ipdb.set_trace()  # breakpoint 46fc0d2c //

        batch = ms.get_batch(val_set, indices=[2])
        model.visualize(batch, cam_index=1)
        model.embedding_head.score_8s.bias
        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice", n_val=[11])
        # vis.visBlobs(model, ms.get_batch(val_set, indices=[14]))
        import ipdb; ipdb.set_trace()  # breakpoint 54f5496d //

        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice",
                                    n_val=[80, 81])
        vis.visBlobList(model, val_set, [1, 2, 3])
        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice",
                                    n_val=len(val_set))
        obj_scores = val.valPascal(model, val_set,
                                   predict_method="BestObjectness",
                                   n_val=len(val_set))
        vis.visBlobs(model, batch)
        import ipdb; ipdb.set_trace()  # breakpoint cbf2e6d1 //

        ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                    opt=opt, epochs=10000, visualize=True)
        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice",
                                    n_val=[630, 631, 632])
        obj_scores = val.valPascal(model, val_set,
                                   predict_method="BestObjectness",
                                   n_val=100)
        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice",
                                    n_val=len(val_set))
        # val.valPascal(model, val_set, predict_method="BestObjectness",
        #               n_val=[10])
        model.predict(batch, predict_method="BestDice")
        import ipdb; ipdb.set_trace()  # breakpoint 797d17b4 //

        obj_scores = val.valPascal(model, val_set,
                                   predict_method="BestObjectness", n_val=30)
        vis.visBlobs(model, ms.get_batch(val_set, indices=[14]),
                     predict_method="BestDice")
        model.predict(batch, predict_method="blobs")
        import ipdb; ipdb.set_trace()  # breakpoint f4598264 //

        model.visualize(batch)
        val.valPascal(model, val_set, predict_method="BestObjectness",
                      n_val=[10])
        vis.visBlobs(model, batch, predict_method="BestObjectness")
        import ipdb; ipdb.set_trace()  # breakpoint f691d432 //

        ms.fit(model,
               ms.get_dataloader(val_set, batch_size=1, sampler_class=None),
               opt=opt, loss_function=main_dict["loss_dict"][loss_name])
        vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                     predict_method="UpperBound")
        history = ms.load_history(main_dict)
        # model = ms.load_best_model(main_dict)
        # print("Loaded best model...")
    else:
        model, opt, _ = ms.init_model_and_opt(main_dict)
    import ipdb; ipdb.set_trace()  # breakpoint e26f9978 //

    obj_scores = val.valPascal(model, val_set,
                               predict_method="BestObjectness", n_val=[2])
    # ms.images(batch["images"], model.predict(batch, "blobs"), denorm=1)
    import ipdb; ipdb.set_trace()  # breakpoint 08a2a8af //

    vis.visBlobs(model, batch)
    vis.visBlobs(model, ms.get_batch(val_set, indices=[14]))
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name], opt=opt,
                epochs=10000, visualize=True)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name], opt=opt,
                epochs=10, visualize=True)
    # test_prm(model, batch)
    # test_prm(model, batch, i=1, j=0)
    # import ipdb; ipdb.set_trace()  # breakpoint a860544a //
    # img2 = batch["images"].cuda().requires_grad_()
    # cues = rm.peak_response(model.backbone, img, peak_threshold=1)
    # batch = ms.get_batch(train_set, indices=[0])
    # vis.visBatch(ms.get_batch(train_set, indices=[72]))
    # ms.images(batch["images"], batch["points"], denorm=1, enlarge=1)
    # vis.visSplit(model, batch)
    # model.set_proposal(None); vis.visBlobs(model, batch)
    # vis.visBlobList(model, val_set, [0, 1, 2, 3])
    # for i in range(len(train_set)): print(i); x = train_set[i]
    '''
    mask = np.zeros(batch["images"].shape)[:, 0]
    ms.images(batch["images"], mask, denorm=1)
    for i in range(400):
        mask += (i + 1) * (rescale(sharp_proposals[i]["mask"], 0.5) > 0).astype(int)
    annList = vis.visAnnList(model, val_set, [34], cocoGt,
                             predict_proposal="BestObjectness")
    '''
    n_images = 10
    batch = ms.get_batch(val_set, indices=[9])
    import ipdb; ipdb.set_trace()  # breakpoint 8d385ace //

    batch = ms.get_batch(val_set, indices=[50])
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name], opt=opt,
                epochs=10)
    vis.visBlobs(model, ms.get_batch(val_set, indices=[3]),
                 predict_method="BestDice")
    vis.visBlobs(model, batch)
    import ipdb; ipdb.set_trace()  # breakpoint 99558393 //

    val.valPascal(model, val_set, predict_method="BestObjectness", n_val=10)
    val.valPascal(model, val_set, predict_method="BestDice", n_val=10)
    val.valPascal(model, val_set, predict_method="BestDice_no", n_val=[10])
    batch = ms.get_batch(val_set, indices=[10])
    model.predict(batch, predict_method="BestDice")
    model.predict(batch, predict_method="BestDice_no")
    vis.visBlobs(model, batch)
    vis.visBlobs(model, batch, predict_method=main_dict["predict_name"],
                 cocoGt=val_set.cocoGt)
    val.valPascal(model, val_set, predict_method="BestObjectness", n_val=15)
    val.valPascal(model, val_set, predict_method="BoxSegment", n_val=15)
    val.valPascal(model, val_set, predict_method=main_dict["predict_name"],
                  n_val=15)
    vis.visBlobs(model, batch)
    ms.fitBatch(model, batch,
                loss_function=loss_dict[main_dict["loss_name"]], opt=opt,
                epochs=5)
    vis.visBlobs(model, batch)
    ms.images(bu.batch2propDict(
        ms.get_batch(val_set, indices=[1]))["foreground"])
    batch = ms.get_batch(val_set, indices=[19])
    ms.images(batch["images"], bu.batch2propDict(batch)["foreground"],
              denorm=1)
    ms.fitBatch(model, batch,
                loss_function=loss_dict[main_dict["loss_name"]], opt=opt,
                epochs=100)
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                 predict_method="GlanceBestBox")
    val.valPascal(model, val_set, predict_method="GlanceBestBox", n_val=15)
    val.valPascal(model, val_set, predict_method="BestDice", n_val=15)
    import ipdb; ipdb.set_trace()  # breakpoint 01f8e3fa //

    val.valPascal(model, val_set, predict_method=main_dict["predict_name"],
                  n_val=5)
    import ipdb; ipdb.set_trace()  # breakpoint 78d3f03a //

    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]))
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                 predict_method="BestObjectness")
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                 predict_method="UpperBound")
    ms.fitBatch(model, batch,
                loss_function=loss_dict[main_dict["loss_name"]], opt=opt,
                epochs=100)
    # ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
    #             opt=opt, epochs=1)
    # ms.fitData(model, val_set, opt=opt, loss_function=loss_dict[loss_name])
    import ipdb; ipdb.set_trace()  # breakpoint 51e4d47d //

    val.valPascal(model, val_set, predict_method="BestObjectness",
                  n_val=n_images)
    val.valPascal(model, val_set, predict_method="UpperBound",
                  n_val=len(val_set))
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]))
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                 predict_method="BestObjectness")

    n_images = len(val_set)
    for e in range(5):
        for i in range(n_images):
            i_rand = np.random.randint(n_images)
            i_rand = i
            print(i_rand)
            batch = ms.get_batch(train_set, indices=[i_rand])
            ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                        opt=opt, epochs=1)

    # cocoGt = ms.load_voc2012_val()
    cocoGt = ms.load_cp_val()
    ms.fitBatch(model, batch,
                loss_function=loss_dict[main_dict["loss_name"]], opt=opt,
                epochs=100)
    # vis.visAnns(model, batch, cocoGt, predict_proposal="BestBoundary")
    import ipdb; ipdb.set_trace()  # breakpoint 6f37a744 //

    if 1:
        n_images = 30
        resList = []
        for k in range(5):
            for i in range(n_images):
                print(i)
                batch = ms.get_batch(val_set, indices=[i])
                ms.fitBatch(model, batch,
                            loss_function=loss_dict[loss_name], opt=opt,
                            epochs=2)
            resList += [val.valPascal(model, val_set,
                                      predict_proposal="excitementInside",
                                      n_val=n_images)]

    ms.fitBatch(model, batch,
                loss_function=loss_dict[main_dict["loss_name"]], opt=opt,
                epochs=100)
    import ipdb; ipdb.set_trace()  # breakpoint 14451165 //

    ms.eval_cocoDt(main_dict, predict_proposal="UB_Sharp_withoutVoid")
    import ipdb; ipdb.set_trace()  # breakpoint f3f0fda5 //

    vis.visAnns(model, batch, cocoGt, predict_proposal="BestObjectness")
    annList = vis.visAnnList(model, val_set, [1, 2], cocoGt,
                             predict_proposal="BestObjectness")
    annList = ms.load_annList(main_dict, predict_proposal="BestObjectness")
    ms.eval_cocoDt(main_dict, predict_proposal="UB_Sharp_withoutVoid")
    # score = np.array([s["score"] for s in annList])
    batch = ms.get_batch(val_set, indices=[2])
    ms.fitBatch(model, batch,
                loss_function=loss_dict[main_dict["loss_name"]], opt=opt,
                epochs=100)
    vis.visBlobs(model, batch)
    ms.fitBatch(model, batch, loss_function=loss_dict["water_loss"], opt=opt,
                epochs=100)
    ms.fitBatch(model, batch, loss_function=loss_dict["point_loss"], opt=opt,
                epochs=100)
    vis.visSplit(model, batch, 0, "water")
    '''
    val.valPascal(model, val_set, predict_proposal="excitementInside",
                  n_val=30)
    '''
    # model.save(batch, path="/mnt/home/issam/Summaries/tmp.png")
    # batch = ms.get_batch(train_set, indices=[52])
    # torch.save(model.state_dict(), "/mnt/home/issam/Saves/model_split.pth")
    vis.save_images(model, val_set,
                    # indices=np.random.randint(0, len(val_set), 200),
                    indices=np.arange(5, 200),
                    path="/mnt/home/issam/Summaries/{}_val/".format(
                        main_dict["dataset_name"]))
    vis.visBlobs(model, batch)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name], opt=opt,
                epochs=10)
    ms.valBatch(model, batch, metric_dict[metric_name])
    ms.validate(model, val_set, metric_class=metric_class)
    # ms.visBlobs(model, tr_batch)
    # model.predict(tr_batch, "counts")

    for i in range(292, 784):
        batch = ms.get_batch(val_set, indices=[i])
        try:
            score = ms.valBatch(model, batch, metric_dict[metric_name])
        except Exception:
            print(i, batch['name'])
            import ipdb; ipdb.set_trace()  # breakpoint effaca86 //
            ms.visBlobs(model, batch)

    if 1:
        resList = []
        for k in range(5):
            for i in range(10):
                print(i)
                batch = ms.get_batch(val_set, indices=[i])
                ms.fitBatch(model, batch,
                            loss_function=loss_dict[loss_name], opt=opt,
                            epochs=1)
            resList += [val.valPascal(model, val_set,
                                      predict_proposal="BestObjectness",
                                      n_val=10)]

    val.valPascal(model, val_set, predict_proposal="BestBoundary", n_val=30)
    val.valPascal(model, val_set, predict_proposal="BestObjectness",
                  n_val=list(range(len(val_set))))
    # model.predict_proposals(batch)
    batch = ms.get_batch(val_set, indices=[35])
    ms.images(batch["original"], model.predict_proposals(batch, which=0))
    ms.images(ms.get_batch(train_set, [300])["original"],
              train_set.get_proposal(300, indices=[0, 1]))
    # from spn import object_localization
    # cm = model.class_activation_map(batch["images"].cuda())
    # model.display(ms.get_batch(train_set, indices=[3]))
    ms.images(255 * np.abs(model.predict(
        ms.get_batch(train_set, indices=[3]), "saliency")))
    sal = model.predict(ms.get_batch(train_set, indices=[3]), "saliency")
    ms.images(np.abs(sal) * 255)
    import ipdb; ipdb.set_trace()  # breakpoint c7ca398d //

    for i in range(1):
        ms.fit(model,
               ms.get_dataloader(train_set, batch_size=1,
                                 sampler_class=None),
               loss_function=main_dict["loss_dict"][loss_name],
               metric_class=main_dict["metric_dict"][metric_name],
               opt=opt, val_batch=False)
    ms.fitQuick(model, train_set, loss_name=loss_name,
                metric_name=metric_name, opt=opt)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name], opt=opt,
                epochs=100)
    ms.valBatch(model, batch, metric_dict[metric_name])
    ms.visBlobs(model, ms.get_batch(train_set, indices=[3]))
    ms.visBlobs(model, batch)
    # model = ms.load_best_model(main_dict)
    # metrics.compute_ap(model, batch)
    # val.val_cm(main_dict)
    batch = ms.visBlobsQ(model, val_set, 8)
    import ipdb; ipdb.set_trace()  # breakpoint 5cd16f8f //

    ul.visSp_prob(model, batch)
    ms.images(batch["images"], aa, denorm=1)
    ms.visBlobs(model, batch)
    ul.vis_nei(model, batch, topk=1000, thresh=0.8, bg=True)
    ul.vis_nei(model, batch, topk=1000, bg=False)
    ms.fitQuick(model, train_set, batch_size=batch_size,
                loss_name=loss_name, metric_name=metric_name)
    val.validate(model, val_set, metric_name=main_dict["metric_name"],
                 batch_size=main_dict["val_batchsize"])
    ms.fitQuick(model, train_set, batch_size=batch_size,
                loss_name=loss_name, metric_name=metric_name)
    ms.fitBatch(model, batch, loss_name=loss_name, opt=opt, epochs=100)
    val.valBatch(model, batch_train, metric_name=metric_name)
    ms.fitBatch(model, batch, loss_function=losses.expand_loss, opt=opt,
                epochs=100)
    ms.visBlobs(model, batch)
    ms.visWater(model, batch)
    ms.validate(model, val_set, metric_class=metric_class)
    import ipdb; ipdb.set_trace()  # breakpoint ddad840d //

    model, opt, _ = ms.init_model_and_opt(main_dict)
    ms.fitBatch(model, batch, loss_name="water_loss_B", opt=opt, epochs=100)
    ms.fitQuick(model, train_set, loss_name=loss_name,
                metric_name=metric_name)
    # ms.images(batch["images"], batch["labels"], denorm=1)
    # ms.init.LOSS_DICT["water_loss"](model, batch)
    import ipdb; ipdb.set_trace()  # breakpoint f304b83a //

    ms.images(batch["images"], model.predict(batch, "labels"), denorm=1)
    val.valBatch(model, batch, metric_name=main_dict["metric_name"])
    ms.visBlobs(model, batch)
    import ipdb; ipdb.set_trace()  # breakpoint 074c3921 //

    ms.fitBatch(model, batch, loss_name=main_dict["loss_name"], opt=opt,
                epochs=100)
    for e in range(10):
        if e == 0:
            scoreList = []
        scoreList += [ms.fitIndices(
            model, train_set, loss_name=main_dict["loss_name"],
            batch_size=batch_size, metric_name=metric_name, opt=opt,
            epoch=e, num_workers=1,
            ind=np.random.randint(0, len(train_set), 32))]
    ms.fitData(model, train_set, opt=opt, epochs=10)
    um.reload(sp)
    water = sp.watersplit(model, batch).astype(int)
    ms.images(batch["images"], water, denorm=1)
    ms.visBlobs(model, batch)
    ms.images(batch["images"], ul.split_crf(model, batch), denorm=1)
    losses.dense_crf(model, batch, alpha=61, beta=31, gamma=1)
    ms.visBlobs(model, batch)
    model.blob_mode = "superpixels"

    # ----------------------
    # Vis Blobs
    ms.visBlobs(model, batch)
    ms.images(batch["images"], model.predict(batch, "labels"), denorm=1)
    ms.images(batch["images"], sp.watersplit_test(model, batch).astype(int),
              denorm=1)

    # Vis CRF
    ms.images(batch["images"],
              ul.dense_crf(model, batch, alpha=5, gamma=5, beta=5,
                           smooth=False),
              denorm=1)
    ms.images(batch["images"], ul.dense_crf(model, batch), denorm=1)

    # Eval
    val.valBatch(model, batch, metric_name=main_dict["metric_name"])
    import ipdb; ipdb.set_trace()  # breakpoint e9cd4eb0 //

    model = ms.load_best_model(main_dict)
    val.valBatch(model, batch, metric_name=main_dict["metric_name"])
    ms.fitBatch(model, batch, loss_name=main_dict["loss_name"], opt=opt)
    ms.visBlobs(model, batch)
    import ipdb; ipdb.set_trace()  # breakpoint 2167961a //

    batch = ms.get_batch(train_set, indices=[5])
    ms.fitBatch(model, batch, loss_name=main_dict["loss_name"], opt=opt)
    ms.images(batch["images"], model.predict(batch, "probs"), denorm=1)
    ms.visBlobs(model, batch)
    val.validate(model, val_set, metric_name=main_dict["metric_name"])
    val.validate(model, val_set, metric_name="SBD")
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--exp')
    parser.add_argument('-b', '--borgy', default=0, type=int)
    parser.add_argument('-br', '--borgy_running', default=0, type=int)
    parser.add_argument('-m', '--mode', default="summary")
    parser.add_argument('-r', '--reset', default="None")
    parser.add_argument('-s', '--status', type=int, default=0)
    parser.add_argument('-k', '--kill', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int)
    parser.add_argument('-c', '--configList', nargs="+", default=None)
    parser.add_argument('-l', '--lossList', nargs="+", default=None)
    parser.add_argument('-d', '--datasetList', nargs="+", default=None)
    parser.add_argument('-metric', '--metricList', nargs="+", default=None)
    parser.add_argument('-model', '--modelList', nargs="+", default=None)
    parser.add_argument('-p', '--predictList', nargs="+", default=None)
    args = parser.parse_args()

    if args.borgy or args.kill:
        global_prompt = input("Do all? \n(y/n)\n")

    # Make sure CUDA is available
    assert torch.cuda.is_available()
    print("CUDA: %s" % torch.version.cuda)
    print("PyTorch: %s" % torch.__version__)

    mode = args.mode
    exp_name = args.exp
    exp_dict = experiments.get_experiment_dict(args, exp_name)

    pp_main = None
    results = {}

    # Get the main class
    project_name = os.path.realpath(__file__).split("/")[-2]
    MC = ms.MainClass(path_models="models",
                      path_datasets="datasets",
                      path_metrics="metrics/metrics.py",
                      path_losses="losses/losses.py",
                      path_samplers="addons/samplers.py",
                      path_transforms="addons/transforms.py",
                      path_saves="/mnt/projects/counting/Saves/main/",
                      project=project_name)

    key_set = set()
    for model_name, config_name, metric_name, dataset_name, loss_name in \
            product(exp_dict["modelList"], exp_dict["configList"],
                    exp_dict["metricList"], exp_dict["datasetList"],
                    exp_dict["lossList"]):
        # if model_name in ["LC_RESFCN"]:
        #     loss_name = "water_loss"
        config = configs.get_config_dict(config_name)

        key = ("{} - {} - {}".format(model_name, config_name, loss_name),
               "{}_({})".format(dataset_name, metric_name))
        if key in key_set:
            continue
        key_set.add(key)

        main_dict = MC.get_main_dict(mode, dataset_name, model_name,
                                     config_name, config, args.reset,
                                     exp_dict["epochs"], metric_name,
                                     loss_name)
        main_dict["predictList"] = exp_dict["predictList"]

        if mode == "paths":
            print("\n{}_({})".format(dataset_name, model_name))
            print(main_dict["path_best_model"])
            # print(main_dict["exp_name"])

        predictList_str = ' '.join(exp_dict["predictList"])

        if args.status:
            results[key] = borgy.borgy_status(mode, config_name, metric_name,
                                              model_name, dataset_name,
                                              loss_name, args.reset,
                                              predictList_str)
            continue
        if args.kill:
            results[key] = borgy.borgy_kill(mode, config_name, metric_name,
                                            model_name, dataset_name,
                                            loss_name, args.reset,
                                            predictList_str)
            continue
        if args.borgy:
            results[key] = borgy.borgy_submit(project_name, global_prompt,
                                              mode, config_name, metric_name,
                                              model_name, dataset_name,
                                              loss_name, args.reset,
                                              predictList_str)
            continue

        if mode == "debug":
            debug.debug(main_dict)

        if mode == "validate":
            validate.validate(main_dict)

        if mode == "save_gam_points":
            train_set, _ = au.load_trainval(main_dict)
            model = ms.load_best_model(main_dict)
            for i in range(len(train_set)):
                print(i, "/", len(train_set))
                batch = ms.get_batch(train_set, [i])
                fname = train_set.path + "/gam_{}.pkl".format(
                    batch["index"].item())
                points = model.get_points(batch)
                ms.save_pkl(fname, points)
            import ipdb; ipdb.set_trace()  # breakpoint ee49ab9f //

        if mode == "save_prm_points":
            train_set, _ = au.load_trainval(main_dict)
            model = ms.load_best_model(main_dict)
            for i in range(len(train_set)):
                print(i, "/", len(train_set))
                batch = ms.get_batch(train_set, [i])
                fname = "{}/prm{}.pkl".format(batch["path"][0],
                                              batch["name"][0])
                points = model.get_points(batch)
                ms.save_pkl(fname, points)
            import ipdb; ipdb.set_trace()  # breakpoint 679ce152 //

        # if mode == "pascal_annList":
        #     data_utils.pascal2lcfcn_points(main_dict)

        if mode == "upperboundmasks":
            import ipdb; ipdb.set_trace()  # breakpoint 02fac8ce //
            results = au.test_upperboundmasks(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "model":
            results = au.test_model(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "upperbound":
            results = au.test_upperbound(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "MUCov":
            gtAnnDict = au.load_gtAnnDict(main_dict, reset=args.reset)
            # model = ms.load_best_model(main_dict)
            fname = main_dict["path_save"] + "/pred_annList.pkl"
            if not os.path.exists(fname):
                _, val_set = au.load_trainval(main_dict)
                model = ms.load_best_model(main_dict)
                pred_annList = au.dataset2annList(model, val_set,
                                                  predict_method="BestDice",
                                                  n_val=None)
                ms.save_pkl(fname, pred_annList)
            else:
                pred_annList = ms.load_pkl(fname)
            import ipdb; ipdb.set_trace()  # breakpoint 527a7f36 //
            pred_annList = au.load_predAnnList(
                main_dict, predict_method="BestObjectness")
            # 0.31 best objectness
            # pred_annList =  # 0.3482122335421256
            # au.get_MUCov(gtAnnDict, pred_annList)
            au.get_SBD(gtAnnDict, pred_annList)

        if mode == "dic_sbd":
            import ipdb; ipdb.set_trace()  # breakpoint 4af08a17 //

        if mode == "point_mask":
            from datasets import base_dataset
            import ipdb; ipdb.set_trace()  # breakpoint 7fd55e0c //
            _, val_set = ms.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [1])
            model = ms.load_best_model(main_dict)
            pred_dict = model.LCFCN.predict(batch)
            # ms.pretty_vis(batch["images"], base_dataset.batch2annList(batch))
            ms.images(ms.pretty_vis(
                batch["images"],
                model.LCFCN.predict(batch,
                                    predict_method="original")["annList"]),
                win="blobs")
            ms.images(ms.pretty_vis(batch["images"],
                                    base_dataset.batch2annList(batch)),
                      win="erww")
            ms.images(batch["images"], batch["points"], denorm=1, enlarge=1,
                      win="e21e")
            import ipdb; ipdb.set_trace()  # breakpoint ab9240f0 //

        if mode == "lcfcn_output":
            import ipdb; ipdb.set_trace()  # breakpoint 7fd55e0c //
            gtAnnDict = au.load_gtAnnDict(main_dict, reset=args.reset)

        if mode == "load_gtAnnDict":
            _, val_set = au.load_trainval(main_dict)
            gtAnnDict = au.load_gtAnnDict(val_set)
            # gtAnnClass = COCO(gtAnnDict)
            # au.assert_gtAnnDict(main_dict, reset=None)
            # _, val_set = au.load_trainval(main_dict)
            # annList_path = val_set.annList_path
            # fname_dummy = annList_path.replace(".json", "_best.json")
            # predAnnDict = ms.load_json(fname_dummy)
            import ipdb; ipdb.set_trace()  # breakpoint 100bfe1b //
            pred_annList = ms.load_pkl(main_dict["path_best_annList"])
            # model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [1])
            import ipdb; ipdb.set_trace()  # breakpoint 2310bb33 //
            model = ms.load_best_model(main_dict)
            pred_dict = model.predict(batch, "BestDice", "mcg")
            ms.images(batch["images"],
                      au.annList2mask(pred_dict["annList"])["mask"],
                      denorm=1)
            # pointList2UpperBoundMCG
            pred_annList = au.load_predAnnList(main_dict,
                                               predict_method="BestDice",
                                               proposal_type="mcg",
                                               reset="reset")
            # annList = au.pointList2UpperBoundMCG(batch["lcfcn_pointList"],
            #                                      batch)["annList"]
            ms.images(batch["images"], au.annList2mask(annList)["mask"],
                      denorm=1)
            pred_annList = au.load_BestMCG(main_dict, reset="reset")
            # pred_annList = au.dataset2annList(model, val_set,
            #                                   predict_method="BestDice",
            #                                   n_val=None)
            au.get_perSizeResults(gtAnnDict, pred_annList)

        if mode == "vis":
            _, val_set = au.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [3])
            import ipdb; ipdb.set_trace()  # breakpoint 05e6ef16 //
            vis.visBaselines(batch)
            model = ms.load_best_model(main_dict)
            vis.visBlobs(model, batch)

        if mode == "qual":
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            path = "/mnt/home/issam/Summaries/{}_{}".format(
                dataset_name, model_name)
            try:
                ms.remove_dir(path)
            except Exception:
                pass
            n_images = len(val_set)
            base = "{}_{}".format(dataset_name, model_name)
            for i in range(50):
                print(i, "/10", "- ", base)
                index = np.random.randint(0, n_images)
                batch = ms.get_batch(val_set, [index])
                if len(batch["lcfcn_pointList"]) == 0:
                    continue
                image = vis.visBlobs(model, batch, return_image=True)
                # image_baselines = vis.visBaselines(batch, return_image=True)
                # imgAll = np.concatenate([image, image_baselines], axis=1)
                fname = path + "/{}_{}.png".format(i, base)
                ms.create_dirs(fname)
                ms.imsave(fname, image)

        if mode == "test_baselines":
            import ipdb; ipdb.set_trace()  # breakpoint b51c5b1f //
            results = au.test_baselines(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "test_best":
            au.test_best(main_dict)

        if mode == "qualitative":
            au.qualitative(main_dict)

        if mode == "figure1":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            # proposals_path = "/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/ProposalsSharp/"
            # vidList = glob("/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/stuttgart_01/*")
            # vidList.sort()
            # pretty_image = ms.visPretty(model, batch=ms.get_batch(val_set, [i]),
            #                             with_void=1, win="with_void")
            batch = ms.get_batch(val_set, [68])
            bestdice = ms.visPretty(model, batch=batch, with_void=0,
                                    win="no_void")
            blobs = ms.visPretty(model, batch=batch,
                                 predict_method="blobs", with_void=0,
                                 win="no_void")
            ms.images(bestdice, win="BestDice")
            ms.images(blobs, win="Blobs")
            ms.images(batch["images"], denorm=1, win="Image")
            ms.images(batch["images"], batch["points"], enlarge=1, denorm=1,
                      win="Points")
            import ipdb; ipdb.set_trace()  # breakpoint cf4bb3d3 //

        if mode == "video2":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            index = 0
            for i in range(len(val_set)):
                batch = ms.get_batch(val_set, [i])
                pretty_image = ms.visPretty(model, batch=batch, with_void=0,
                                            win="no_void")
                # pred_dict = model.predict(batch, predict_method="BestDice")
                path_summary = main_dict["path_summary"]
                ms.create_dirs(path_summary + "/tmp")
                ms.imsave(path_summary + "vid_mask_{}.png".format(index),
                          ms.get_image(batch["images"], batch["points"],
                                       enlarge=1, denorm=1))
                index += 1
                ms.imsave(path_summary + "vid_mask_{}.png".format(index),
                          pretty_image)
                index += 1
                # ms.imsave(path_summary + "vid1_full_{}.png".format(i),
                #           ms.get_image(img, pred_dict["blobs"], denorm=1))
                print(i, "/", len(val_set))

        if mode == "video":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            # _, val_set = au.load_trainval(main_dict)
            proposals_path = "/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/ProposalsSharp/"
            vidList = glob(
                "/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/stuttgart_01/*")
            vidList.sort()
            for i, img_path in enumerate(vidList):
                image = Image.open(img_path).convert('RGB')
                image = image.resize((1200, 600), Image.BILINEAR)
                img, _ = transforms.Tr_WTP_NoFlip()([image, image])
                pred_dict = model.predict(
                    {
                        "images": img[None],
                        "split": ["test"],
                        "resized": torch.FloatTensor([1]),
                        "name": [ms.extract_fname(img_path)],
                        "proposals_path": [proposals_path]
                    },
                    predict_method="BestDice")
                path_summary = main_dict["path_summary"]
                ms.create_dirs(path_summary + "/tmp")
                ms.imsave(path_summary + "vid1_mask_{}.png".format(i),
                          ms.get_image(pred_dict["blobs"]))
                ms.imsave(path_summary + "vid1_full_{}.png".format(i),
                          ms.get_image(img, pred_dict["blobs"], denorm=1))
                print(i, "/", len(vidList))

        if mode == "5_eval_BestDice":
            gtAnnDict = au.load_gtAnnDict(main_dict)
            gtAnnClass = COCO(gtAnnDict)
            results = au.assert_gtAnnDict(main_dict, reset=None)

        if mode == "cp_annList":
            ms.dataset2cocoformat(dataset_name="CityScapes")

        if mode == "pascal2lcfcn_points":
            data_utils.pascal2lcfcn_points(main_dict)

        if mode == "cp2lcfcn_points":
            data_utils.cp2lcfcn_points(main_dict)

        if mode == "train":
            train.main(main_dict)
            import ipdb; ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "train_only":
            train.main(main_dict, train_only=True)
            import ipdb; ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "sharpmask2psfcn":
            for split in ["train", "val"]:
                root = "/mnt/datasets/public/issam/COCO2014/ProposalsSharp/"
                path = "{}/sharpmask/{}/jsons/".format(root, split)
                jsons = glob(path + "*.json")

                # Group proposals by image id, then save one file per image.
                propDict = {}
                for k, json in enumerate(jsons):
                    print("{}/{}".format(k, len(jsons)))
                    props = ms.load_json(json)
                    for p in props:
                        if p["image_id"] not in propDict:
                            propDict[p["image_id"]] = []
                        propDict[p["image_id"]] += [p]
                for k in propDict.keys():
                    fname = "{}/{}.json".format(root, k)
                    ms.save_json(fname, propDict[k])

        if mode == "cp2coco":
            import ipdb; ipdb.set_trace()  # breakpoint f2eb9e70 //
            dataset2cocoformat.cityscapes2cocoformat(main_dict)
            # train.main(main_dict)
            import ipdb; ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "train_lcfcn":
            train_lcfcn.main(main_dict)
            import ipdb; ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "summary":
            try:
                history = ms.load_history(main_dict)
                # if predictList_str == "MAE":
                #     results[key] = "{}/{}: {:.2f}".format(
                #         history["best_model"]["epoch"], history["epoch"],
                #         history["best_model"][metric_name])
                # val_dict = history["val"][-1]
                val_dict = history["best_model"]
                iou25 = val_dict["0.25"]
                iou5 = val_dict["0.5"]
                iou75 = val_dict["0.75"]
                results[key] = "{}/{}: {:.1f} - {:.1f} - {:.1f}".format(
                    val_dict["epoch"], history["epoch"], iou25 * 100,
                    iou5 * 100, iou75 * 100)
                # if history["val"][-1]["epoch"] != history["epoch"]:
                #     results[key] += " | Val {}".format(history["epoch"])
                try:
                    results[key] += " | {}/{}".format(
                        len(history["trained_batch_names"]),
                        history["train"][-1]["n_samples"])
                except Exception:
                    pass
            except Exception:
                pass

        if mode == "vals":
            history = ms.load_history(main_dict)
            for i in range(1, len(main_dict["predictList"]) + 1):
                if len(history['val']) == 0:
                    continue
                res = history["val"][-i]
                map50 = res["map50"]
                map75 = res["map75"]
                # if map75 < 1e-3:
                #     continue
                string = "{} - {} - map50: {:.2f} - map75: {:.2f}".format(
                    res["epoch"], res["predict_name"], map50, map75)
                key_tmp = list(key).copy()
                key_tmp[1] += " {} - {}".format(metric_name,
                                                res["predict_name"])
                results[tuple(key_tmp)] = string
            # print("map75", pd.DataFrame(history["val"])["map75"].max())
            # df = pd.DataFrame(history["vals"][:20])["water_loss_B"]
            # print(df)

    try:
        print(ms.dict2frame(results))
    except Exception:
        print("Results not printed...")
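# Example invocations, based on the argparse flags defined in main(). The
# script and experiment names are hypothetical:
#
#   python main.py -e counting_exp -m train
#   python main.py -e counting_exp -m summary
#   python main.py -e counting_exp -m test_best -r reset
#
# -s/-k/-b route the run to borgy status/kill/submit for every
# model/config/metric/dataset/loss combination instead of running the
# selected mode locally.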
def train(exp_dict):
    history = ms.load_history(exp_dict)

    # Source
    src_trainloader, src_valloader = ms.load_src_loaders(exp_dict)

    #####################
    ## Train source model
    #####################
    src_model, src_opt = ms.load_model_src(exp_dict)

    # Train Source, resuming from the last recorded epoch
    for e in range(history["src_train"][-1]["epoch"],
                   exp_dict["src_epochs"]):
        train_dict = ts.fit_src(src_model, src_trainloader, src_opt)
        loss = train_dict["loss"]
        print("Source ({}) - Epoch [{}/{}] - loss={:.2f}".format(
            type(src_trainloader).__name__, e, exp_dict["src_epochs"], loss))
        history["src_train"] += [{"loss": loss, "epoch": e}]
        if e % 50 == 0:
            ms.save_model_src(exp_dict, history, src_model, src_opt)

    # Test Source
    src_acc = test.validate(src_model, src_model, src_trainloader,
                            src_valloader)
    print("{} TEST Accuracy = {:.2%}\n".format(exp_dict["src_dataset"],
                                               src_acc))
    history["src_acc"] = src_acc
    ms.save_model_src(exp_dict, history, src_model, src_opt)

    #####################
    ## Train Target model
    #####################
    tgt_trainloader, tgt_valloader = ms.load_tgt_loaders(exp_dict)

    # Load models and initialize the target net from the source weights
    tgt_model, tgt_opt, disc_model, disc_opt = ms.load_model_tgt(exp_dict)
    tgt_model.load_state_dict(src_model.state_dict())

    for e in range(history["tgt_train"][-1]["epoch"],
                   exp_dict["tgt_epochs"] + 1):
        # 1. Train disc
        if exp_dict["options"]["disc"]:
            tg.fit_disc(src_model, tgt_model, disc_model, src_trainloader,
                        tgt_trainloader, opt_tgt=tgt_opt, opt_disc=disc_opt,
                        epochs=3, verbose=0)

        acc_tgt = test.validate(src_model, tgt_model, src_trainloader,
                                tgt_valloader)
        history["tgt_train"] += [{
            "epoch": e,
            "acc_src": src_acc,
            "acc_tgt": acc_tgt,
            "n_train - " + exp_dict["src_dataset"]:
                len(src_trainloader.dataset),
            "n_train - " + exp_dict["tgt_dataset"]:
                len(tgt_trainloader.dataset),
            "n_test - " + exp_dict["tgt_dataset"]:
                len(tgt_valloader.dataset)
        }]

        print("\n>>> Methods: {} - Source: {} -> Target: {}".format(
            None, exp_dict["src_dataset"], exp_dict["tgt_dataset"]))
        print(pd.DataFrame([history["tgt_train"][-1]]))

        if (e % 5) == 0:
            ms.save_model_tgt(exp_dict, history, tgt_model, tgt_opt,
                              disc_model, disc_opt)
            # ms.test_latest_model(exp_dict)

        # 2. Train center-magnet
        if exp_dict["options"]["center"]:
            tg.fit_center(src_model, tgt_model, src_trainloader,
                          tgt_trainloader, tgt_opt, epochs=1)

    ms.save_model_tgt(exp_dict, history, tgt_model, tgt_opt, disc_model,
                      disc_opt)

    exp_dict["reset_src"] = 0
    exp_dict["reset_tgt"] = 0
    ms.test_latest_model(exp_dict)
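# Assumption, sketched for clarity: the resume loops above read
# history["src_train"][-1]["epoch"] and history["tgt_train"][-1]["epoch"]
# before any training has happened, so ms.load_history() presumably seeds a
# fresh history with one epoch-0 entry per phase, roughly like this:
def _new_history():
    return {
        "src_train": [{"loss": None, "epoch": 0}],
        "tgt_train": [{"epoch": 0}],
        "src_acc": None,
    }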