def load_trainval(main_dict):
    path_datasets = "datasets"
    path_transforms = "addons/transforms.py"
    dataset_dict = ms.get_module_classes(path_datasets)
    transform_dict = ms.get_functions(path_transforms)

    dataset_name = main_dict["dataset_name"]
    train_set, val_set = ms.load_trainval({"dataset_name": dataset_name,
                                           "path_datasets": path_datasets,
                                           "trainTransformer": "Tr_WTP_NoFlip",
                                           "testTransformer": "Te_WTP",
                                           "dataset_options": {},
                                           "dataset_dict": dataset_dict,
                                           "transform_dict": transform_dict})

    annList_path = val_set.path + "/annotations/{}_gt_annList.json".format(
        val_set.split)
    val_set.annList_path = annList_path

    return train_set, val_set
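# Hedged usage sketch for load_trainval(): it assumes `ms` is the project's
# utils module imported at the top of this file, and that "PascalSmall" is a
# registered dataset name (illustrative only, not necessarily in datasets/).
def _example_load_trainval():
    train_set, val_set = load_trainval({"dataset_name": "PascalSmall"})
    print(len(train_set), len(val_set))
    # the val split also carries the path of its ground-truth annList file
    print(val_set.annList_path)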
def pascal2cocoformat():
    # ms.load_trainval returns (train_set, val_set); instances_val2012.json
    # is built from the val split
    _, dataset = ms.load_trainval({"dataset_name": "VOC"})

    fname = "/mnt/datasets/public/issam/VOCdevkit/annotations/"
    fname += "instances_val2012.json"

    tmp = ms.load_json("/mnt/datasets/public/issam/"
                       "VOCdevkit/annotations/pascal_val2012.json")
    ann_json = {}
    ann_json["categories"] = tmp["categories"]
    ann_json["type"] = "instances"

    # Images and annotations
    imageList = []
    annList = []
    ann_id = 1  # renamed from `id`, which shadows the builtin

    for i in range(len(dataset)):
        print("{}/{}".format(i, len(dataset)))
        batch = dataset[i]

        image_id = int(batch["name"])
        height, width = batch["images"].shape[-2:]
        imageList += [{"file_name": batch["name"] + ".jpg",
                       "height": height,
                       "width": width,
                       "id": image_id}]

        maskObjects = batch["maskObjects"]
        maskClasses = batch["maskClasses"]
        n_objects = int(maskObjects[maskObjects != 255].max())

        for obj_id in range(1, n_objects + 1):
            binmask = (maskObjects == obj_id)

            segmentation = maskUtils.encode(np.asfortranarray(ms.t2n(binmask)))
            segmentation["counts"] = segmentation["counts"].decode("utf-8")

            # each instance mask must map to exactly one class label
            uniques = (binmask.long() * maskClasses).unique()
            uniques = uniques[uniques != 0]
            assert len(uniques) == 1
            category_id = uniques[0].item()

            annList += [{"segmentation": segmentation,
                         "iscrowd": 0,
                         # "bbox": maskUtils.toBbox(segmentation).tolist(),
                         "area": int(maskUtils.area(segmentation)),
                         "id": ann_id,
                         "image_id": image_id,
                         "category_id": category_id}]
            ann_id += 1

    ann_json["annotations"] = annList
    ann_json["images"] = imageList
    ms.save_json(fname, ann_json)

    # Dump a "perfect detector" copy: the ground-truth annotations with a
    # confidence score of 1, usable as an upper-bound results file.
    anns = ms.load_json(fname)
    fname_dummy = fname.replace(".json", "_best.json")
    annList = anns["annotations"]
    for a in annList:
        a["score"] = 1

    ms.save_json(fname_dummy, annList)
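# Hedged sanity check for the files written by pascal2cocoformat():
# pycocotools should be able to index the ground-truth file, and the
# "_best.json" copy should load as a perfect-score results file. The paths
# mirror the hard-coded ones above.
def _example_check_coco_file():
    from pycocotools.coco import COCO

    fname = ("/mnt/datasets/public/issam/VOCdevkit/annotations/"
             "instances_val2012.json")
    cocoGt = COCO(fname)  # builds the image/annotation index; fails on bad json
    print(len(cocoGt.imgs), "images,", len(cocoGt.anns), "annotations")

    # loadRes accepts a list of annotations that each carry a "score"
    cocoDt = cocoGt.loadRes(fname.replace(".json", "_best.json"))
    print(len(cocoDt.anns), "dummy ground-truth detections")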
def main(main_dict, train_only=False):
    ms.print_welcome(main_dict)

    # EXTRACT VARIABLES
    reset = main_dict["reset"]
    epochs = main_dict["epochs"]
    batch_size = main_dict["batch_size"]
    sampler_name = main_dict["sampler_name"]
    verbose = main_dict["verbose"]
    loss_name = main_dict["loss_name"]
    metric_name = main_dict["metric_name"]
    metric_class = main_dict["metric_dict"][metric_name]
    loss_function = main_dict["loss_dict"][loss_name]
    predictList = main_dict["predictList"]

    # Assert everything is available
    ## Sharp proposals
    ## LCFCN points
    ## gt_annDict

    # Dataset
    train_set, val_set = ms.load_trainval(main_dict)
    train_set[0]  # touch one sample so a misconfigured dataset fails fast

    # Model
    if reset == "reset" or not ms.model_exists(main_dict):
        model, opt, history = ms.init_model_and_opt(main_dict, train_set)
        print("TRAINING FROM SCRATCH EPOCH: %d/%d" % (history["epoch"], epochs))
    else:
        model, opt, history = ms.load_latest_model_and_opt(main_dict, train_set)
        print("RESUMING EPOCH %d/%d" % (history["epoch"], epochs))

    # Get dataloader
    trainloader = ms.get_dataloader(
        dataset=train_set,
        batch_size=batch_size,
        sampler_class=main_dict["sampler_dict"][sampler_name])

    # SAVE HISTORY
    history["epoch_size"] = len(trainloader)
    if "trained_batch_names" in history:
        model.trained_batch_names = set(history["trained_batch_names"])

    ms.save_pkl(main_dict["path_history"], history)

    # START TRAINING
    start_epoch = history["epoch"]
    predict_name = predictList[0]

    for epoch in range(start_epoch + 1, epochs):
        # %%%%%%%%%%% 1. TRAINING PHASE %%%%%%%%%%%%
        history = training_phase(history, main_dict, model, trainloader,
                                 opt, loss_function, verbose, epoch)

        # %%%%%%%%%%% 2. VALIDATION PHASE %%%%%%%%%%%%
        if not train_only and (epoch % 5) == 0:
            history = validation_phase_mAP(history, main_dict, model, val_set,
                                           predict_name, epoch)

        ms.save_pkl(main_dict["path_history"], history)
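# Hedged sketch of the contract main() assumes for training_phase(), which is
# defined elsewhere in this repo. The body below is an assumption about its
# behavior (one pass over the loader, then a record appended to
# history["train"]), not the actual implementation.
def _example_training_phase(history, main_dict, model, trainloader, opt,
                            loss_function, verbose, epoch):
    model.train()
    loss_sum = 0.
    for i, batch in enumerate(trainloader):
        opt.zero_grad()
        loss = loss_function(model, batch)  # assumed signature: (model, batch)
        loss.backward()
        opt.step()
        loss_sum += float(loss)
        if verbose:
            print("epoch {} - batch {}/{}: avg loss {:.3f}".format(
                epoch, i + 1, len(trainloader), loss_sum / (i + 1)))

    history["epoch"] = epoch
    # debug() reads history["train"] as a DataFrame indexed by the loss name
    history.setdefault("train", []).append(
        {"epoch": epoch,
         main_dict["loss_name"]: loss_sum / max(len(trainloader), 1)})
    return history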
def validate(main_dict, train_only=False):
    ms.print_welcome(main_dict)

    # EXTRACT VARIABLES
    reset = main_dict["reset"]
    epochs = main_dict["epochs"]
    batch_size = main_dict["batch_size"]
    sampler_name = main_dict["sampler_name"]
    verbose = main_dict["verbose"]
    loss_name = main_dict["loss_name"]
    metric_name = main_dict["metric_name"]
    epoch2val = main_dict["epoch2val"]
    val_batchsize = main_dict["val_batchsize"]
    metric_class = main_dict["metric_dict"][metric_name]
    loss_function = main_dict["loss_dict"][loss_name]
    predictList = main_dict["predictList"]

    # Assert everything is available
    ## Sharp proposals
    ## LCFCN points
    ## gt_annDict

    # Dataset
    train_set, val_set = ms.load_trainval(main_dict)
    train_set[0]  # touch one sample so a misconfigured dataset fails fast

    # Model
    if reset == "reset" or not ms.model_exists(main_dict):
        model, opt, history = ms.init_model_and_opt(main_dict, train_set)
        print("TRAINING FROM SCRATCH EPOCH: %d/%d" % (history["epoch"], epochs))
    else:
        model, opt, history = ms.load_latest_model_and_opt(main_dict, train_set)
        print("RESUMING EPOCH %d/%d" % (history["epoch"], epochs))

    # Get dataloader
    trainloader = ms.get_dataloader(
        dataset=train_set,
        batch_size=batch_size,
        sampler_class=main_dict["sampler_dict"][sampler_name])

    # SAVE HISTORY
    history["epoch_size"] = len(trainloader)
    if "trained_batch_names" in history:
        model.trained_batch_names = set(history["trained_batch_names"])

    # START VALIDATION
    start_epoch = history["epoch"]
    predict_name = predictList[0]

    if len(history["val"]) == 0:
        last_validation = 1
    else:
        last_validation = history["val"][-1]["epoch"]

    for epoch in range(start_epoch + 1, epochs):
        if epoch <= last_validation:
            continue  # this epoch is already recorded in history["val"]

        # %%%%%%%%%%% 2. VALIDATION PHASE %%%%%%%%%%%%
        history = validation_phase_mAP(history, main_dict, model, val_set,
                                       predict_name, epoch)
        ms.save_pkl(main_dict["path_history"], history)
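# Hedged sketch of the contract validate() assumes for validation_phase_mAP()
# (defined in train.py): it appends one record per call to history["val"].
# The au.validate() call and the field names ("map50", "map75",
# "predict_name") follow their usage elsewhere in this file; everything else
# is an assumption, not the actual implementation.
def _example_validation_phase_mAP(history, main_dict, model, val_set,
                                  predict_name, epoch):
    val_dict, pred_annList = au.validate(model, val_set,
                                         predict_method=predict_name,
                                         n_val=None, return_annList=True)
    val_dict["epoch"] = epoch
    val_dict["predict_name"] = predict_name
    history["val"] += [val_dict]
    return history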
def debug(main_dict):
    # ud.debug_sheep(main_dict)
    loss_dict = main_dict["loss_dict"]
    metric_dict = main_dict["metric_dict"]
    metric_name = main_dict["metric_name"]
    metric_class = main_dict["metric_dict"][metric_name]
    loss_name = main_dict["loss_name"]
    batch_size = main_dict["batch_size"]

    ms.print_welcome(main_dict)

    train_set, val_set = ms.load_trainval(main_dict)
    # test_set = ms.load_test(main_dict)
    # batch = ms.get_batch(test_set, indices=[509])
    # batch = ms.get_batch(val_set, indices=[0, 4, 9])
    # b2 = um.get_batch(val_set, indices=[4])
    # ms.fitBatch(model, batch, loss_name="image_loss", opt=opt, epochs=100)
    # batch_train = ms.get_batch(val_set, indices=[15])
    # batch = ms.get_batch(val_set, indices=[15])
    # tr_batch = ms.get_batch(val_set, indices=[2])
    # batch = ms.get_batch(val_set, indices=[1, 2, 3, 12, 13, 14, 16, 17, 67, 68, 70])
    # batch = ms.get_batch(val_set, indices=[300])
    # ms.images(batch["images"], batch["points"], denorm=1, enlarge=1)

    # for i in range(len(val_set)):
    #     batch = ms.get_batch(val_set, indices=[i])
    #     sharp_proposals = prp.Sharp_class(batch)
    #     pointList = bu.mask2pointList(batch["points"])["pointList"]
    #     propDict = bu.pointList2propDict(pointList, sharp_proposals, thresh=0.5)

    # for i in range(len(train_set)):
    #     print(i)
    #     sharp_proposals = prp.Sharp_class(ms.get_batch(train_set, indices=[i]))

    # d2c.pascal2cocoformat(main_dict)

    # model, opt, _ = ms.init_model_and_opt(main_dict)
    # history = ms.load_history(main_dict)
    # print(pd.DataFrame(history["val"]))
    # print(pd.DataFrame(history["train"])[loss_name])

    model, opt, _ = ms.init_model_and_opt(main_dict)
    import ipdb; ipdb.set_trace()  # breakpoint b87b640d //

    batch = ms.get_batch(val_set, indices=[1])
    ms.visBlobs(model, batch, predict_method="BestDice")
    import ipdb; ipdb.set_trace()  # breakpoint a18a7b92 //

    plants.save_test_to_h5(main_dict)
    model = ms.load_best_model(main_dict)

    if 1:
        import train
        train.validation_phase_mIoU(ms.load_history(main_dict), main_dict,
                                    model, val_set, "BestDice", 0)

    test_set = ms.load_test(main_dict)
    batch = ms.get_batch(test_set, indices=[4])
    # model, opt, _ = ms.init_model_and_opt(main_dict)
    batch = ms.get_batch(val_set, indices=[4])

    ms.images(batch["images"],
              model.predict(batch, predict_method="BestDice",
                            use_trans=1, sim_func=au.compute_dice)["blobs"],
              denorm=1)

    val_dict, pred_annList = au.validate(model, val_set,
                                         predict_method="BestDice",
                                         n_val=None, return_annList=True)

    model = ms.load_lcfcn(train_set, mode="lcfcn")
    val_dict, pred_annList = au.validate(model, val_set,
                                         predict_method="BestDice",
                                         n_val=None, return_annList=True)

    model = ms.load_best_model(main_dict)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                opt=opt, epochs=10000, visualize=True)
    import ipdb; ipdb.set_trace()  # breakpoint 4e08c360 //

    if os.path.exists(main_dict["path_history"]):
        history = ms.load_history(main_dict)
        print("# Trained Images:",
              len(history["trained_batch_names"]), "/", len(train_set))
        print("# Epoch:", history["epoch"])
        # print(pd.DataFrame(history["val"]))
        # val_names = [ms.extract_fname(fname).replace(".jpg", "")
        #              for fname in val_set.img_names]
        # assert np.in1d(history["trained_batch_names"], val_names).sum() == 0
        import ipdb; ipdb.set_trace()  # breakpoint ef2ce16b //
        # print(pd.DataFrame(history["val"]))

    # model, opt, _ = ms.init_model_and_opt(main_dict)
    model = ms.load_best_model(main_dict)
    ms.visBlobs(model, batch, predict_method="BestDice")
    ms.images(batch["images"],
              au.annList2mask(model.predict(batch,
                                            predict_method="loc")["annList"])["mask"],
              enlarge=1, denorm=1)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                opt=opt, epochs=10000, visualize=True)
    ms.images(model, ms.get_batch(val_set, indices=[0]))

    model = ms.load_best_model(main_dict)
    model.extract_proposalMasks(ms.get_batch(train_set, indices=[1]))
    mask = model.visualize(ms.get_batch(val_set, indices=[1]))

    img = ms.f2l(ms.t2n((ms.denormalize(batch["images"])))).squeeze()
    segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)

    results = model.predict(batch, "ewr")
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                opt=opt, epochs=10000, visualize=True)

    model = ms.load_best_model(main_dict)
    model.visualize(ms.get_batch(val_set, indices=[0]))
    ms.visBlobs(model, ms.get_batch(val_set, indices=[0]), with_void=True)
    ms.images(ms.gray2cmap(model(batch["images"].cuda())["mask"].squeeze()))

    h, w = batch["images"].shape[-2:]
    ms.images(ms.gray2cmap(deconvolve(ms.t2n(model(batch["images"].cuda())["cam"]),
                                      kernel(46, 65, sigma=1.5))))

    model = ms.load_latest_model(main_dict)
    opt = ms.create_opt(model, main_dict)
    val_dict, pred_annList = au.validate(model, val_set,
                                         predict_method="BestDice",
                                         n_val=None, return_annList=True)
    ms.visBlobs(model, batch)
    ms.visPretty(model, batch, alpha=0.0)

    if ms.model_exists(main_dict) and main_dict["reset"] != "reset":
        model = ms.load_latest_model(main_dict)
        opt = ms.create_opt(model, main_dict)
        history = ms.load_history(main_dict)
        import ipdb; ipdb.set_trace()  # breakpoint 46fc0d2c //

        batch = ms.get_batch(val_set, indices=[2])
        model.visualize(batch, cam_index=1)
        model.embedding_head.score_8s.bias  # inspect weights

        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice", n_val=[11])
        # vis.visBlobs(model, ms.get_batch(val_set, indices=[14]))
        # dice_scores = val.valPascal(model, val_set,
        #                             predict_method="BestDice", n_val=[11])
        import ipdb; ipdb.set_trace()  # breakpoint 54f5496d //

        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice", n_val=[80, 81])
        vis.visBlobList(model, val_set, [1, 2, 3])
        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice",
                                    n_val=len(val_set))
        obj_scores = val.valPascal(model, val_set,
                                   predict_method="BestObjectness",
                                   n_val=len(val_set))
        vis.visBlobs(model, batch)
        import ipdb; ipdb.set_trace()  # breakpoint cbf2e6d1 //

        ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                    opt=opt, epochs=10000, visualize=True)
        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice",
                                    n_val=[630, 631, 632])
        obj_scores = val.valPascal(model, val_set,
                                   predict_method="BestObjectness", n_val=100)
        dice_scores = val.valPascal(model, val_set,
                                    predict_method="BestDice",
                                    n_val=len(val_set))
        # val.valPascal(model, val_set,
        #               predict_method="BestObjectness", n_val=[10])
        model.predict(batch, predict_method="BestDice")
        import ipdb; ipdb.set_trace()  # breakpoint 797d17b4 //

        obj_scores = val.valPascal(model, val_set,
                                   predict_method="BestObjectness", n_val=30)
        vis.visBlobs(model, ms.get_batch(val_set, indices=[14]),
                     predict_method="BestDice")
        model.predict(batch, predict_method="blobs")
        import ipdb; ipdb.set_trace()  # breakpoint f4598264 //

        model.visualize(batch)
        val.valPascal(model, val_set, predict_method="BestObjectness",
                      n_val=[10])
        vis.visBlobs(model, batch, predict_method="BestObjectness")
        import ipdb; ipdb.set_trace()  # breakpoint f691d432 //

        ms.fit(model,
               ms.get_dataloader(val_set, batch_size=1, sampler_class=None),
               opt=opt, loss_function=main_dict["loss_dict"][loss_name])
        vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                     predict_method="UpperBound")
        history = ms.load_history(main_dict)
        # model = ms.load_best_model(main_dict)
        # print("Loaded best model...")
    else:
        model, opt, _ = ms.init_model_and_opt(main_dict)
        import ipdb; ipdb.set_trace()  # breakpoint e26f9978 //

    obj_scores = val.valPascal(model, val_set,
                               predict_method="BestObjectness", n_val=[2])
    # ms.images(batch["images"], model.predict(batch, "blobs"), denorm=1)
    import ipdb; ipdb.set_trace()  # breakpoint 08a2a8af //

    vis.visBlobs(model, batch)
    vis.visBlobs(model, ms.get_batch(val_set, indices=[14]))
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                opt=opt, epochs=10000, visualize=True)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                opt=opt, epochs=10, visualize=True)
    # test_prm(model, batch)
    # test_prm(model, batch, i=1, j=0)
    # import ipdb; ipdb.set_trace()  # breakpoint a860544a //
    # img2 = batch["images"].cuda().requires_grad_()
    # cues = rm.peak_response(model.backbone, img, peak_threshold=1)
    # batch = ms.get_batch(train_set, indices=[0])
    # vis.visBatch(ms.get_batch(train_set, indices=[72]))
    # vis.visBlobs(model, batch)
    # ms.images(batch["images"], batch["points"], denorm=1, enlarge=1)
    # vis.visSplit(model, batch)
    # model.set_proposal(None); vis.visBlobs(model, batch)
    # vis.visBlobList(model, val_set, [0, 1, 2, 3])
    # for i in range(len(train_set)): print(i); x = train_set[i]
    '''
    mask = np.zeros(batch["images"].shape)[:, 0]
    ms.images(batch["images"], mask, denorm=1)
    for i in range(400):
        mask += (i + 1) * (rescale(sharp_proposals[i]["mask"], 0.5) > 0).astype(int)
    annList = vis.visAnnList(model, val_set, [34], cocoGt,
                             predict_proposal="BestObjectness")
    '''
    n_images = 10
    batch = ms.get_batch(val_set, indices=[9])
    import ipdb; ipdb.set_trace()  # breakpoint 8d385ace //

    batch = ms.get_batch(val_set, indices=[50])
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                opt=opt, epochs=10)
    vis.visBlobs(model, ms.get_batch(val_set, indices=[3]),
                 predict_method="BestDice")
    vis.visBlobs(model, batch)
    import ipdb; ipdb.set_trace()  # breakpoint 99558393 //

    val.valPascal(model, val_set, predict_method="BestObjectness", n_val=10)
    val.valPascal(model, val_set, predict_method="BestDice", n_val=10)
    val.valPascal(model, val_set, predict_method="BestDice_no", n_val=[10])

    batch = ms.get_batch(val_set, indices=[10])
    model.predict(batch, predict_method="BestDice")
    model.predict(batch, predict_method="BestDice_no")
    vis.visBlobs(model, batch)
    vis.visBlobs(model, batch, predict_method=main_dict["predict_name"],
                 cocoGt=val_set.cocoGt)

    val.valPascal(model, val_set, predict_method="BestObjectness", n_val=15)
    val.valPascal(model, val_set, predict_method="BoxSegment", n_val=15)
    val.valPascal(model, val_set, predict_method=main_dict["predict_name"],
                  n_val=15)
    vis.visBlobs(model, batch)
    ms.fitBatch(model, batch, loss_function=loss_dict[main_dict["loss_name"]],
                opt=opt, epochs=5)
    vis.visBlobs(model, batch)

    ms.images(bu.batch2propDict(ms.get_batch(val_set, indices=[1]))["foreground"])
    batch = ms.get_batch(val_set, indices=[19])
    ms.images(batch["images"], bu.batch2propDict(batch)["foreground"], denorm=1)

    ms.fitBatch(model, batch, loss_function=loss_dict[main_dict["loss_name"]],
                opt=opt, epochs=100)
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                 predict_method="GlanceBestBox")
    val.valPascal(model, val_set, predict_method="GlanceBestBox", n_val=15)
    val.valPascal(model, val_set, predict_method="BestDice", n_val=15)
    import ipdb; ipdb.set_trace()  # breakpoint 01f8e3fa //

    val.valPascal(model, val_set, predict_method=main_dict["predict_name"],
                  n_val=5)
    import ipdb; ipdb.set_trace()  # breakpoint 78d3f03a //

    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]))
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                 predict_method="BestObjectness")
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                 predict_method="UpperBound")
    ms.fitBatch(model, batch, loss_function=loss_dict[main_dict["loss_name"]],
                opt=opt, epochs=100)
    # ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
    #             opt=opt, epochs=1)
    # ms.fitData(model, val_set, opt=opt, loss_function=loss_dict[loss_name])
    import ipdb; ipdb.set_trace()  # breakpoint 51e4d47d //

    val.valPascal(model, val_set, predict_method="BestObjectness",
                  n_val=n_images)
    val.valPascal(model, val_set, predict_method="UpperBound",
                  n_val=len(val_set))
    # vis.visBlobs(model, ms.get_batch(val_set, indices=[1]))
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]))
    vis.visBlobs(model, ms.get_batch(val_set, indices=[1]),
                 predict_method="BestObjectness")

    n_images = len(val_set)
    for e in range(5):
        for i in range(n_images):
            i_rand = np.random.randint(n_images)
            i_rand = i
            print(i_rand)
            batch = ms.get_batch(train_set, indices=[i_rand])
            ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                        opt=opt, epochs=1)

    # cocoGt = ms.load_voc2012_val()
    cocoGt = ms.load_cp_val()
    ms.fitBatch(model, batch, loss_function=loss_dict[main_dict["loss_name"]],
                opt=opt, epochs=100)
    # vis.visAnns(model, batch, cocoGt, predict_proposal="BestBoundary")
    import ipdb; ipdb.set_trace()  # breakpoint 6f37a744 //

    if 1:
        n_images = 30
        resList = []
        for k in range(5):
            for i in range(n_images):
                print(i)
                batch = ms.get_batch(val_set, indices=[i])
                ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                            opt=opt, epochs=2)
            resList += [val.valPascal(model, val_set,
                                      predict_proposal="excitementInside",
                                      n_val=n_images)]

    # excitementInside
    ms.fitBatch(model, batch, loss_function=loss_dict[main_dict["loss_name"]],
                opt=opt, epochs=100)
    import ipdb; ipdb.set_trace()  # breakpoint 14451165 //

    ms.eval_cocoDt(main_dict, predict_proposal="UB_Sharp_withoutVoid")
    import ipdb; ipdb.set_trace()  # breakpoint f3f0fda5 //

    vis.visAnns(model, batch, cocoGt, predict_proposal="BestObjectness")
    annList = vis.visAnnList(model, val_set, [1, 2], cocoGt,
                             predict_proposal="BestObjectness")
    annList = ms.load_annList(main_dict, predict_proposal="BestObjectness")
    ms.eval_cocoDt(main_dict, predict_proposal="UB_Sharp_withoutVoid")
    # score = np.array([s["score"] for s in annList])

    batch = ms.get_batch(val_set, indices=[2])
    ms.fitBatch(model, batch, loss_function=loss_dict[main_dict["loss_name"]],
                opt=opt, epochs=100)
    vis.visBlobs(model, batch)
    ms.fitBatch(model, batch, loss_function=loss_dict["water_loss"],
                opt=opt, epochs=100)
    ms.fitBatch(model, batch, loss_function=loss_dict["point_loss"],
                opt=opt, epochs=100)
    vis.visSplit(model, batch, 0, "water")
    '''
    val.valPascal(model, val_set, predict_proposal="excitementInside", n_val=30)
    '''
    # model.save(batch, path="/mnt/home/issam/Summaries/tmp.png")
    # batch = ms.get_batch(train_set, indices=[52])
    # torch.save(model.state_dict(), "/mnt/home/issam/Saves/model_split.pth")
    vis.save_images(model, val_set,
                    # indices=np.random.randint(0, len(val_set), 200),
                    indices=np.arange(5, 200),
                    path="/mnt/home/issam/Summaries/{}_val/".format(
                        main_dict["dataset_name"]))
    vis.visBlobs(model, batch)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                opt=opt, epochs=10)
    ms.valBatch(model, batch, metric_dict[metric_name])
    ms.validate(model, val_set, metric_class=metric_class)
    # ms.visBlobs(model, tr_batch)
    # model.predict(tr_batch, "counts")

    for i in range(292, 784):
        batch = ms.get_batch(val_set, indices=[i])
        try:
            score = ms.valBatch(model, batch, metric_dict[metric_name])
        except Exception:
            print(i, batch['name'])
            import ipdb; ipdb.set_trace()  # breakpoint effaca86 //
            ms.visBlobs(model, batch)

    if 1:
        resList = []
        for k in range(5):
            for i in range(10):
                print(i)
                batch = ms.get_batch(val_set, indices=[i])
                ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                            opt=opt, epochs=1)
            resList += [val.valPascal(model, val_set,
                                      predict_proposal="BestObjectness",
                                      n_val=10)]

    val.valPascal(model, val_set, predict_proposal="BestBoundary", n_val=30)
    val.valPascal(model, val_set, predict_proposal="BestObjectness",
                  n_val=list(range(len(val_set))))
    # model.predict_proposals(batch)

    batch = ms.get_batch(val_set, indices=[35])
    ms.images(batch["original"], model.predict_proposals(batch, which=0))
    ms.images(ms.get_batch(train_set, [300])["original"],
              train_set.get_proposal(300, indices=[0, 1]))
    # from spn import object_localization
    # cm = model.class_activation_map(batch["images"].cuda())
    # model.display(ms.get_batch(train_set, indices=[3]))

    ms.images(255 * np.abs(model.predict(ms.get_batch(train_set, indices=[3]),
                                         "saliency")))
    sal = model.predict(ms.get_batch(train_set, indices=[3]), "saliency")
    ms.images(np.abs(sal) * 255)
    import ipdb; ipdb.set_trace()  # breakpoint c7ca398d //

    for i in range(1):
        ms.fit(model,
               ms.get_dataloader(train_set, batch_size=1, sampler_class=None),
               loss_function=main_dict["loss_dict"][loss_name],
               metric_class=main_dict["metric_dict"][metric_name],
               opt=opt, val_batch=False)

    ms.fitQuick(model, train_set, loss_name=loss_name,
                metric_name=metric_name, opt=opt)
    ms.fitBatch(model, batch, loss_function=loss_dict[loss_name],
                opt=opt, epochs=100)
    ms.valBatch(model, batch, metric_dict[metric_name])
    ms.visBlobs(model, ms.get_batch(train_set, indices=[3]))
    ms.visBlobs(model, batch)
    # model = ms.load_best_model(main_dict)
    # metrics.compute_ap(model, batch)
    # val.val_cm(main_dict)

    batch = ms.visBlobsQ(model, val_set, 8)
    import ipdb; ipdb.set_trace()  # breakpoint 5cd16f8f //

    ul.visSp_prob(model, batch)
    # ms.images(batch["images"], aa, denorm=1)  # `aa` is never defined here
    ms.visBlobs(model, batch)
    ul.vis_nei(model, batch, topk=1000, thresh=0.8, bg=True)
    ul.vis_nei(model, batch, topk=1000, bg=False)

    ms.fitQuick(model, train_set, batch_size=batch_size,
                loss_name=loss_name, metric_name=metric_name)
    val.validate(model, val_set, metric_name=main_dict["metric_name"],
                 batch_size=main_dict["val_batchsize"])
    ms.fitQuick(model, train_set, batch_size=batch_size,
                loss_name=loss_name, metric_name=metric_name)
    ms.fitBatch(model, batch, loss_name=loss_name, opt=opt, epochs=100)
    # val.valBatch(model, batch_train, metric_name=metric_name)
    #     # `batch_train` only exists in the commented lines above
    ms.fitBatch(model, batch, loss_function=losses.expand_loss,
                opt=opt, epochs=100)
    ms.visBlobs(model, batch)
    ms.visWater(model, batch)
    ms.validate(model, val_set, metric_class=metric_class)
    import ipdb; ipdb.set_trace()  # breakpoint ddad840d //

    model, opt, _ = ms.init_model_and_opt(main_dict)
    ms.fitBatch(model, batch, loss_name="water_loss_B", opt=opt, epochs=100)
    ms.fitQuick(model, train_set, loss_name=loss_name, metric_name=metric_name)
    # ms.images(batch["images"], batch["labels"], denorm=1)
    # ms.init.LOSS_DICT["water_loss"](model, batch)
    import ipdb; ipdb.set_trace()  # breakpoint f304b83a //

    ms.images(batch["images"], model.predict(batch, "labels"), denorm=1)
    val.valBatch(model, batch, metric_name=main_dict["metric_name"])
    ms.visBlobs(model, batch)
    import ipdb; ipdb.set_trace()  # breakpoint 074c3921 //

    ms.fitBatch(model, batch, loss_name=main_dict["loss_name"],
                opt=opt, epochs=100)

    for e in range(10):
        if e == 0:
            scoreList = []
        scoreList += [ms.fitIndices(model, train_set,
                                    loss_name=main_dict["loss_name"],
                                    batch_size=batch_size,
                                    metric_name=metric_name,
                                    opt=opt, epoch=e, num_workers=1,
                                    ind=np.random.randint(0, len(train_set), 32))]

    ms.fitData(model, train_set, opt=opt, epochs=10)

    um.reload(sp)
    water = sp.watersplit(model, batch).astype(int)
    ms.images(batch["images"], water, denorm=1)
    ms.visBlobs(model, batch)
    ms.images(batch["images"], ul.split_crf(model, batch), denorm=1)
    losses.dense_crf(model, batch, alpha=61, beta=31, gamma=1)
    ms.visBlobs(model, batch)
    model.blob_mode = "superpixels"

    # ----------------------
    # Vis Blobs
    ms.visBlobs(model, batch)
    ms.images(batch["images"], model.predict(batch, "labels"), denorm=1)

    # Vis Blobs
    # ms.visBlobs(model, batch)
    ms.images(batch["images"], sp.watersplit_test(model, batch).astype(int),
              denorm=1)
    # = sp.watersplit(model, batch).astype(int)

    # Vis CRF
    ms.images(batch["images"],
              ul.dense_crf(model, batch, alpha=5, gamma=5, beta=5, smooth=False),
              denorm=1)
    ms.images(batch["images"], ul.dense_crf(model, batch), denorm=1)

    # Eval
    val.valBatch(model, batch, metric_name=main_dict["metric_name"])
    import ipdb; ipdb.set_trace()  # breakpoint e9cd4eb0 //

    model = ms.load_best_model(main_dict)
    val.valBatch(model, batch, metric_name=main_dict["metric_name"])
    ms.fitBatch(model, batch, loss_name=main_dict["loss_name"], opt=opt)
    ms.visBlobs(model, batch)
    import ipdb; ipdb.set_trace()  # breakpoint 2167961a //

    batch = ms.get_batch(train_set, indices=[5])
    ms.fitBatch(model, batch, loss_name=main_dict["loss_name"], opt=opt)
    ms.images(batch["images"], model.predict(batch, "probs"), denorm=1)
    ms.visBlobs(model, batch)
    val.validate(model, val_set, metric_name=main_dict["metric_name"])
    val.validate(model, val_set, metric_name="SBD")
def test_COCOmap(main_dict):
    # create_voc2007(main_dict)
    model = ms.load_best_model(main_dict)
    _, val_set = ms.load_trainval(main_dict)

    path_base = "/mnt/datasets/public/issam/VOCdevkit/annotations/"
    fname = "{}/instances_val2012.json".format(path_base)
    cocoGt = COCO(fname)

    fname = (path_base + "/results/" + main_dict["exp_name"] + "_" +
             str(main_dict["model_options"]["predict_proposal"]) + ".json")

    # test_list(model, cocoGt, val_set, [0, 1, 2, 3], prp.Blobs)
    import ipdb; ipdb.set_trace()  # breakpoint 06c353ef //
    # test_list(model, cocoGt, val_set, [0], prp.BestObjectness)
    # test_list(model, cocoGt, val_set, [0, 1, 2, 3], prp.Blobs)

    if not os.path.exists(fname) or 1:  # "or 1" forces regeneration every run
        annList = []
        for i in range(len(val_set)):
            batch = ms.get_batch(val_set, [i])
            try:
                annList += model.predict(batch, "annList")
            except Exception:
                import ipdb; ipdb.set_trace()  # breakpoint 5f61b0cfx //

            if (i % 100) == 0:
                cocoEval, _ = d_helpers.evaluateAnnList(annList)
                ms.save_json(fname.replace(".json", "inter.json"), annList)
                # ms.save_json("tmp.json", annList)
                # cocoDt = cocoGt.loadRes("tmp.json")
                # cocoEval = COCOeval(cocoGt, cocoDt, "segm")
                # cocoEval.params.imgIds = list(set([v["image_id"] for v in cocoDt.anns.values()]))
                # cocoEval.evaluate()
                # cocoEval.accumulate()
                # cocoEval.summarize()

            print("{}/{}".format(i, len(val_set)))

        ms.save_json(fname, annList)

    # cocoEval = d_helpers.evaluateAnnList(ms.load_json(fname))
    # cocoEval = COCOeval(cocoGt, cocoDt, annType)
    # cocoEval.params.imgIds = list(set([v["image_id"] for v in cocoDt.anns.values()]))
    if 1:
        # cocoEval.params.imgIds = [2007000033]
        cocoDt = cocoGt.loadRes(fname)
        cocoEval = COCOeval(cocoGt, cocoDt, "segm")
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        print("Images:", len(cocoEval.params.imgIds))
        print("Model: {}, Loss: {}, Pred: {}".format(
            main_dict["model_name"], main_dict["loss_name"],
            main_dict["model_options"]["predict_proposal"]))

    import ipdb; ipdb.set_trace()  # breakpoint c6f8f580 //
    # d_helpers.visGT(cocoGt, cocoDt, ms.get_batch(val_set, [169]))
    # d_helpers.valList(cocoGt, cocoDt, val_set, [173, 174])
    # model.set_proposal(None); vis.visBlobs(model, ms.get_batch(val_set, [169]), "blobs")

    return "mAP25: {:.2f} - mAP75: {:.2f}".format(cocoEval.stats[1],
                                                  cocoEval.stats[2])
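# Hedged sketch: the commented-out imgIds filtering in test_COCOmap() can be
# used to score only the images that actually have predictions. COCOeval
# otherwise defaults to every image in cocoGt, so images without detections
# count as pure misses and deflate mAP when the results file covers a subset.
def _example_eval_subset(cocoGt, fname):
    from pycocotools.cocoeval import COCOeval

    cocoDt = cocoGt.loadRes(fname)
    cocoEval = COCOeval(cocoGt, cocoDt, "segm")
    # restrict evaluation to images that appear in the results file
    cocoEval.params.imgIds = list(set(v["image_id"]
                                      for v in cocoDt.anns.values()))
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    return cocoEval.stats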
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--exp')
    parser.add_argument('-b', '--borgy', default=0, type=int)
    parser.add_argument('-br', '--borgy_running', default=0, type=int)
    parser.add_argument('-m', '--mode', default="summary")
    parser.add_argument('-r', '--reset', default="None")
    parser.add_argument('-s', '--status', type=int, default=0)
    parser.add_argument('-k', '--kill', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int)
    parser.add_argument('-c', '--configList', nargs="+", default=None)
    parser.add_argument('-l', '--lossList', nargs="+", default=None)
    parser.add_argument('-d', '--datasetList', nargs="+", default=None)
    parser.add_argument('-metric', '--metricList', nargs="+", default=None)
    parser.add_argument('-model', '--modelList', nargs="+", default=None)
    parser.add_argument('-p', '--predictList', nargs="+", default=None)

    args = parser.parse_args()

    if args.borgy or args.kill:
        global_prompt = input("Do all? \n(y/n)\n")

    # SEE IF CUDA IS AVAILABLE
    assert torch.cuda.is_available()
    print("CUDA: %s" % torch.version.cuda)
    print("PyTorch: %s" % torch.__version__)

    mode = args.mode
    exp_name = args.exp

    exp_dict = experiments.get_experiment_dict(args, exp_name)

    pp_main = None
    results = {}

    # Get Main Class
    project_name = os.path.realpath(__file__).split("/")[-2]
    MC = ms.MainClass(path_models="models",
                      path_datasets="datasets",
                      path_metrics="metrics/metrics.py",
                      path_losses="losses/losses.py",
                      path_samplers="addons/samplers.py",
                      path_transforms="addons/transforms.py",
                      path_saves="/mnt/projects/counting/Saves/main/",
                      project=project_name)

    key_set = set()
    for model_name, config_name, metric_name, dataset_name, loss_name in product(
            exp_dict["modelList"], exp_dict["configList"],
            exp_dict["metricList"], exp_dict["datasetList"],
            exp_dict["lossList"]):

        # if model_name in ["LC_RESFCN"]:
        #     loss_name = "water_loss"

        config = configs.get_config_dict(config_name)
        key = ("{} - {} - {}".format(model_name, config_name, loss_name),
               "{}_({})".format(dataset_name, metric_name))

        if key in key_set:
            continue
        key_set.add(key)

        main_dict = MC.get_main_dict(mode, dataset_name, model_name,
                                     config_name, config, args.reset,
                                     exp_dict["epochs"], metric_name,
                                     loss_name)
        main_dict["predictList"] = exp_dict["predictList"]

        if mode == "paths":
            print("\n{}_({})".format(dataset_name, model_name))
            print(main_dict["path_best_model"])
            # print(main_dict["exp_name"])

        predictList_str = ' '.join(exp_dict["predictList"])

        if args.status:
            results[key] = borgy.borgy_status(mode, config_name, metric_name,
                                              model_name, dataset_name,
                                              loss_name, args.reset,
                                              predictList_str)
            continue

        if args.kill:
            results[key] = borgy.borgy_kill(mode, config_name, metric_name,
                                            model_name, dataset_name,
                                            loss_name, args.reset,
                                            predictList_str)
            continue

        if args.borgy:
            results[key] = borgy.borgy_submit(project_name, global_prompt,
                                              mode, config_name, metric_name,
                                              model_name, dataset_name,
                                              loss_name, args.reset,
                                              predictList_str)
            continue

        if mode == "debug":
            debug.debug(main_dict)

        if mode == "validate":
            validate.validate(main_dict)

        if mode == "save_gam_points":
            train_set, _ = au.load_trainval(main_dict)
            model = ms.load_best_model(main_dict)
            for i in range(len(train_set)):
                print(i, "/", len(train_set))
                batch = ms.get_batch(train_set, [i])
                fname = train_set.path + "/gam_{}.pkl".format(
                    batch["index"].item())
                points = model.get_points(batch)
                ms.save_pkl(fname, points)
            import ipdb
            ipdb.set_trace()  # breakpoint ee49ab9f //

        if mode == "save_prm_points":
            train_set, _ = au.load_trainval(main_dict)
            model = ms.load_best_model(main_dict)
            for i in range(len(train_set)):
                print(i, "/", len(train_set))
                batch = ms.get_batch(train_set, [i])
                fname = "{}/prm{}.pkl".format(batch["path"][0],
                                              batch["name"][0])
                points = model.get_points(batch)
                ms.save_pkl(fname, points)
            import ipdb
            ipdb.set_trace()  # breakpoint 679ce152 //

        # if mode == "pascal_annList":
        #     data_utils.pascal2lcfcn_points(main_dict)

        if mode == "upperboundmasks":
            import ipdb
            ipdb.set_trace()  # breakpoint 02fac8ce //
            results = au.test_upperboundmasks(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "model":
            results = au.test_model(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "upperbound":
            results = au.test_upperbound(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "MUCov":
            gtAnnDict = au.load_gtAnnDict(main_dict, reset=args.reset)
            # model = ms.load_best_model(main_dict)
            fname = main_dict["path_save"] + "/pred_annList.pkl"
            if not os.path.exists(fname):
                _, val_set = au.load_trainval(main_dict)
                model = ms.load_best_model(main_dict)
                pred_annList = au.dataset2annList(model, val_set,
                                                  predict_method="BestDice",
                                                  n_val=None)
                ms.save_pkl(fname, pred_annList)
            else:
                pred_annList = ms.load_pkl(fname)
            import ipdb
            ipdb.set_trace()  # breakpoint 527a7f36 //
            pred_annList = au.load_predAnnList(main_dict,
                                               predict_method="BestObjectness")
            # 0.31 best objectness
            # 0.3482122335421256
            # au.get_MUCov(gtAnnDict, pred_annList)
            au.get_SBD(gtAnnDict, pred_annList)

        if mode == "dic_sbd":
            import ipdb
            ipdb.set_trace()  # breakpoint 4af08a17 //

        if mode == "point_mask":
            from datasets import base_dataset
            import ipdb
            ipdb.set_trace()  # breakpoint 7fd55e0c //
            _, val_set = ms.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [1])
            model = ms.load_best_model(main_dict)
            pred_dict = model.LCFCN.predict(batch)
            # ms.pretty_vis(batch["images"], base_dataset.batch2annList(batch))
            ms.images(ms.pretty_vis(
                batch["images"],
                model.LCFCN.predict(batch,
                                    predict_method="original")["annList"]),
                win="blobs")
            ms.images(ms.pretty_vis(batch["images"],
                                    base_dataset.batch2annList(batch)),
                      win="erww")
            ms.images(batch["images"], batch["points"], denorm=1, enlarge=1,
                      win="e21e")
            import ipdb
            ipdb.set_trace()  # breakpoint ab9240f0 //

        if mode == "lcfcn_output":
            import ipdb
            ipdb.set_trace()  # breakpoint 7fd55e0c //
            gtAnnDict = au.load_gtAnnDict(main_dict, reset=args.reset)

        if mode == "load_gtAnnDict":
            _, val_set = au.load_trainval(main_dict)
            gtAnnDict = au.load_gtAnnDict(val_set)
            # gtAnnClass = COCO(gtAnnDict)
            # au.assert_gtAnnDict(main_dict, reset=None)
            # _, val_set = au.load_trainval(main_dict)
            # annList_path = val_set.annList_path
            # fname_dummy = annList_path.replace(".json", "_best.json")
            # predAnnDict = ms.load_json(fname_dummy)
            import ipdb
            ipdb.set_trace()  # breakpoint 100bfe1b //
            pred_annList = ms.load_pkl(main_dict["path_best_annList"])
            # model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [1])
            import ipdb
            ipdb.set_trace()  # breakpoint 2310bb33 //
            model = ms.load_best_model(main_dict)
            pred_dict = model.predict(batch, "BestDice", "mcg")
            ms.images(batch["images"],
                      au.annList2mask(pred_dict["annList"])["mask"], denorm=1)
            # pointList2UpperBoundMCG
            pred_annList = au.load_predAnnList(main_dict,
                                               predict_method="BestDice",
                                               proposal_type="mcg",
                                               reset="reset")
            # annList = au.pointList2UpperBoundMCG(batch["lcfcn_pointList"], batch)["annList"]
            # ms.images(batch["images"], au.annList2mask(annList)["mask"], denorm=1)
            #     # commented out: `annList` above is also commented out
            pred_annList = au.load_BestMCG(main_dict, reset="reset")
            # pred_annList = au.dataset2annList(model, val_set,
            #                                   predict_method="BestDice",
            #                                   n_val=None)
            au.get_perSizeResults(gtAnnDict, pred_annList)

        if mode == "vis":
            _, val_set = au.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [3])
            import ipdb
            ipdb.set_trace()  # breakpoint 05e6ef16 //
            vis.visBaselines(batch)
            model = ms.load_best_model(main_dict)
            vis.visBlobs(model, batch)

        if mode == "qual":
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            path = "/mnt/home/issam/Summaries/{}_{}".format(dataset_name,
                                                            model_name)
            try:
                ms.remove_dir(path)
            except Exception:
                pass
            n_images = len(val_set)
            base = "{}_{}".format(dataset_name, model_name)
            for i in range(50):
                print(i, "/50", "-", base)
                index = np.random.randint(0, n_images)
                batch = ms.get_batch(val_set, [index])
                if len(batch["lcfcn_pointList"]) == 0:
                    continue
                image = vis.visBlobs(model, batch, return_image=True)
                # image_baselines = vis.visBaselines(batch, return_image=True)
                # imgAll = np.concatenate([image, image_baselines], axis=1)
                fname = path + "/{}_{}.png".format(i, base)
                ms.create_dirs(fname)
                ms.imsave(fname, image)

        if mode == "test_baselines":
            import ipdb
            ipdb.set_trace()  # breakpoint b51c5b1f //
            results = au.test_baselines(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "test_best":
            au.test_best(main_dict)

        if mode == "qualitative":
            au.qualitative(main_dict)

        if mode == "figure1":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            # proposals_path = "/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/ProposalsSharp/"
            # vidList = glob("/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/stuttgart_01/*")
            # vidList.sort()
            # pretty_image = ms.visPretty(model, batch=ms.get_batch(val_set, [i]), with_void=1, win="with_void")
            batch = ms.get_batch(val_set, [68])
            bestdice = ms.visPretty(model, batch=batch, with_void=0,
                                    win="no_void")
            blobs = ms.visPretty(model, batch=batch, predict_method="blobs",
                                 with_void=0, win="no_void")
            ms.images(bestdice, win="BestDice")
            ms.images(blobs, win="Blobs")
            ms.images(batch["images"], denorm=1, win="Image")
            ms.images(batch["images"], batch["points"], enlarge=1, denorm=1,
                      win="Points")
            import ipdb
            ipdb.set_trace()  # breakpoint cf4bb3d3 //

        if mode == "video2":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            index = 0
            for i in range(len(val_set)):
                # pretty_image = ms.visPretty(model, batch=ms.get_batch(val_set, [i]), with_void=1, win="with_void")
                batch = ms.get_batch(val_set, [i])
                pretty_image = ms.visPretty(model, batch=batch, with_void=0,
                                            win="no_void")
                # pred_dict = model.predict(batch, predict_method="BestDice")
                path_summary = main_dict["path_summary"]
                ms.create_dirs(path_summary + "/tmp")
                ms.imsave(path_summary + "vid_mask_{}.png".format(index),
                          ms.get_image(batch["images"], batch["points"],
                                       enlarge=1, denorm=1))
                index += 1
                ms.imsave(path_summary + "vid_mask_{}.png".format(index),
                          pretty_image)
                index += 1
                # ms.imsave(path_summary + "vid1_full_{}.png".format(i),
                #           ms.get_image(img, pred_dict["blobs"], denorm=1))
                print(i, "/", len(val_set))

        if mode == "video":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            # _, val_set = au.load_trainval(main_dict)
            proposals_path = ("/mnt/datasets/public/issam/Cityscapes/"
                              "demoVideo/leftImg8bit/demoVideo/ProposalsSharp/")
            vidList = glob("/mnt/datasets/public/issam/Cityscapes/demoVideo/"
                           "leftImg8bit/demoVideo/stuttgart_01/*")
            vidList.sort()
            for i, img_path in enumerate(vidList):
                image = Image.open(img_path).convert('RGB')
                image = image.resize((1200, 600), Image.BILINEAR)
                img, _ = transforms.Tr_WTP_NoFlip()([image, image])
                pred_dict = model.predict(
                    {"images": img[None],
                     "split": ["test"],
                     "resized": torch.FloatTensor([1]),
                     "name": [ms.extract_fname(img_path)],
                     "proposals_path": [proposals_path]},
                    predict_method="BestDice")
                path_summary = main_dict["path_summary"]
                ms.create_dirs(path_summary + "/tmp")
                ms.imsave(path_summary + "vid1_mask_{}.png".format(i),
                          ms.get_image(pred_dict["blobs"]))
                ms.imsave(path_summary + "vid1_full_{}.png".format(i),
                          ms.get_image(img, pred_dict["blobs"], denorm=1))
                print(i, "/", len(vidList))

        if mode == "5_eval_BestDice":
            gtAnnDict = au.load_gtAnnDict(main_dict)
            gtAnnClass = COCO(gtAnnDict)
            results = au.assert_gtAnnDict(main_dict, reset=None)

        if mode == "cp_annList":
            ms.dataset2cocoformat(dataset_name="CityScapes")

        if mode == "pascal2lcfcn_points":
            data_utils.pascal2lcfcn_points(main_dict)

        if mode == "cp2lcfcn_points":
            data_utils.cp2lcfcn_points(main_dict)

        if mode == "train":
            train.main(main_dict)
            import ipdb
            ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "train_only":
            train.main(main_dict, train_only=True)
            import ipdb
            ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "sharpmask2psfcn":
            for split in ["train", "val"]:
                root = "/mnt/datasets/public/issam/COCO2014/ProposalsSharp/"
                path = "{}/sharpmask/{}/jsons/".format(root, split)
                jsons = glob(path + "*.json")
                propDict = {}
                for k, json in enumerate(jsons):
                    print("{}/{}".format(k, len(jsons)))
                    props = ms.load_json(json)
                    for p in props:
                        if p["image_id"] not in propDict:
                            propDict[p["image_id"]] = []
                        propDict[p["image_id"]] += [p]
                for k in propDict.keys():
                    fname = "{}/{}.json".format(root, k)
                    ms.save_json(fname, propDict[k])

        if mode == "cp2coco":
            import ipdb
            ipdb.set_trace()  # breakpoint f2eb9e70 //
            dataset2cocoformat.cityscapes2cocoformat(main_dict)
            # train.main(main_dict)
            import ipdb
            ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "train_lcfcn":
            train_lcfcn.main(main_dict)
            import ipdb
            ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "summary":
            try:
                history = ms.load_history(main_dict)
                # if predictList_str == "MAE":
                #     results[key] = "{}/{}: {:.2f}".format(
                #         history["best_model"]["epoch"], history["epoch"],
                #         history["best_model"][metric_name])
                # else:
                val_dict = history["val"][-1]
                val_dict = history["best_model"]
                iou25 = val_dict["0.25"]
                iou5 = val_dict["0.5"]
                iou75 = val_dict["0.75"]
                results[key] = "{}/{}: {:.1f} - {:.1f} - {:.1f}".format(
                    val_dict["epoch"], history["epoch"],
                    iou25 * 100, iou5 * 100, iou75 * 100)
                # if history["val"][-1]["epoch"] != history["epoch"]:
                #     results[key] += " | Val {}".format(history["epoch"])
                try:
                    results[key] += " | {}/{}".format(
                        len(history["trained_batch_names"]),
                        history["train"][-1]["n_samples"])
                except Exception:
                    pass
            except Exception:
                pass

        if mode == "vals":
            history = ms.load_history(main_dict)
            for i in range(1, len(main_dict["predictList"]) + 1):
                if len(history["val"]) == 0:
                    res = "NaN"
                    continue
                else:
                    res = history["val"][-i]

                map50 = res["map50"]
                map75 = res["map75"]
                # if map75 < 1e-3:
                #     continue

                string = "{} - {} - map50: {:.2f} - map75: {:.2f}".format(
                    res["epoch"], res["predict_name"], map50, map75)
                key_tmp = list(key).copy()
                key_tmp[1] += " {} - {}".format(metric_name,
                                                res["predict_name"])
                results[tuple(key_tmp)] = string
            # print("map75", pd.DataFrame(history["val"])["map75"].max())
            # df = pd.DataFrame(history["vals"][:20])["water_loss_B"]
            # print(df)

    try:
        print(ms.dict2frame(results))
    except Exception:
        print("Results not printed...")
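# Hedged usage examples for this entry point (the experiment name below is
# illustrative; valid values come from experiments.get_experiment_dict):
#
#   python main.py -e pascal_points -m train
#   python main.py -e pascal_points -m summary
#   python main.py -e pascal_points -m vals -p BestDice BestObjectness
#   python main.py -e pascal_points -m debug -r reset
if __name__ == "__main__":  # assumed entry point; drop if main() is invoked elsewhere
    main()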