def test(dataset_name, model_name, metric_name,
         path_history="checkpoints/", path_best_model=""):
    history = ut.load_json(path_history)

    transformer = ut.ComposeJoint([[transforms.ToTensor(), None],
                                   [transforms.Normalize(*ut.mean_std), None],
                                   [None, ut.ToLong()]])

    test_set = dataset_dict[dataset_name](split="test",
                                          transform_function=transformer)

    # Load the best model found during validation
    # path_best_model = "/mnt/home/issam/LCFCNSaves/pascal/State_Dicts/best_model.pth"
    model = model_dict[model_name](n_classes=test_set.n_classes).cuda()
    model.load_state_dict(torch.load(path_best_model))
    model.trained_images = set(history["trained_images"])

    # Evaluate on the test split at the best validation epoch
    testDict = ut.val(model=model,
                      dataset=test_set,
                      epoch=history["best_val_epoch"],
                      metric_name=metric_name)

    print(pd.DataFrame([testDict]))
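# Example invocation (a sketch; the dataset/model/metric names and paths below
# are assumptions, not values defined in this file). Note that path_history
# must point to the history JSON written by train(), not to a directory:
#
#   test("trancos", "ResFCN", "MAE",
#        path_history="checkpoints/history.json",
#        path_best_model="checkpoints/best_model.pth")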
def apply(image_path, model_name, model_path):
    transformer = ut.ComposeJoint([[transforms.ToTensor(), None],
                                   [transforms.Normalize(*ut.mean_std), None],
                                   [None, ut.ToLong()]])

    # Load best model
    model = model_dict[model_name](n_classes=2).cuda()
    model.load_state_dict(torch.load(model_path))

    # Read Image
    image_raw = imread(image_path)
    cv2.imshow("img", image_raw)
    cv2.waitKeyEx()

    collection = list(map(FT.to_pil_image, [image_raw, image_raw]))
    image, _ = transformer(collection)
    batch = {"images": image[None]}

    # Make predictions
    pred_blobs = model.predict(batch, method="blobs").squeeze()
    pred_counts = int(model.predict(batch, method="counts").ravel()[0])

    # Save Output
    save_path = "figures/_blobs_count_{}.png".format(pred_counts)
    imsave(save_path, ut.combine_image_blobs(image_raw, pred_blobs))

    print("| Counts: {}\n| Output saved in: {}".format(pred_counts, save_path))
def apply(image_path, model_name, model_path):
    transformer = ut.ComposeJoint([[transforms.ToTensor(), None],
                                   [transforms.Normalize(*ut.mean_std), None],
                                   [None, ut.ToLong()]])

    # Load best model
    model = model_dict[model_name](n_classes=2).cuda()
    model.load_state_dict(torch.load(model_path))

    # Read Image
    image_raw = imread(image_path)
    collection = list(map(FT.to_pil_image, [image_raw, image_raw]))
    image, _ = transformer(collection)
    batch = {"images": image[None]}

    # Make predictions
    pred_blobs = model.predict(batch, method="blobs").squeeze()
    pred_counts = int(model.predict(batch, method="counts").ravel()[0])

    # Save Output
    save_path = image_path + "_blobs_count:{}.png".format(pred_counts)
    imsave(save_path, ut.combine_image_blobs(image_raw, pred_blobs))

    # Draw the centroid and bounding box of each predicted blob
    img = imread(save_path)
    center = dict()
    centroid_path = image_path + "_centroid_blobs_count:{}.png".format(pred_counts)

    # Bounding boxes are written in global coordinates: the tile offsets
    # (y_i, x_j) are parsed from the image file name, which is assumed to look
    # like "<prefix>_<y>_<x>.<ext>".
    bbox_file = "{}_boundingbox.txt".format(image_path[:-4])
    im_name = os.path.basename(image_path)
    name = im_name.split('_')
    y_i = int(name[1])
    x_j = int(name[2].split('.')[0])

    props = measure.regionprops(pred_blobs)
    with open(bbox_file, 'a+') as bf:
        for i in range(pred_counts):
            # Mark the blob centroid with a filled red circle
            center[i] = props[i].centroid
            cy, cx = draw.circle(center[i][0], center[i][1], 6)
            draw.set_color(img, [cy, cx], [255, 0, 0])

            # Write the blob's bounding box, shifted by the tile offset
            minr, minc, maxr, maxc = props[i].bbox
            bf.write(str(int(minc) + x_j) + ' ' + str(int(minr) + y_i) + ' ' +
                     str(int(maxc) + x_j) + ' ' + str(int(maxr) + y_i) + '\n')
            cv2.rectangle(img, (minc, minr), (maxc, maxr),
                          color=(255, 0, 0), thickness=2)

    imsave(centroid_path, img)
    print("Centroid and bounding-box extraction finished")
def apply(image_path, model_name, model_path):
    transformer = ut.ComposeJoint([[transforms.ToTensor(), None],
                                   [transforms.Normalize(*ut.mean_std), None],
                                   [None, ut.ToLong()]])

    # Load best model: PASCAL checkpoints use 21 classes, everything else is binary
    n_classes = 21 if 'pascal' in model_path else 2
    model = model_dict[model_name](n_classes=n_classes).cuda()
    model.load_state_dict(torch.load(model_path))
    print('load done!')

    # Read Image
    image_raw = imread(image_path)
    collection = list(map(FT.to_pil_image, [image_raw, image_raw]))
    image, _ = transformer(collection)
    batch = {"images": image[None]}

    # Make predictions. For a 1-class model, squeeze() already yields a 2D blob
    # map; for a multi-class model the result stays 3D (e.g. (20, 375, 500) on
    # PASCAL) and has to be collapsed into a single 2D mask
    # (a sketch of the cvt_multi_class_to_one_mask helper follows this function).
    pred_blobs = model.predict(batch, method="blobs").squeeze()
    if pred_blobs.ndim == 3:
        pred_blobs = cvt_multi_class_to_one_mask(pred_blobs)

    # Per-class counts (a single integer for 1-class models)
    pred_counts = model.predict(batch, method="counts").squeeze().astype(int)

    # Save Output
    save_path = image_path + "_blobs_count:{}.png".format(pred_counts.sum())
    imsave(save_path, ut.combine_image_blobs(image_raw, pred_blobs))

    print("| Counts: {}\n| Output saved in: {}".format(pred_counts, save_path))
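# cvt_multi_class_to_one_mask is called above but not defined in this file.
# A minimal sketch of what it could do, assuming the input is a
# (n_classes - 1, H, W) array of per-class blob labels and the output should
# be a single 2D label map (this is an assumption, not the original helper):
def cvt_multi_class_to_one_mask(pred_blobs):
    # Collapse per-class blob labels into one 2D map, offsetting the labels so
    # that blobs coming from different classes keep distinct ids.
    mask = np.zeros(pred_blobs.shape[1:], dtype=np.int64)
    offset = 0
    for class_blobs in pred_blobs:
        fg = class_blobs > 0
        mask[fg] = class_blobs[fg] + offset
        offset = int(mask.max())
    return mask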
def train(dataset_name, model_name, metric_name, path_history, path_model,
          path_opt, path_best_model, reset=False):
    # Set seeds for reproducibility
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)

    # Train dataset (with random horizontal flips)
    transformer = ut.ComposeJoint([ut.RandomHorizontalFlipJoint(),
                                   [transforms.ToTensor(), None],
                                   [transforms.Normalize(*ut.mean_std), None],
                                   [None, ut.ToLong()]])
    train_set = dataset_dict[dataset_name](split="train",
                                           transform_function=transformer)
    trainloader = torch.utils.data.DataLoader(train_set,
                                              batch_size=1,
                                              num_workers=0,
                                              drop_last=False,
                                              sampler=ut.RandomSampler(train_set))

    # Val and test datasets (no augmentation)
    transformer = ut.ComposeJoint([[transforms.ToTensor(), None],
                                   [transforms.Normalize(*ut.mean_std), None],
                                   [None, ut.ToLong()]])
    val_set = dataset_dict[dataset_name](split="val",
                                         transform_function=transformer)
    test_set = dataset_dict[dataset_name](split="test",
                                          transform_function=transformer)

    # Model and optimizer
    model = model_dict[model_name](train_set.n_classes).cuda()
    opt = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=1e-5, weight_decay=0.0005)

    # Resume from an existing checkpoint, otherwise start from scratch
    if os.path.exists(path_history) and not reset:
        history = ut.load_json(path_history)
        model.load_state_dict(torch.load(path_model))
        opt.load_state_dict(torch.load(path_opt))
        s_epoch = history["train"][-1]["epoch"]
        print("Resuming from epoch {}...".format(s_epoch))
    else:
        history = {"train": [], "val": [], "test": [],
                   "model_name": model_name,
                   "dataset_name": dataset_name,
                   "path_model": path_model,
                   "path_opt": path_opt,
                   "path_best_model": path_best_model,
                   "best_val_epoch": -1,
                   "best_val_mae": np.inf}
        s_epoch = 0
        print("Starting from scratch...")

    for epoch in range(s_epoch + 1, 1000):
        # %%%%%%%%%%% 1. TRAINING PHASE %%%%%%%%%%%%
        train_dict = ut.fit(model, trainloader, opt,
                            loss_function=losses.lc_loss,
                            epoch=epoch)

        # Update history
        history["trained_images"] = list(model.trained_images)
        history["train"] += [train_dict]

        # Save model, opt and history
        torch.save(model.state_dict(), path_model)
        torch.save(opt.state_dict(), path_opt)
        ut.save_json(path_history, history)

        # %%%%%%%%%%% 2. VALIDATION PHASE %%%%%%%%%%%%
        with torch.no_grad():
            val_dict = ut.val(model=model, dataset=val_set,
                              epoch=epoch, metric_name=metric_name)

            # Update history; lower is better
            history["val"] += [val_dict]
            if val_dict[metric_name] <= history["best_val_mae"]:
                history["best_val_epoch"] = epoch
                history["best_val_mae"] = val_dict[metric_name]
                torch.save(model.state_dict(), path_best_model)

            # Test Model (skipped for early epochs on the penguins dataset)
            if not (dataset_name == "penguins" and epoch < 50):
                testDict = ut.val(model=model, dataset=test_set,
                                  epoch=epoch, metric_name=metric_name)
                history["test"] += [testDict]

        ut.save_json(path_history, history)
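# How these functions might be wired together from the command line. This is a
# hypothetical sketch: the argument names, the default dataset/model names
# ("trancos", "ResFCN"), and the checkpoint layout are assumptions, not part of
# the original script.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--mode", choices=["train", "test", "apply"],
                        default="train")
    parser.add_argument("-d", "--dataset_name", default="trancos")
    parser.add_argument("-n", "--model_name", default="ResFCN")
    parser.add_argument("--metric_name", default="MAE")
    parser.add_argument("--image_path", default=None)
    parser.add_argument("--reset", action="store_true")
    args = parser.parse_args()

    # Derive checkpoint paths from the dataset and model names (assumed layout)
    base = "checkpoints/{}_{}".format(args.dataset_name, args.model_name)
    if args.mode == "train":
        train(args.dataset_name, args.model_name, args.metric_name,
              path_history=base + "_history.json",
              path_model=base + "_model.pth",
              path_opt=base + "_opt.pth",
              path_best_model=base + "_best_model.pth",
              reset=args.reset)
    elif args.mode == "test":
        test(args.dataset_name, args.model_name, args.metric_name,
             path_history=base + "_history.json",
             path_best_model=base + "_best_model.pth")
    else:
        apply(args.image_path, args.model_name, base + "_best_model.pth")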