def test(model, features, labels, adj, idx_train, img_name, epoch, t4epoch):
    """
    1. evaluate loss, accuracy, and IoU
    2. save IoU and accuracy in "evaluation4image.txt"
    """
    model.eval()
    output = model(features, adj).detach()
    predictions = torch.argmax(output, dim=1).cpu().numpy()
    mask_gt = Image.open(os.path.join(args.path4Class, img_name + '.png'))
    mask_gt = np.asarray(mask_gt)

    # 1. evaluate loss
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])

    # 1. evaluate accuracy and IoU
    IoU_one_image = IOUMetric(args.num_class)
    IoU_one_image.add_batch(predictions, mask_gt)  # predictions is already a numpy array
    acc, acc_cls, iu, mean_iu_tensor, fwavacc = IoU_one_image.evaluate()

    # show information
    print("[{:03d}]=== Information:\n".format(epoch + 1),
          'mean_IoU: {:>8.5f}'.format(mean_iu_tensor.item()),
          'acc: {:>11.5f}'.format(acc),
          'loss_train: {:<8.4f}'.format(loss_train.item()),
          'time: {:<8.4f}s'.format(time.time() - t4epoch))

    # 2. save information
    print("save accuracy and IoU: " + img_name + ' predict')
    with open("evaluation4image.txt", 'a') as f:
        f.write(img_name + "\t")
        f.write("IoU:" + str(mean_iu_tensor.item()) + "\t")
        f.write("Acc:" + str(acc) + "\n")
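# The snippet above relies on an IOUMetric class defined elsewhere. The following
# is only a minimal sketch of the interface it assumes (add_batch accumulating a
# confusion matrix, evaluate returning acc, acc_cls, iu, mean_iu, fwavacc); the
# class name IOUMetricSketch and the tensor-typed mean IoU are assumptions here.
import numpy as np
import torch

class IOUMetricSketch:
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        # count (gt, pred) pairs for valid ground-truth labels
        mask = (label_true >= 0) & (label_true < self.num_classes)
        return np.bincount(
            self.num_classes * label_true[mask].astype(int) + label_pred[mask],
            minlength=self.num_classes ** 2).reshape(self.num_classes,
                                                     self.num_classes)

    def add_batch(self, predictions, gts):
        for lp, lt in zip(predictions, gts):
            self.hist += self._fast_hist(lp.flatten(), lt.flatten())

    def evaluate(self):
        acc = np.diag(self.hist).sum() / self.hist.sum()
        acc_cls = np.nanmean(np.diag(self.hist) / self.hist.sum(axis=1))
        iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0)
                                   - np.diag(self.hist))
        mean_iu = torch.tensor(np.nanmean(iu))  # tensor, so .item() works as above
        freq = self.hist.sum(axis=1) / self.hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        return acc, acc_cls, iu, mean_iu, fwavacc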
learned_dict = torch.from_numpy(learned_dict).to(torch.float32)
shape_mean = torch.from_numpy(shape_mean).to(torch.float32)
shape_std = torch.from_numpy(shape_std).to(torch.float32)

# build data loader.
mask_data = MaskLoader(root=dataset_root, dataset=args.dataset, size=mask_size)
mask_loader = DataLoader(mask_data,
                         batch_size=args.batch_size,
                         shuffle=False,
                         num_workers=4,
                         drop_last=False)
size_data = len(mask_loader)

sparsity_counts = []
kurtosis_counts = []
all_masks = []
all_codes = []
reconstruction_error = []

# evaluation.
IoUevaluate = IOUMetric(2)
print("Start evaluation ...")
for i, masks in enumerate(mask_loader):
    print("Eva [{} / {}]".format(i, size_data))
    # generate the reconstruction mask.
    masks = masks.view(masks.shape[0], -1)  # a batch of masks: (N, 784)
    masks = masks.to(torch.float32)
    if args.dtm_type == 'standard':
        dtms = prepare_distance_transform_from_mask(masks, mask_size)
    elif args.dtm_type == 'reciprocal':
        dtms = prepare_reciprocal_DTM_from_mask(masks, mask_size)
    elif args.dtm_type == 'complement':
        dtms = prepare_complement_DTM_from_mask(masks, mask_size)
    elif args.dtm_type == 'other':
        dtms = prepare_other_DTM_from_mask(masks, mask_size,
                                           args.bg_constant, args.norm_constant)
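# The prepare_*_DTM_from_mask helpers above are defined elsewhere in the repo.
# As a hedged illustration only, a "standard" distance-transform map (DTM) for a
# batch of flattened binary masks could be computed roughly like this with
# scipy's Euclidean distance transform; the function name here is hypothetical.
import torch
from scipy.ndimage import distance_transform_edt

def prepare_distance_transform_from_mask_sketch(masks, mask_size):
    # masks: (N, mask_size * mask_size) float tensor with values in {0, 1}
    dtms = []
    for m in masks:
        m_img = m.view(mask_size, mask_size).numpy() > 0.5
        dtms.append(torch.from_numpy(distance_transform_edt(m_img)).float())
    return torch.stack(dtms).view(masks.shape[0], -1)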
    def _Class_IOU(confusion_matrix):
        MIoU = np.diag(confusion_matrix) / (
            np.sum(confusion_matrix, axis=1) + np.sum(confusion_matrix, axis=0) -
            np.diag(confusion_matrix))
        return MIoU

    confusion_matrix = _generate_matrix(gt.astype(np.int8),
                                        pred_label.astype(np.int8))
    miou = _Class_IOU(confusion_matrix)
    acc = np.diag(confusion_matrix).sum() / confusion_matrix.sum()
    return miou, acc


# if __name__ == "__main__":
iou = IOUMetric(10)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_path = '/media/limzero/qyl/HWCC2020_RS_segmentation/outputs/efficient-b3/ckpt/checkpoint-epoch2.pth'
model = load_model(model_path)
data_dir = "/media/limzero/qyl/mmsegmentation/data/satellite_jpg/"
val_imgs_dir = os.path.join(data_dir, "img_dir/val/")
val_labels_dir = os.path.join(data_dir, "ann_dir/val/")
valid_data = RSCDataset(val_imgs_dir, val_labels_dir, transform=val_transform)
valid_loader = DataLoader(dataset=valid_data,
                          batch_size=16,
                          shuffle=False,
                          num_workers=1)
model.eval()
with torch.no_grad():
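# Tiny worked example of the per-class IoU formula used in _Class_IOU above:
# for a confusion matrix C, IoU_c = C[c, c] / (row_sum_c + col_sum_c - C[c, c]).
import numpy as np

cm = np.array([[6., 2.],
               [1., 7.]])
iou_per_class = np.diag(cm) / (cm.sum(axis=1) + cm.sum(axis=0) - np.diag(cm))
print(iou_per_class)                  # [6/9, 7/10] -> [0.667, 0.7]
print(np.diag(cm).sum() / cm.sum())   # overall pixel accuracy 13/16 = 0.8125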
def train(**kwargs):
    """
    GCN training
    ---
    - the folders you need:
        - args.path4AffGraph
        - args.path4node_feat
        - path4partial_label
    - these folders will be created:
        - data/GCN_prediction/label
        - data/GCN_prediction/logit
    """
    # os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, [0, 1, 2, 3]))
    t_start = time.time()
    # update the configuration from the command-line arguments
    args.parse(**kwargs)
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device("cuda:" + str(kwargs["GPU"]))
    print(device)

    # write the modified hyper-parameters into the tensorboard run name
    if kwargs["debug"] is False:
        comment_init = ''
        for k, v in kwargs.items():
            comment_init += '|{} '.format(v)
        writer = SummaryWriter(comment=comment_init)

    # === set evaluation objects for later use
    IoU = IOUMetric(args.num_class)
    IoU_CRF = IOUMetric(args.num_class)

    # === dataset
    train_dataloader = graph_voc(start_idx=kwargs["start_index"],
                                 end_idx=kwargs["end_index"],
                                 device=device)

    # === for each image, do training and testing on the same graph
    # for ii, (adj_t, features_t, labels_t, rgbxy_t, img_name, label_fg_t,
    #          label_bg_t) in enumerate(train_dataloader):
    t4epoch = time.time()
    for ii, data in enumerate(train_dataloader):
        if data is None:
            continue

        # === use RGBXY as feature
        # if args.use_RGBXY:
        #     data["rgbxy_t"] = normalize_rgbxy(data["rgbxy_t"])
        #     features_t = data["rgbxy_t"].clone()
        # === only RGB as feature
        t_be = time.time()

        if args.use_lap:
            """ under construction ... """
            H, W, C = data["rgbxy_t"].shape
            A = torch.zeros([H * W, H * W], dtype=torch.float64)

            def find_neibor(card_x, card_y, H, W, radius=2):
                """
                Return the indices of the neighbors of (x, y) as a list
                ---
                """
                neibors_idx = []
                for idx_x in np.arange(card_x - radius, card_x + radius + 1):
                    for idx_y in np.arange(card_y - radius, card_y + radius + 1):
                        if (-radius < idx_x < H) and (-radius < idx_y < W):
                            neibors_idx.append(
                                (idx_x * W + idx_y, idx_x, idx_y))
                return neibors_idx

            t_start = time.time()
            t_start = t4epoch
            neibors = dict()
            for node_idx in range(H * W):
                card_x, card_y = node_idx // W, node_idx % W
                neibors = find_neibor(card_x, card_y, H, W, radius=1)
                # print("H:{} W:{} | {} -> ({},{})".format(
                #     H, W, node_idx, card_x, card_y))
                for nei in neibors:
                    # print("nei: ", nei)
                    diff_rgb = data["rgbxy_t"][card_x, card_y, :3] - \
                        data["rgbxy_t"][nei[1], nei[2], :3]
                    diff_xy = data["rgbxy_t"][card_x, card_y, 3:] - \
                        data["rgbxy_t"][nei[1], nei[2], 3:]
                    A[node_idx, nei[0]] = torch.exp(
                        -torch.pow(torch.norm(diff_rgb), 2) /
                        (2. * args.CRF_deeplab["bi_rgb_std"])) + torch.exp(
                            -torch.pow(torch.norm(diff_xy), 2) /
                            (2. * args.CRF_deeplab["bi_xy_std"]))
            # print("{:3.1f}s".format(time.time() - t_start))
            D = torch.diag(A.sum(dim=1))
            L_mat = D - A
            print("time for Laplacian {:3f} s".format(time.time() - t_be))

        # === Model and optimizer
        img_label = load_image_label_from_xml(img_name=data["img_name"],
                                              voc12_root=args.path4VOC_root)
        img_class = [idx + 1 for idx, f in enumerate(img_label) if int(f) == 1]
        num_class = np.max(img_class) + 1
        # debug("num_class: {} {}".format(num_class + 1, type(num_class + 1)),
        #       line=290)
        model = GCN(
            nfeat=data["features_t"].shape[1],
            nhid=args.num_hid_unit,
            # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
            # the image-level label does not include BG;
            # an adaptive num_class may give better performance
            nclass=args.num_class,  # args.num_class | num_class
            # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
            dropout=args.drop_rate)
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.weight_decay)

        # ==== move tensors to the GPU
        if args.cuda:
            model.to(device)
            data["features_t"] = data["features_t"].to(device)
            data["adj_t"] = data["adj_t"].to(device)
            data["labels_t"] = data["labels_t"].to(device)
            data["label_fg_t"] = data["label_fg_t"].to(device)
            data["label_bg_t"] = data["label_bg_t"].to(device)
            # L_mat = L_mat.to(device)

        # === save the prediction before training
        if args.save_mask_before_train:
            model.eval()
            postprocess_image_save(img_name=data["img_name"],
                                   model_output=model(data["features_t"],
                                                      data["adj_t"]).detach(),
                                   epoch=0)

        # ==== Train model
        # t4epoch = time.time()
        criterion_ent = HLoss()
        # criterion_sym = symmetricLoss()
        for epoch in range(args.max_epoch):
            model.train()
            optimizer.zero_grad()
            output = model(data["features_t"], data["adj_t"])

            # === separate FG/BG label losses
            loss_fg = F.nll_loss(output, data["label_fg_t"], ignore_index=255)
            loss_bg = F.nll_loss(output, data["label_bg_t"], ignore_index=255)
            # F.log_softmax(label_fg_t, dim=1)
            # loss_sym = criterion_sym(output, labels_t, ignore_index=255)
            loss = loss_fg + loss_bg
            if args.use_ent:
                loss_entmin = criterion_ent(output,
                                            data["labels_t"],
                                            ignore_index=255)
                loss += 10. * loss_entmin
            if args.use_lap:
                loss_lap = torch.trace(
                    torch.mm(output.transpose(1, 0),
                             torch.mm(L_mat.type_as(output),
                                      output))) / (H * W)
                gamma = 1e-2
                loss += gamma * loss_lap
            # loss = F.nll_loss(output, labels_t, ignore_index=255)

            if loss is None:
                print("skip this image: ", data["img_name"])
                break

            # === for normalized cut
            # lamda = args.lamda
            # n_cut = 0.
            # if args.use_regular_NCut:
            #     W = gaussian_propagator(output)
            #     d = torch.sum(W, dim=1)
            #     for k in range(output.shape[1]):
            #         s = output[idx_test_t, k]
            #         n_cut = n_cut + torch.mm(
            #             torch.mm(torch.unsqueeze(s, 0), W),
            #             torch.unsqueeze(1 - s, 1)) / (torch.dot(d, s))

            # === compute the loss & update the parameters
            # loss_train = loss.cuda() + lamda * n_cut
            loss_train = loss.cuda()
            loss_train.backward()
            optimizer.step()

            # === save the predicted mask & IoU of the image at the last epoch
            if (epoch + 1) % args.max_epoch == 0 and args.save_mask:
                t_now = time.time()
                if not kwargs["debug"]:
                    evaluate_IoU(model=model,
                                 features=data["features_t"],
                                 adj=data["adj_t"],
                                 img_name=data["img_name"],
                                 epoch=args.max_epoch,
                                 img_idx=ii + 1,
                                 writer=writer,
                                 IoU=IoU,
                                 IoU_CRF=IoU_CRF,
                                 use_CRF=False,
                                 save_prediction_np=True)
                print("[{}/{}] time: {:.4f}s\n\n".format(
                    ii + 1, len(train_dataloader), t_now - t4epoch))
                t4epoch = t_now
        # end for epoch
        # print(
        #     "loss: {} | loss_fg: {} | loss_bg: {} | loss_entmin: {} | loss_lap: {}"
        #     .format(loss.data.item(), loss_fg.data.item(), loss_bg.data.item(),
        #             loss_entmin.data.item(), loss_lap.data.item()))
    # end for dataloader

    if kwargs["debug"] is False:
        writer.close()
    print("Training finished!")
    print("Total time elapsed: {:.0f} h {:.0f} m {:.0f} s\n".format(
        (time.time() - t_start) // 3600, (time.time() - t_start) / 60 % 60,
        (time.time() - t_start) % 60))
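# HLoss above (the entropy-minimization criterion) is defined elsewhere in the
# repo. A hedged sketch of what such a criterion could look like, given
# log-probabilities of shape (N, C) and labels used only to mask out
# ignore_index nodes; the class name HLossSketch is an assumption.
import torch
import torch.nn as nn

class HLossSketch(nn.Module):
    def forward(self, log_p, labels, ignore_index=255):
        keep = labels != ignore_index
        p = log_p[keep].exp()
        ent = -(p * log_p[keep]).sum(dim=1)   # per-node entropy
        return ent.mean()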
def gcn_train(**kwargs):
    """
    GCN training
    ---
    - the folders you need:
        - args.path4AffGraph
        - args.path4node_feat
        - path4partial_label
    - these folders will be created:
        - data/GCN4DeepLab/Label
        - data/GCN4DeepLab/Logit
    """
    t_start = time.time()
    # update config
    args.parse(**kwargs)
    device = torch.device("cuda:" + str(kwargs["GPU"]))
    print(device)

    # tensorboard
    writer = None  # keep writer defined even when tensorboard is disabled
    if args.use_TB:
        time_now = datetime.datetime.today()
        time_now = "{}-{}-{}|{}-{}".format(time_now.year, time_now.month,
                                           time_now.day, time_now.hour,
                                           time_now.minute // 30)
        keys_ignore = ["start_index", "GPU"]
        comment_init = ''
        for k, v in kwargs.items():
            if k not in keys_ignore:
                comment_init += '|{} '.format(v)
        writer = SummaryWriter(
            logdir='runs/{}/{}'.format(time_now, comment_init))

    # initialize the IOUMetric object for evaluation
    IoU = IOUMetric(args.num_class)

    # initialize the dataset
    train_dataloader = graph_voc(start_idx=kwargs["start_index"],
                                 end_idx=kwargs["end_index"],
                                 device=device)

    # train a separate GCN for each image
    t4epoch = time.time()
    for ii, data in enumerate(train_dataloader):
        if data is None:
            continue
        img_label = load_image_label_from_xml(img_name=data["img_name"],
                                              voc12_root=args.path4VOC_root)
        img_class = [idx + 1 for idx, f in enumerate(img_label) if int(f) == 1]
        num_class = np.max(img_class) + 1
        model = GCN(nfeat=data["features_t"].shape[1],
                    nhid=args.num_hid_unit,
                    nclass=args.num_class,
                    dropout=args.drop_rate)
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.weight_decay)

        # put data onto the GPU
        if args.cuda:
            model.to(device)
            data["features_t"] = data["features_t"].to(device)
            data["adj_t"] = data["adj_t"].to(device)
            data["labels_t"] = data["labels_t"].to(device)
            data["label_fg_t"] = data["label_fg_t"].to(device)
            data["label_bg_t"] = data["label_bg_t"].to(device)

        t_be = time.time()
        H, W, C = data["rgbxy_t"].shape
        N = H * W
        # laplacian
        if args.use_lap:
            L_mat = compute_lap_test(data, device, radius=2).to(device)
            print("Time for laplacian {:3.1f} s".format(time.time() - t_be))

        criterion_ent = HLoss()
        for epoch in range(args.max_epoch):
            model.train()
            optimizer.zero_grad()
            output = model(data["features_t"], data["adj_t"])

            # foreground and background loss
            loss_fg = F.nll_loss(output, data["label_fg_t"], ignore_index=255)
            loss_bg = F.nll_loss(output, data["label_bg_t"], ignore_index=255)
            loss = loss_fg + loss_bg
            if args.use_ent:
                loss_entmin = criterion_ent(output,
                                            data["labels_t"],
                                            ignore_index=255)
                loss += 10. * loss_entmin
            if args.use_lap:
                loss_lap = torch.trace(
                    torch.mm(output.transpose(1, 0),
                             torch.mm(L_mat.type_as(output), output))) / N
                gamma = 1e-2
                loss += gamma * loss_lap

            if loss is None:
                print("skip this image: ", data["img_name"])
                break

            loss_train = loss.cuda()
            loss_train.backward()
            optimizer.step()

            # save the predicted mask and IoU at the last epoch
            if (epoch + 1) % args.max_epoch == 0 and args.save_mask:
                t_now = time.time()
                evaluate_IoU(model=model,
                             features=data["features_t"],
                             adj=data["adj_t"],
                             img_name=data["img_name"],
                             img_idx=ii + 1,
                             writer=writer,
                             IoU=IoU,
                             save_prediction_np=True)
                print("evaluate time: {:3.1f} s".format(time.time() - t_now))
                print("[{}/{}] time: {:.1f}s\n\n".format(
                    ii + 1, len(train_dataloader), t_now - t4epoch))
                t4epoch = t_now
        print("======================================")

    if writer is not None:
        writer.close()
    print("Training finished!")
    print("Total time elapsed: {:.0f} h {:.0f} m {:.0f} s\n".format(
        (time.time() - t_start) // 3600, (time.time() - t_start) / 60 % 60,
        (time.time() - t_start) % 60))
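# Toy illustration of the Laplacian smoothness term used above,
# loss_lap = trace(Y^T L Y) / N with L = D - A: it equals the sum over edges of
# the squared differences between the connected nodes' predictions, so it is
# small when neighboring nodes agree. compute_lap_test itself is defined
# elsewhere in the repo; this example only demonstrates the formula.
import torch

A = torch.tensor([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])          # 3-node chain graph
L = torch.diag(A.sum(dim=1)) - A          # unnormalized graph Laplacian
Y = torch.tensor([[1., 0.],
                  [1., 0.],
                  [0., 1.]])              # per-node class scores
loss_lap = torch.trace(Y.t() @ (L @ Y)) / Y.shape[0]
print(loss_lap)  # 2/3: only the edge between nodes 1 and 2 disagrees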
    components_c = np.squeeze(components_c)
    mean_c = np.squeeze(mean_c)
    explained_variance_c = np.squeeze(explained_variance_c)
    assert n_components == components_c.shape[0], \
        "n_components must equal the first dimension of components_."
else:
    # TODO: the class-specific case has not been implemented yet.
    raise NotImplementedError

# build data loader.
mask_data = MaskLoader(root=dataset_root, dataset=args.dataset, size=mask_size)
mask_loader = DataLoader(mask_data,
                         batch_size=args.batch_size,
                         shuffle=False,
                         num_workers=4)
size_data = len(mask_loader)

# evaluation.
IoUevaluate = IOUMetric(2)
print("Start evaluation ...")
for i, masks in enumerate(mask_loader):
    print("Eva [{} / {}]".format(i, size_data))
    # generate the reconstruction mask.
    masks = masks.view(masks.shape[0], -1).numpy()
    masks = masks.astype(np.float32)
    # pre-process.
    if sigmoid:
        value_random = VALUE_MAX * np.random.rand(masks.shape[0], masks.shape[1])
        value_random = np.maximum(value_random, VALUE_MIN)
        masks_random = np.where(masks > value_random, 1 - value_random,
                                value_random)
        masks_random = inverse_sigmoid(masks_random)
    else:
        masks_random = masks
    # --> encode --> decode.
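# inverse_sigmoid above is defined elsewhere; presumably it is the elementwise
# logit, mapping soft mask values in (0, 1) back to unbounded values before the
# encode/decode step. A hedged sketch (the clipping epsilon is an assumption to
# avoid infinities at exactly 0 or 1):
import numpy as np

def inverse_sigmoid_sketch(x, eps=1e-6):
    x = np.clip(x, eps, 1.0 - eps)
    return np.log(x / (1.0 - x))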