def val(args):
    model = get_model(args)
    model.eval()
    evaluations = NoteEvaluation.Evaluation(args)

    for group in range(4):
        print("-------------GROUP %d-------------" % (group))
        args.group = group
        evaluations.group = args.group
        val_dataloader = val_loader(args)
        restore(args, model)
        it = 0
        for data in val_dataloader:
            begin_time = time.time()
            it = it + 1
            query_img, query_mask, support_img, support_mask, idx, size = data
            query_img, query_mask, support_img, support_mask, idx \
                = query_img.cuda(), query_mask.cuda(), support_img.cuda(), support_mask.cuda(), idx.cuda()

            with torch.no_grad():
                logits = model(query_img, support_img, support_mask)

            query_img = F.upsample(query_img, size=(size[0], size[1]), mode='bilinear')
            query_mask = F.upsample(query_mask, size=(size[0], size[1]), mode='nearest')
            values, pred = model.get_pred(logits, query_img)
            evaluations.update_evl(idx, query_mask, pred, 0)

            end_time = time.time()
            ImgPerSec = 1 / (end_time - begin_time)
            print("Tested %d images, %.2f images/s" % (it, ImgPerSec), end="\r")

        print("Group %d: %.4f " % (args.group, evaluations.group_mean_iou[args.group]))

    iou = evaluations.iou_list
    print('IOU:', iou)
    mIoU = np.mean(iou)
    print('mIoU: ', mIoU)
    print("group0_iou", evaluations.group_mean_iou[0])
    print("group1_iou", evaluations.group_mean_iou[1])
    print("group2_iou", evaluations.group_mean_iou[2])
    print("group3_iou", evaluations.group_mean_iou[3])
    print(evaluations.group_mean_iou)
    # print(evaluations.iou_list)
    return mIoU, iou, evaluations
def get_model(args):
    model = eval(args.arch).OneModel(args)
    opti_A = my_optim.get_finetune_optimizer(args, model)
    model = model.cuda()
    print('Number of Parameters: %d' % (get_model_para_number(model)))

    if args.start_count > 0:
        restore(args, model)
        print("Resume training...")

    return model, opti_A
def get_model(args):
    model = eval(args.arch).model(pretrained=False, num_classes=args.num_classes,
                                  threshold=args.threshold, args=args)
    model.cuda()
    model = torch.nn.DataParallel(model, range(args.num_gpu))
    optimizer = my_optim.get_finetune_optimizer(args, model)

    if args.resume == 'True':
        restore(args, model, optimizer, including_opt=False)

    return model, optimizer
def get_model(args):
    model = eval(args.arch).Inception3(num_classes=args.num_classes, args=args,
                                       threshold=args.threshold)
    model = torch.nn.DataParallel(model, range(args.num_gpu))
    model.cuda()
    optimizer = my_optim.get_optimizer(args, model)

    if args.resume == 'True':
        restore(args, model, optimizer)

    return model, optimizer
def get_model(args):
    model = eval(args.arch).OneModel(args)
    model = model.cuda()
    print('Number of Parameters: %d' % (get_model_para_number(model)))

    # optimizer
    opti_A = my_optim.get_finetune_optimizer(args, model)

    # if os.path.exists(args.restore_from):
    snapshot_dir = os.path.join(args.snapshot_dir, args.arch,
                                'group_%d_of_%d' % (args.group, args.num_folds))
    print(args.resume)
    if args.resume:
        restore(snapshot_dir, model)
        print("Resume training...")

    return model, opti_A
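# NOTE: `get_model_para_number` is called by the get_model variants above but is not
# defined in this file. A minimal sketch of the assumed behavior (counting trainable
# parameters); the project's actual helper may differ.
def get_model_para_number(model):
    """Return the total number of trainable parameters in `model` (assumed behavior)."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)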
def val(args):
    model = get_model(args)
    model.eval()
    for p in model.parameters():
        p.requires_grad = False

    if not os.path.exists(args.snapshot_dir):
        os.mkdir(args.snapshot_dir)
    # if not os.path.exists(get_save_dir(args)):
    #     os.makedirs(get_save_dir(args))

    hist = np.zeros((21, 21))
    for group in range(4):
        args.group = group
        print("=" * 20 + "GROUP %d" % (args.group) + "=" * 20)
        restore(args, model, args.group)
        pbar = tqdm(total=args.max_steps)
        pbar.set_description('GROUP %d' % (args.group))
        train_loader = data_loader(args)
        count = 0
        for dat in train_loader:
            count += 1
            pbar.update(1)
            if count > args.max_steps:
                break

            que_img, que_mask, supp_img, supp_mask = dat
            que_img = que_img.cuda()
            # org_img = get_org_img(que_img.squeeze().cpu().data.numpy())
            # cv2.imwrite('query.png', org_img)

            cat_values = 0
            pred_sum = 0
            for i in range(5):
                pos_img = supp_img[i].cuda()
                pos_mask = supp_mask[i].cuda()
                pos_mask[pos_mask > 0.] = 1.
                pos_mask = torch.unsqueeze(pos_mask, dim=1)

                logits = model(que_img, pos_img, None, pos_mask)
                out_softmax, pred = model.get_pred(logits, que_img)
                pred_sum += pred
                if i == 0:
                    cat_values = out_softmax
                    cat_values[0, :, :] = cat_values[0, :, :] * 0.
                else:
                    cat_values = torch.cat(
                        (cat_values, out_softmax[1, :, :].unsqueeze(dim=0)), dim=0)

            val, pred = torch.max(cat_values, dim=0)
            pred_sum[pred_sum > 0.] = 1.0
            pred = pred + args.group * 5
            pred = pred_sum * pred
            tmp_pred = pred.cpu().data.numpy()
            hist += Metrics.fast_hist(
                tmp_pred.astype(np.int32),
                que_mask.squeeze().data.numpy().astype(np.int32), 21)

            org_img = get_org_img(que_img.squeeze().cpu().data.numpy())
            img = mask_to_img(tmp_pred, org_img)
            cv2.imwrite('save_bins/que_pred/query_%d.png' % (count), img)
            # org_img = get_org_img(pos_img.squeeze().cpu().data.numpy())
            # cv2.imwrite('supp_%d.png'%(i), org_img)
            # np_pred = pred.cpu().data.numpy()
            # cv2.imwrite('%d.png'%(i), np_pred*255)

        miou = Metrics.get_voc_iou(hist)
        print('IOU:', miou)
        print("BMVC:", np.mean(miou[group * 5 + 1:(group + 1) * 5 + 1]))
        pbar.close()

    print("=" * 20 + "Overall" + "=" * 20)
    miou = Metrics.get_voc_iou(hist)
    print('IOU:', miou, np.mean(miou), np.mean(miou[1:]))
    binary_hist = np.array((hist[0, 0], hist[0, 1:].sum(),
                            hist[1:, 0].sum(), hist[1:, 1:].sum())).reshape((2, 2))
    bin_iu = np.diag(binary_hist) / (binary_hist.sum(1) + binary_hist.sum(0) - np.diag(binary_hist))
    print('Bin_iu:', bin_iu)
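# NOTE: `Metrics.fast_hist` and `Metrics.get_voc_iou` are used throughout the val()
# functions but are not defined in this file. The sketches below assume the standard
# VOC-style confusion histogram and per-class IoU; the project's own Metrics module
# may differ in details.
import numpy as np

def fast_hist(pred, label, n):
    """Accumulate an n x n confusion histogram from prediction/label index arrays."""
    k = (label >= 0) & (label < n)
    return np.bincount(n * label[k].astype(int) + pred[k].astype(int),
                       minlength=n ** 2).reshape(n, n)

def get_voc_iou(hist):
    """Per-class IoU = diag / (row sum + col sum - diag)."""
    return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))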
def val(args):
    num_classes = 20
    tp_list = [0] * num_classes
    fp_list = [0] * num_classes
    fn_list = [0] * num_classes
    iou_list = [0] * num_classes
    hist = np.zeros((21, 21))
    scorer = SegScorer(num_classes=21)

    for group in range(4):
        datalayer = SSDatalayer(group)
        for count in tqdm(range(1000)):
            model, optimizer = get_model(args)
            model.eval()
            restore(args, model, group)

            dat = datalayer.dequeue()
            ref_img = dat['second_img'][0]
            query_img = dat['first_img'][0]
            query_label = dat['second_label'][0]
            ref_label = dat['first_label'][0]
            # query_img = dat['second_img'][0]
            # ref_img = dat['first_img'][0]
            # ref_label = dat['second_label'][0]
            # query_label = dat['first_label'][0]
            deploy_info = dat['deploy_info']
            semantic_label = deploy_info['first_semantic_labels'][0][0] - 1

            ref_img, ref_label = torch.Tensor(ref_img).cuda(), torch.Tensor(ref_label).cuda()
            query_img, query_label = torch.Tensor(query_img).cuda(), torch.Tensor(query_label[0, :, :]).cuda()
            # ref_img = ref_img*ref_label
            ref_img_var, query_img_var = Variable(ref_img), Variable(query_img)
            query_label_var, ref_label_var = Variable(query_label), Variable(ref_label)

            ref_img_var = torch.unsqueeze(ref_img_var, dim=0)          # 1*3*500*375
            ref_label_var = torch.unsqueeze(ref_label_var, dim=1)      # 1*500*375
            query_img_var = torch.unsqueeze(query_img_var, dim=0)      # 1*3*375*500
            query_label_var = torch.unsqueeze(query_label_var, dim=0)  # 1*375*500

            logits_A, logits_B = model.forward_1way_1shot_heat(
                query_img_var, ref_img_var, query_label_var, ref_label_var)
            # logits_A =
            loss_val, cluster_loss, loss_bce = model.get_loss(logits_A, ref_label_var)
            optimizer.zero_grad()
            loss_val.backward()
            optimizer.step()

            values, pred = model.get_pred(logits_B, query_img_var)  # values=2*375*500; pred=375*500
            pred = pred.data.cpu().numpy().astype(np.int32)  # 187500
            query_label = query_label.cpu().numpy().astype(np.int32)  # 187500
            class_ind = int(deploy_info['first_semantic_labels'][0][0]) - 1  # because class indices from 1 in data layer
            scorer.update(pred, query_label, class_ind + 1)
            tp, tn, fp, fn = measure(query_label, pred)
            # iou_img = tp/float(max(tn+fp+fn,1))
            tp_list[class_ind] += tp
            fp_list[class_ind] += fp
            fn_list[class_ind] += fn
            # max in case both pred and label are zero
            iou_list = [tp_list[ic] /
                        float(max(tp_list[ic] + fp_list[ic] + fn_list[ic], 1))
                        for ic in range(num_classes)]

            tmp_pred = pred
            tmp_pred[tmp_pred > 0.5] = class_ind + 1
            tmp_gt_label = query_label
            tmp_gt_label[tmp_gt_label > 0.5] = class_ind + 1
            hist += Metrics.fast_hist(tmp_pred, query_label, 21)

        print("-------------GROUP %d-------------" % (group))
        print(iou_list)
        class_indexes = range(group * 5, (group + 1) * 5)
        print('Mean:', np.mean(np.take(iou_list, class_indexes)))

    print('BMVC IOU', np.mean(np.take(iou_list, range(0, 20))))

    miou = Metrics.get_voc_iou(hist)
    print('IOU:', miou, np.mean(miou))

    binary_hist = np.array((hist[0, 0], hist[0, 1:].sum(),
                            hist[1:, 0].sum(), hist[1:, 1:].sum())).reshape((2, 2))
    bin_iu = np.diag(binary_hist) / (binary_hist.sum(1) + binary_hist.sum(0) - np.diag(binary_hist))
    print('Bin_iu:', bin_iu)
def val(args):
    losses = AverageMeter()
    model = get_model(args)
    model.eval()

    num_classes = 20
    tp_list = [0] * num_classes
    fp_list = [0] * num_classes
    fn_list = [0] * num_classes
    iou_list = [0] * num_classes
    hist = np.zeros((21, 21))
    scorer = SegScorer(num_classes=21)

    for group in range(4):
        datalayer = SSDatalayer(group, 5)
        restore(args, model, group)
        for count in tqdm(range(1000)):
            dat = datalayer.dequeue()
            query_img = dat['second_img']
            ref_img = dat['first_img']
            ref_label = dat['second_label']
            query_label = dat['first_label']
            # print(dat.keys(), len(ref_img), len(query_img), len(query_label), len(ref_label))
            # exit(0)
            # ref_img = dat['second_img'][0]
            # query_img = dat['first_img'][0]
            # query_label = dat['second_label'][0]
            # ref_label = dat['first_label'][0]
            deploy_info = dat['deploy_info']
            semantic_label = deploy_info['first_semantic_labels'][0][0] - 1

            # ref_img, ref_label = torch.Tensor(ref_img).cuda(), torch.Tensor(ref_label).cuda()
            query_img, query_label = torch.Tensor(query_img[0]).cuda(), torch.Tensor(query_label[0][0, :, :]).cuda()

            query_img_var = Variable(query_img)
            query_label_var = Variable(query_label)
            # ref_img_var, query_img_var = Variable(ref_img), Variable(query_img)
            # query_label_var, ref_label_var = Variable(query_label), Variable(ref_label)

            # ref_img_var = torch.unsqueeze(ref_img_var, dim=0)
            # ref_label_var = torch.unsqueeze(ref_label_var, dim=1)
            query_img_var = torch.unsqueeze(query_img_var, dim=0)
            query_label_var = torch.unsqueeze(query_label_var, dim=0)

            ref_img_var_list = [img for img in ref_img]
            ref_label_var_list = [label for label in ref_label]

            for p in model.parameters():
                p.requires_grad = False

            # logits = model(query_img_var, ref_img_var, ref_label_var, ref_label_var)
            logits = model.forward_5shot_avg(query_img_var, ref_img_var_list, ref_label_var_list)

            # w, h = query_label.size()
            # outB_side = F.upsample(outB_side, size=(w, h), mode='bilinear')
            # out_side = F.softmax(outB_side, dim=1).squeeze()
            # values, pred = torch.max(out_side, dim=0)
            values, pred = model.get_pred(logits, query_img_var)
            pred = pred.data.cpu().numpy()
            query_label = query_label.cpu().numpy()
            class_ind = int(deploy_info['first_semantic_labels'][0][0]) - 1  # because class indices from 1 in data layer
            scorer.update(pred, query_label, class_ind + 1)
            tp, tn, fp, fn = measure(query_label, pred)
            # iou_img = tp/float(max(tn+fp+fn,1))
            tp_list[class_ind] += tp
            fp_list[class_ind] += fp
            fn_list[class_ind] += fn
            # max in case both pred and label are zero
            iou_list = [tp_list[ic] /
                        float(max(tp_list[ic] + fp_list[ic] + fn_list[ic], 1))
                        for ic in range(num_classes)]

            tmp_pred = pred
            tmp_pred[tmp_pred > 0.5] = class_ind + 1
            tmp_gt_label = query_label
            tmp_gt_label[tmp_gt_label > 0.5] = class_ind + 1
            hist += Metrics.fast_hist(tmp_pred, query_label, 21)

        print("-------------GROUP %d-------------" % (group))
        print(iou_list)
        class_indexes = range(group * 5, (group + 1) * 5)
        print('Mean:', np.mean(np.take(iou_list, class_indexes)))

    print('BMVC IOU', np.mean(np.take(iou_list, range(0, 20))))

    miou = Metrics.get_voc_iou(hist)
    print('IOU:', miou, np.mean(miou))

    scores = scorer.score()
    for k in scores.keys():
        print(k, np.mean(scores[k]), scores[k])
def val(args):
    losses = AverageMeter()

    num_classes = 20
    tp_list = [0] * num_classes
    fp_list = [0] * num_classes
    fn_list = [0] * num_classes
    iou_list = [0] * num_classes
    hist = np.zeros((21, 21))
    scorer = SegScorer(num_classes=21)
    # test_loader = val_loader(args)

    for group in range(4):
        datalayer = SSDatalayer(group, k_shot=1)
        for count in tqdm(range(1000)):
            model, optimizer = get_model(args)
            model.eval()
            restore(args, model, group)

            dat = datalayer.dequeue()
            s_class_one = dat['first_img'][0]
            s_one_label = dat['first_label'][0]
            s_class_two = dat['first_img'][1]
            s_two_label = dat['first_label'][1]
            query_img = dat['second_img'][0]
            query_label = dat['second_label'][0]

            s1_img_list = [warper_img(img) for img in s_class_one]
            s1_label_list = [warper_img(img) for img in s_one_label]
            s2_img_list = [warper_img(img) for img in s_class_two]
            s2_label_list = [warper_img(img) for img in s_two_label]

            query_img, query_label = torch.Tensor(query_img).cuda(), torch.Tensor(query_label[0, :, :]).cuda()

            support_img_var = []
            support_img_var.append(s1_img_list)
            support_img_var.append(s2_img_list)
            support_label_var = []
            support_label_var.append(s1_label_list)
            support_label_var.append(s2_label_list)

            query_img = torch.unsqueeze(query_img, dim=0)  # 1*3*375*500

            deploy_info = dat['deploy_info']
            semantic_label = deploy_info['second_semantic_labels'][0] - 1

            logits_A_one, logits_A_two, logits_B = model.forward_2way_1shot_avg(
                query_img, support_img_var, support_label_var)

            loss_A_one, cluster_loss, loss_bce = model.get_2way_loss(logits_A_one, support_label_var[0][0])
            loss_A_two, cluster_loss, loss_bce = model.get_2way_loss(logits_A_two, support_label_var[1][0])
            loss_val = loss_A_one + loss_A_two

            optimizer.zero_grad()
            loss_val.backward()
            optimizer.step()

            values, pred = model.get_pred(logits_B, query_img)  # values=375*500; pred=375*500
            w, h = query_label.size()
            pred = pred.view(w, h)
            pred = pred.data.cpu().numpy().astype(np.int32)  # 187500
            query_label = query_label.data.cpu().numpy().astype(np.int32)  # 187500
            class_ind = int(deploy_info['second_semantic_labels'][0]) - 1  # because class indices from 1 in data layer
            scorer.update(pred, query_label, class_ind + 1)
            tp, tn, fp, fn = measure(query_label, pred)
            # iou_img = tp/float(max(tn+fp+fn,1))
            tp_list[class_ind] += tp
            fp_list[class_ind] += fp
            fn_list[class_ind] += fn
            # max in case both pred and label are zero
            iou_list = [tp_list[ic] /
                        float(max(tp_list[ic] + fp_list[ic] + fn_list[ic], 1))
                        for ic in range(num_classes)]

            tmp_pred = pred
            tmp_pred[tmp_pred > 0.5] = class_ind + 1
            tmp_gt_label = query_label
            tmp_gt_label[tmp_gt_label > 0.5] = class_ind + 1
            hist += Metrics.fast_hist(tmp_pred, query_label, 21)

        print("-------------GROUP %d-------------" % (group))
        print(iou_list)
        class_indexes = range(group * 5, (group + 1) * 5)
        print('Mean:', np.mean(np.take(iou_list, class_indexes)))

    print('BMVC IOU', np.mean(np.take(iou_list, range(0, 20))))

    miou = Metrics.get_voc_iou(hist)
    print('IOU:', miou, np.mean(miou))

    binary_hist = np.array((hist[0, 0], hist[0, 1:].sum(),
                            hist[1:, 0].sum(), hist[1:, 1:].sum())).reshape((2, 2))
    bin_iu = np.diag(binary_hist) / (binary_hist.sum(1) + binary_hist.sum(0) - np.diag(binary_hist))
    print('Bin_iu:', bin_iu)
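# NOTE: `warper_img` is used above to move each raw support array onto the GPU but is not
# defined in this file. A plausible minimal version, assuming it only wraps a numpy array
# as a CUDA tensor Variable (the real helper may also add a batch dimension):
def warper_img(img):
    return Variable(torch.Tensor(img).cuda())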
def val(args):
    model = get_model(args)
    model.eval()

    num_classes = 20
    tp_list = [0] * num_classes
    fp_list = [0] * num_classes
    fn_list = [0] * num_classes
    iou_list = [0] * num_classes
    hist = np.zeros((21, 21))
    scorer = SegScorer(num_classes=21)

    for group in range(4):
        datalayer = SSDatalayer(group)
        restore(args, model, group)
        for count in tqdm(range(1000)):
            dat = datalayer.dequeue()
            ref_img = dat['second_img'][0]
            query_img = dat['first_img'][0]
            query_label = dat['second_label'][0]
            ref_label = dat['first_label'][0]
            # query_img = dat['second_img'][0]
            # ref_img = dat['first_img'][0]
            # ref_label = dat['second_label'][0]
            # query_label = dat['first_label'][0]
            deploy_info = dat['deploy_info']
            semantic_label = deploy_info['first_semantic_labels'][0][0] - 1

            ref_img, ref_label = torch.Tensor(ref_img).cuda(), torch.Tensor(ref_label).cuda()
            query_img, query_label = torch.Tensor(query_img).cuda(), torch.Tensor(query_label[0, :, :]).cuda()
            # ref_img = ref_img*ref_label
            ref_img_var, query_img_var = Variable(ref_img), Variable(query_img)
            query_label_var, ref_label_var = Variable(query_label), Variable(ref_label)

            ref_img_var = torch.unsqueeze(ref_img_var, dim=0)
            ref_label_var = torch.unsqueeze(ref_label_var, dim=1)
            query_img_var = torch.unsqueeze(query_img_var, dim=0)
            query_label_var = torch.unsqueeze(query_label_var, dim=0)

            logits = model(query_img_var, ref_img_var, ref_label_var, ref_label_var)

            # w, h = query_label.size()
            # outB_side = F.upsample(outB_side, size=(w, h), mode='bilinear')
            # out_side = F.softmax(outB_side, dim=1).squeeze()
            # values, pred = torch.max(out_side, dim=0)
            values, pred = model.get_pred(logits, query_img_var)
            pred = pred.data.cpu().numpy().astype(np.int32)
            query_label = query_label.cpu().numpy().astype(np.int32)
            class_ind = int(deploy_info['first_semantic_labels'][0][0]) - 1  # because class indices from 1 in data layer
            scorer.update(pred, query_label, class_ind + 1)
            tp, tn, fp, fn = measure(query_label, pred)
            # iou_img = tp/float(max(tn+fp+fn,1))
            tp_list[class_ind] += tp
            fp_list[class_ind] += fp
            fn_list[class_ind] += fn
            # max in case both pred and label are zero
            iou_list = [tp_list[ic] /
                        float(max(tp_list[ic] + fp_list[ic] + fn_list[ic], 1))
                        for ic in range(num_classes)]

            tmp_pred = pred
            tmp_pred[tmp_pred > 0.5] = class_ind + 1
            tmp_gt_label = query_label
            tmp_gt_label[tmp_gt_label > 0.5] = class_ind + 1
            hist += Metrics.fast_hist(tmp_pred, query_label, 21)

        print("-------------GROUP %d-------------" % (group))
        print(iou_list)
        class_indexes = range(group * 5, (group + 1) * 5)
        print('Mean:', np.mean(np.take(iou_list, class_indexes)))

    print('BMVC IOU', np.mean(np.take(iou_list, range(0, 20))))

    miou = Metrics.get_voc_iou(hist)
    print('IOU:', miou, np.mean(miou))

    binary_hist = np.array((hist[0, 0], hist[0, 1:].sum(),
                            hist[1:, 0].sum(), hist[1:, 1:].sum())).reshape((2, 2))
    bin_iu = np.diag(binary_hist) / (binary_hist.sum(1) + binary_hist.sum(0) - np.diag(binary_hist))
    print('Bin_iu:', bin_iu)

    scores = scorer.score()
    for k in scores.keys():
        print(k, np.mean(scores[k]), scores[k])
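# NOTE: `measure(query_label, pred)` is used by the val() functions above to return
# (tp, tn, fp, fn) for a binary mask pair, but it is not defined in this file. A minimal
# sketch of the assumed behavior:
def measure(y_in, pred_in):
    """Count true/false positives/negatives between a binary ground-truth mask and a prediction."""
    thresh = 0.5
    y = y_in > thresh
    pred = pred_in > thresh
    tp = np.logical_and(y, pred).sum()
    tn = np.logical_and(np.logical_not(y), np.logical_not(pred)).sum()
    fp = np.logical_and(np.logical_not(y), pred).sum()
    fn = np.logical_and(y, np.logical_not(pred)).sum()
    return tp, tn, fp, fn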