import os

import cv2
import numpy as np
import torch
from tqdm import tqdm

# get_model and test_data_loader are defined elsewhere in this repo.


def validate(args):
    print('\nvalidating ... ', flush=True, end='')
    model = get_model(args)
    model.eval()
    val_loader = test_data_loader(args)

    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    with torch.no_grad():
        for idx, dat in tqdm(enumerate(val_loader)):
            img_name, img, label_in, sal = dat
            label = label_in.cuda(non_blocking=True)

            logits, _, _, _ = model(img)
            last_featmaps = model.module.get_heatmaps()

            cv_im = cv2.imread(img_name[0])
            cv_im_gray = cv2.cvtColor(cv_im, cv2.COLOR_BGR2GRAY)
            height, width = cv_im.shape[:2]

            for l, featmap in enumerate(last_featmaps):
                maps = featmap.cpu().data.numpy()
                im_name = os.path.join(args.save_dir, os.path.basename(img_name[0])[:-4])
                labels = label_in.long().numpy()[0]

                # Save one normalized attention map per ground-truth class.
                for i in range(int(args.num_classes)):
                    if labels[i] == 1:
                        att = maps[i]
                        att[att < 0] = 0
                        att = att / (np.max(att) + 1e-8)
                        att = np.array(att * 255, dtype=np.uint8)
                        out_name = im_name + '_{}.png'.format(i)
                        att = cv2.resize(att, (width, height), interpolation=cv2.INTER_CUBIC)
                        cv2.imwrite(out_name, att)
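# validate() above reads attention maps through model.module.get_heatmaps(),
# which implies get_model() returns the network wrapped in nn.DataParallel.
# The repo's get_model is not shown in this excerpt; the sketch below is a
# hypothetical stand-in illustrating that assumption (the vgg16 constructor and
# the 'model' checkpoint key mirror the loading code further down this section).
def get_model_sketch(args):
    model = vgg16()                              # assumed backbone, as used later in this section
    model = torch.nn.DataParallel(model).cuda()  # .module access in validate() requires this wrapper
    if getattr(args, 'checkpoint', None):
        ckpt = torch.load(args.checkpoint, map_location='cpu')
        model.module.load_state_dict(ckpt['model'], strict=True)
    model.eval()
    return model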
import os

import numpy as np
import torch
import torch.nn.functional as F

# vgg16, test_data_loader and get_palette are defined elsewhere in this repo;
# the argparse parser is set up earlier in this script.

args = parser.parse_args()
print(args)

output_dir = os.path.join(args.img_dir, "refined_pseudo_segmentation_labels")
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

""" model load """
# model = vgg16(pretrained=True, delta=args.delta)
model = vgg16()
model = model.cuda()
model.eval()

ckpt = torch.load(args.checkpoint, map_location='cpu')
model.load_state_dict(ckpt['model'], strict=True)

""" dataloader """
data_loader = test_data_loader(args)
palette = get_palette()

for idx, dat in enumerate(data_loader):
    print("[%03d/%03d]" % (idx, len(data_loader)), end="\r")
    img, label, sal_map, _, img_name = dat
    label = label.cuda()
    img = img.cuda()

    _, H, W = sal_map.shape
    # One localization map per foreground class (20 channels).
    localization_maps = np.zeros((20, H, W), dtype=np.float32)

    """ multi-scale testing """
    for s in [256, 320, 384]:
        # The excerpt is truncated here; the resize arguments below are an assumption.
        _img = F.interpolate(img, size=(s, s), mode='bilinear', align_corners=False)
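# get_palette() above is not included in this excerpt. A common choice for
# VOC-style pseudo labels is the standard PASCAL VOC colour map; the sketch
# below is that conventional implementation, assumed here rather than taken
# from the repo. Each label index has its bits spread across the R, G, B bytes.
def get_palette_sketch(num_entries=256):
    palette = [0] * (num_entries * 3)
    for j in range(num_entries):
        lab, i = j, 0
        while lab:
            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    return palette
# Typical use with a predicted label mask (using PIL):
#   out = Image.fromarray(pred.astype(np.uint8), mode='P')
#   out.putpalette(palette)
#   out.save(os.path.join(output_dir, name + '.png'))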
import os

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm

# get_model, test_data_loader, feature_map_merge and save_feature_maps_all are
# defined elsewhere in this repo.


def validate(args):
    print('\nvalidating ... ', flush=True, end='')
    model = get_model(args)
    model.eval()
    val_loader = test_data_loader(args)

    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    with torch.no_grad():
        for idx, dat in tqdm(enumerate(val_loader)):
            if not (0 <= idx <= 500000):
                continue
            img_name1, img_name2, input1, input2_list, input1_transforms, label1, label2 = dat

            # Indices of the classes present in the first image.
            posi_index = np.where(label1.squeeze().cpu().numpy() == 1)[0]
            assert len(posi_index) == len(input2_list)

            # Pair the first image with every partner image of each positive class.
            img_list = []
            for input2_all in input2_list:
                img_all = []
                for input2 in input2_all:
                    img_all.append([input1, input2])
                img_list.append(img_all)
            assert len(posi_index) == len(img_list)

            # Pair the first image with three augmented views of itself.
            img2 = [input1, input1_transforms[0]]
            img3 = [input1, input1_transforms[1]]
            img4 = [input1, input1_transforms[2]]

            # Labels shared by both images, and the labels unique to each image.
            label_new = label1 + label2
            label_new[label_new != 2] = 0
            label_new[label_new == 2] = 1
            label1_comple = label1 - label_new
            label2_comple = label2 - label_new
            assert (label1_comple >= 0).all() and (label2_comple >= 0).all()

            # Average the co-attention maps over all partner pairs of each class.
            co_feature1_list = []
            for j in range(len(posi_index)):
                co_feature1_all = None
                label_one = posi_index[j]
                for img in img_list[j]:
                    _, _ = model(img)
                    _, _, co_feature1, _, _, _ = model.module.get_heatmaps()
                    if co_feature1_all is None:
                        co_feature1_all = co_feature1
                    else:
                        co_feature1_all = co_feature1_all + co_feature1
                co_feature1_all = co_feature1_all / len(img_list[j])
                co_feature1_list.append([co_feature1_all])
            co_feature1_list = feature_map_merge(co_feature1_list, label1)

            # Augmented views: undo the horizontal flip, and resize the rescaled
            # views back to the common 32x32 map resolution.
            logits2, co_logits2 = model(img2)
            featmaps2_1, featmaps2_2, co_feature2_1, co_feature2_2, _, _ = model.module.get_heatmaps()
            co_feature2_2 = co_feature2_2.flip(3)

            logits3, co_logits3 = model(img3)
            featmaps3_1, featmaps3_2, co_feature3_1, co_feature3_2, _, _ = model.module.get_heatmaps()
            co_feature3_2 = F.interpolate(co_feature3_2, (32, 32), mode='bicubic', align_corners=False)

            logits4, co_logits4 = model(img4)
            featmaps4_1, featmaps4_2, co_feature4_1, co_feature4_2, _, _ = model.module.get_heatmaps()
            co_feature4_2 = F.interpolate(co_feature4_2, (32, 32), mode='bicubic', align_corners=False)

            cv_im = cv2.imread(img_name1[0])
            cv_im_gray = cv2.cvtColor(cv_im, cv2.COLOR_BGR2GRAY)
            height, width = cv_im.shape[:2]

            cv_im2 = cv2.imread(img_name2[0])
            cv_im_gray2 = cv2.cvtColor(cv_im2, cv2.COLOR_BGR2GRAY)
            height2, width2 = cv_im2.shape[:2]

            save_feature_maps_all(
                [co_feature2_1, co_feature2_2, co_feature3_2, co_feature4_2] + co_feature1_list,
                args, label1, img_name1, height, width, cv_im, cv_im_gray)
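# The augmented-view handling above (flip(3) to undo a horizontal flip, then
# F.interpolate to a common 32x32 grid before saving) follows a standard
# test-time-augmentation fusion pattern. Below is a minimal standalone sketch
# of that pattern, with a hypothetical helper name and an assumed map size;
# it is an illustration, not the repo's save_feature_maps_all.
def fuse_view_maps(maps, out_size=(32, 32)):
    """Average a list of (N, C, H, W) attention tensors whose flips have
    already been undone, after aligning them to a common resolution."""
    aligned = [F.interpolate(m, out_size, mode='bicubic', align_corners=False) for m in maps]
    return torch.stack(aligned, dim=0).mean(dim=0)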