def main():
    """Create the model and start the evaluation process.

    Evaluates the ESPNet student model on the CamVid test split:
    loads a checkpoint, runs single-scale inference, optionally writes
    colorized prediction maps, and accumulates a confusion matrix to
    report per-class IoU and mean IoU.
    """
    args = Parameters().parse()

    # Hard-coded evaluation configuration for the CamVid / ESPNet baseline.
    args.method = 'student_esp_d'
    args.dataset = 'camvid_light'
    args.data_list = "/ssd/yifan/SegNet/CamVid/test.txt"
    args.data_dir = "/ssd/yifan/"
    args.num_classes = 11
    args.restore_from = "./checkpoint/Camvid/ESP/base_57.8.pth"
    args.batch_size = 1

    print("Input arguments:")
    for key, val in vars(args).items():
        print("{:16} {}".format(key, val))

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)  # NOTE(review): not used below; kept so a malformed
                         # args.input_size still fails loudly here.

    print(args)
    output_path = args.output_path
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    deeplab = get_segmentation_model(args.method, num_classes=args.num_classes)

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.restore_from is not None:
        saved_state_dict = torch.load(args.restore_from)
        # Checkpoints saved from a DataParallel model have every key prefixed
        # with 'module.'.  The original code derived this flag from only the
        # last key it happened to iterate over; check all keys instead, and
        # wrap in DataParallel exactly once.  For 'module.'-prefixed
        # checkpoints we must wrap BEFORE loading (so key names match); for
        # plain checkpoints we load into the bare model and wrap afterwards.
        has_module_prefix = any(
            key.split('.')[0] == 'module' for key in saved_state_dict.keys())
        if has_module_prefix:
            deeplab = nn.DataParallel(deeplab)
            deeplab.load_state_dict(saved_state_dict)
        else:
            deeplab.load_state_dict(saved_state_dict)
            deeplab = nn.DataParallel(deeplab)

    model = deeplab
    model.eval()
    model.cuda()

    testloader = data.DataLoader(
        get_segmentation_dataset(args.dataset, root=args.data_dir,
                                 list_path=args.data_list,
                                 crop_size=(360, 480), mean=IMG_MEAN,
                                 scale=False, mirror=False),
        batch_size=args.batch_size, shuffle=False, pin_memory=True)

    confusion_matrix = np.zeros((args.num_classes, args.num_classes))
    palette = get_palette(20)

    # Loop-invariant output configuration, hoisted out of the per-image loop
    # (the original reassigned these on every image with identical values).
    args.store_output = 'True'
    output_path = './esp_camvid_base/'
    if not os.path.exists(output_path):
        os.mkdir(output_path)

    image_id = 0
    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processd' % (index))

        # Batch layout differs per dataset flavor — see the dataset classes.
        if args.side:
            image, label, _, size, name = batch
        elif 'sd' in args.dataset:
            _, image, label, size, name = batch
        else:
            image, label, size, name = batch

        size = size[0].numpy()
        output = predict_esp(model, image)

        # argmax over the class axis -> per-pixel label map; mask out
        # ignore pixels (255) so they never count as predictions.
        result = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
        m_seg_pred = ma.masked_array(result, mask=torch.eq(label, 255))
        ma.set_fill_value(m_seg_pred, 20)
        seg_pred = m_seg_pred

        for i in range(image.size(0)):
            image_id += 1
            print('%d th segmentation map generated ...' % (image_id))
            if args.store_output == 'True':
                output_im = PILImage.fromarray(seg_pred[i])
                output_im.putpalette(palette)
                output_im.save(output_path + '/' + name[i] + '.png')

        # np.int was removed in NumPy 1.24; use an explicit width instead.
        seg_gt = np.asarray(label.numpy()[:, :size[0], :size[1]],
                            dtype=np.int64)
        ignore_index = seg_gt != 255
        seg_gt = seg_gt[ignore_index]
        seg_pred = seg_pred[ignore_index]
        confusion_matrix += get_confusion_matrix(seg_gt, seg_pred,
                                                 args.num_classes)

    # IoU per class = TP / (pred + gt - TP); guard against empty classes.
    pos = confusion_matrix.sum(1)
    res = confusion_matrix.sum(0)
    tp = np.diag(confusion_matrix)
    IU_array = (tp / np.maximum(1.0, pos + res - tp))
    mean_IU = IU_array.mean()
    print({'meanIU': mean_IU, 'IU_array': IU_array})
    print("confusion matrix\n")
    print(confusion_matrix)
# Mean BGR pixel values subtracted by the dataset loader (Caffe-style means).
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)

# Full-resolution (1024x2048) loader used to drive the FLOPs benchmark below.
# NOTE(review): presumably Cityscapes-sized crops — confirm against args.dataset.
testloader = data.DataLoader(get_segmentation_dataset(args.dataset,
                                                      root=args.data_dir,
                                                      list_path=args.data_list,
                                                      crop_size=(1024, 2048),
                                                      mean=IMG_MEAN,
                                                      scale=False,
                                                      mirror=False),
                             batch_size=args.batch_size,
                             shuffle=False,
                             pin_memory=True)

# Benchmark each candidate architecture: attach FLOPs-counting hooks and feed
# real batches so the counters accumulate per forward pass.
for method in methods:
    args.method = method
    student = get_segmentation_model(args.method, num_classes=args.num_classes)
    # from network.md import MobileNet
    # student=MobileNet()
    student = add_flops_counting_methods(student)
    student = student.cuda()
    student = student.eval()
    student.start_flops_count()
    print('method:', method)
    for i_iter, batch in enumerate(testloader):
        i_iter += args.start_iters
        images, labels, _, _ = batch
        images = Variable(images.cuda())
        labels = Variable(labels.long().cuda())