Ejemplo n.º 1
0
 def _make_model(self):
     """Build the pose network for inference and expose it as ``self.model``.

     Loads the checkpoint at ``self.modelpath`` (its ``'network'`` entry) into
     a ``get_pose_net`` graph, then unwraps DataParallel and switches the
     module to eval mode.
     """
     # prepare network
     self.logger.info("Creating graph and optimizer...")
     # False -> inference graph; self.jointnum joints in the output head
     model = get_pose_net(self.backbone, False, self.jointnum)
     model = DataParallel(model).cuda()
     model.load_state_dict(torch.load(self.modelpath)['network'])
     # Unwrap DataParallel so callers work with the bare module.
     single_pytorch_model = model.module
     single_pytorch_model.eval()
     self.model = single_pytorch_model
Ejemplo n.º 2
0
    def _make_model(self):
        """Create the training network and optimizer and attach them to self.

        Resumes from a snapshot when ``cfg.continue_train`` is set, otherwise
        starts from epoch 0. Sets ``self.start_epoch``, ``self.model`` and
        ``self.optimizer``.
        """
        self.logger.info("Creating graph and optimizer...")

        net = DataParallel(get_pose_net(cfg, True, self.joint_num)).cuda()
        opt = self.get_optimizer(net)

        # Resume from the latest snapshot when requested, else start fresh.
        if not cfg.continue_train:
            epoch0 = 0
        else:
            epoch0, net, opt = self.load_model(net, opt)
        net.train()

        self.start_epoch = epoch0
        self.model = net
        self.optimizer = opt
    def _make_model(self):
        """Load the snapshot for ``self.test_epoch`` and build an eval-mode net.

        Asserts that the snapshot file exists, restores its ``'network'``
        weights into a fresh inference graph, and stores it as ``self.model``.
        """
        snapshot = os.path.join(cfg.model_dir,
                                'snapshot_%d.pth.tar' % self.test_epoch)
        assert os.path.exists(snapshot), 'Cannot find model at ' + snapshot
        self.logger.info('Load checkpoint from {}'.format(snapshot))

        # Build the inference graph and restore the saved weights.
        self.logger.info("Creating graph...")
        net = DataParallel(get_pose_net(cfg, False, self.joint_num)).cuda()
        net.load_state_dict(torch.load(snapshot)['network'])
        net.eval()

        self.model = net
Ejemplo n.º 4
0
def main():
    """Evaluate a pose model on the COCO test split and dump keypoint results.

    Seeds all RNGs for reproducibility, builds the model (optionally restoring
    a checkpoint), runs ``evaluate`` over the validation loader, writes the
    predictions to ``{config.result_dir}/data.json`` and runs COCO keypoint
    evaluation for both the original detector poses and the refined ones.
    """
    # For reproducibility: seed every RNG the pipeline touches.
    random_seed = 2020
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    args = parse_args()

    # Model loading (graph built in train mode; put on GPU when available).
    model = get_pose_net(config, is_train=True)
    model = model.to("cuda" if torch.cuda.is_available() else "cpu")

    valid_dataset = coco(
        config,
        config.DATASET.ROOT,
        config.DATASET.TEST_SET,
        is_train=False,
        is_eval=True,
        transform=tfms.Compose([
            tfms.ToTensor(),
        ])
    )

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE,
        shuffle=False,
        drop_last=False,
    )

    if config.MODEL.CHECKPOINT is not None:
        info = load_checkpoint(config.MODEL.CHECKPOINT)
        if info is not None:
            _, model_dic, _, _ = info
            try:
                model.load_state_dict(model_dic)
                logging.info('Model Loaded.\n')
            except Exception as e:
                # Chain the original error so the actual shape mismatch shows.
                raise FileNotFoundError('Model shape is different. Plz check.') from e

    end = time.time()
    logging.info('Evaluation Ready\n')

    result = evaluate(config, model, valid_loader)

    # BUGFIX: create the result directory *before* writing into it
    # (previously makedirs ran after the open(), which could raise).
    os.makedirs(config.result_dir, exist_ok=True)
    with open(f'{config.result_dir}/data.json', 'w') as f:
        json.dump(result, f)
    logging.info(f"Taken {time.time()-end:.5f}s\n")

    logging.info(f"From a Pose estimator.\n")
    # NOTE(review): hard-coded absolute path — consider making this configurable.
    valid_dataset.keypoint_eval('/home/mah/workspace/PoseFix/data/input_pose_path/keypoints_valid2017_results.json', config.result_dir + '/ori/')

    logging.info(f"Pose Estimator with PoseFix.\n")
    valid_dataset.keypoint_eval(result, config.result_dir + '/pred')
Ejemplo n.º 5
0
# 21-joint pose definition used by this demo (indices are positions below).
joint_num = 21
joints_name = (
    'Head_top', 'Thorax',
    'R_Shoulder', 'R_Elbow', 'R_Wrist',
    'L_Shoulder', 'L_Elbow', 'L_Wrist',
    'R_Hip', 'R_Knee', 'R_Ankle',
    'L_Hip', 'L_Knee', 'L_Ankle',
    'Pelvis', 'Spine', 'Head',
    'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe',
)
# Left/right joint index pairs to swap when an image is horizontally flipped.
flip_pairs = (
    (2, 5), (3, 6), (4, 7),
    (8, 11), (9, 12), (10, 13),
    (17, 18), (19, 20),
)
# Joint index pairs forming the kinematic tree (used for drawing limbs).
skeleton = (
    (0, 16), (16, 1), (1, 15), (15, 14),
    (14, 8), (14, 11),
    (8, 9), (9, 10), (10, 19),
    (11, 12), (12, 13), (13, 20),
    (1, 2), (2, 3), (3, 4), (4, 17),
    (1, 5), (5, 6), (6, 7), (7, 18),
)

# snapshot load: restore the checkpoint for the requested test epoch
model_path = './snapshot_%d.pth.tar' % int(args.test_epoch)
assert osp.exists(model_path), 'Cannot find model at ' + model_path
print('Load checkpoint from {}'.format(model_path))
# Build the inference graph (train=False) and load the saved weights.
model = get_pose_net(cfg, False, joint_num)
model = DataParallel(model).cuda()
ckpt = torch.load(model_path)
model.load_state_dict(ckpt['network'])
model.eval()

# prepare input image: to-tensor conversion + per-channel normalization
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)
])
img_path = 'input.jpg'
# cv2.imread returns a BGR HxWxC array (None when the file cannot be read).
original_img = cv2.imread(img_path)
original_img_height, original_img_width = original_img.shape[:2]

# prepare bbox
Ejemplo n.º 6
0
def main():
    """Train the pose network on COCO with periodic evaluation/checkpointing.

    Seeds all RNGs, builds the model (multi-GPU via DataParallel when
    available), optionally resumes model/optimizer/scheduler state from a
    checkpoint, then trains ``BEGIN_EPOCH..END_EPOCH`` — evaluating every 5
    epochs, dumping results every 100, and saving a checkpoint each epoch.
    """
    # For reproducibility: seed every RNG the pipeline touches.
    random_seed = 2020
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    args = parse_args()
    reset_config(config, args)

    # Model loading; wrap in DataParallel only when several GPUs exist.
    model = get_pose_net(config, is_train=True)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    logging.info(f"Training on CUDA: {torch.cuda.is_available()}")
    model = model.to("cuda" if torch.cuda.is_available() else "cpu")

    # Data loading process
    train_dataset = coco(
        config,
        config.DATASET.ROOT,
        config.DATASET.TRAIN_SET,
        is_train=True,
        is_eval=False,
        transform=tfms.RandomErasing(p=0.8, scale=(0.5, 0.5)),
    )

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE,
        shuffle=True,
        drop_last=True)

    valid_dataset = coco(config,
                         config.DATASET.ROOT,
                         config.DATASET.TEST_SET,
                         is_train=False,
                         is_eval=True,
                         transform=None)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE,
        shuffle=False,
        drop_last=False)

    start_epoch = config.TRAIN.BEGIN_EPOCH
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=5e-4,
                                 weight_decay=1e-5,
                                 eps=1e-5)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[90, 120],
                                                     gamma=0.1)
    losschecker = BestLossChecker()

    if config.MODEL.CHECKPOINT is not None:
        info = load_checkpoint(config.MODEL.CHECKPOINT)
        if info is not None:
            start_epoch, model_dic, optim_dic, sched_dic = info

            try:
                model.load_state_dict(model_dic)
                logging.info('Model Loaded.')

                optimizer.load_state_dict(optim_dic)
                logging.info('Optimizer Loaded.')

                if sched_dic is not None:
                    scheduler.load_state_dict(sched_dic)
                else:
                    # No scheduler state saved: fast-forward to resume epoch.
                    scheduler.last_epoch = start_epoch
                # NOTE: scheduler.optimizer is the very optimizer restored
                # above, so the previous second load_state_dict was redundant.
                logging.info('Scheduler Loaded.')
                logging.info('All Weights Loaded...\n')
            except Exception:
                # Shapes mismatch (e.g. different backbone): start over.
                start_epoch = config.TRAIN.BEGIN_EPOCH
                logging.info('Model shape is different. Plz check.')
                logging.info('Starts with init weights...\n')

    end = time.time()
    logging.info('Training Ready\n')

    for epoch in range(start_epoch, config.TRAIN.END_EPOCH):
        if epoch == 10:
            # Enable flip-test augmentation for evaluation from epoch 10 on.
            config.TEST.FLIP_TEST = True

        if epoch % 5 == 0 and epoch != 0:
            result = evaluate(config, model, valid_loader)

            if epoch % 100 == 0 and epoch != 0:
                # BUGFIX: create the directory *before* writing into it
                # (previously makedirs ran after the open()).
                os.makedirs(config.result_dir, exist_ok=True)
                with open(f'{config.result_dir}/data.json', 'w') as f:
                    json.dump(result, f)

            valid_dataset.keypoint_eval(result, config.result_dir + '/pred')
            valid_dataset.keypoint_eval(
                './data/input_pose_path/keypoints_valid2017_results.json',
                config.result_dir + '/ori/')
            end = time.time()

        losses = train(config,
                       epoch=epoch,
                       loader=train_loader,
                       model=model,
                       optimizer=optimizer)
        total_loss, hm_loss, coord_loss = losses
        is_best = losschecker.update(epoch, total_loss, hm_loss, coord_loss)

        # DataParallel wraps the real module; unwrap when present.
        try:
            state_dict = model.module.state_dict()
        except Exception:
            state_dict = model.state_dict()

        save_checkpoint(
            {
                'epoch': epoch,
                'model': get_model_name(config),
                'state_dict': state_dict,
                'optimizer': optimizer.state_dict(),
            }, is_best, "./weights_2")

        scheduler.step()
        # Report wall-clock time per epoch (renamed to avoid shadowing
        # the builtin `min`).
        spent = time.time() - end
        hours = int(spent // 3600)
        minutes = int((spent - hours * 3600) // 60)
        seconds = spent - hours * 3600 - minutes * 60

        logging.info(
            f"Epoch {epoch} taken {hours:d}h{minutes:2d}m {seconds:2.3f}s\n")
        end = time.time()
Ejemplo n.º 7
0
               'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee',
               'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine',
               'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe')
# 'Pelvis' 'RHip' 'RKnee' 'RAnkle' 'LHip' 'LKnee' 'LAnkle' 'Spine1' 'Neck' 'Head' 'Site' 'LShoulder' 'LElbow' 'LWrist' 'RShoulder' 'RElbow' 'RWrist
# Left/right joint index pairs to swap when an image is horizontally flipped.
flip_pairs = ((2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13), (17, 18),
              (19, 20))
# skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (10, 19), (11, 12), (12, 13), (13, 20), (1, 2), (2, 3), (3, 4), (4, 17), (1, 5), (5, 6), (6, 7), (7, 18) )
# Joint index pairs forming the kinematic tree (used for drawing limbs).
skeleton = ((0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8,
                                                                           14),
            (14, 15), (15, 16), (0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6))

# snapshot load: checkpoint path comes straight from the CLI
model_path = args.model

# print('Load checkpoint from {}'.format(model_path))
# Build the inference graph (train=False) and restore the saved weights.
model = get_pose_net(args.backbone, False, joint_num)
model = DataParallel(model).cuda()
# print("after DataParallel", model)
ckpt = torch.load(model_path)
# print("ckpt", ckpt['network'])
model.load_state_dict(ckpt['network'])
model.eval()

# prepare input image: to-tensor conversion + per-channel normalization
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)
])
img_path = args.image
assert osp.exists(img_path), 'Cannot find image at ' + img_path
original_img = cv2.imread(img_path)
Ejemplo n.º 8
0
        gpus[1] = len(mem_info()) if not gpus[1].isdigit() else int(gpus[1]) + 1
        args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus))))
    
    assert args.test_epoch, 'Test epoch is required.'
    return args

# argument parsing
# argument parsing
args = parse_args()
cfg.set_args(args.gpu_ids)
# Fixed input sizes -> let cuDNN auto-tune the fastest kernels.
cudnn.benchmark = True

# snapshot load: restore the checkpoint for the requested test epoch
model_path = './snapshot_%d.pth.tar' % int(args.test_epoch)
assert osp.exists(model_path), 'Cannot find model at ' + model_path
print('Load checkpoint from {}'.format(model_path))
# Build the inference graph (train=False) and restore the saved weights.
model = get_pose_net(cfg, False)
model = DataParallel(model).cuda()
ckpt = torch.load(model_path)
model.load_state_dict(ckpt['network'])
model.eval()

# prepare input image: to-tensor conversion + per-channel normalization
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)])
img_path = 'input.jpg'
# cv2.imread returns a BGR HxWxC array (None when the file cannot be read).
original_img = cv2.imread(img_path)
original_img_height, original_img_width = original_img.shape[:2]

# prepare bbox for each human
bbox_list = [
[139.41, 102.25, 222.39, 241.57],\
[287.17, 61.52, 74.88, 165.61],\
Ejemplo n.º 9
0
def main():
    """Run pose refinement over a detection video and save visualizations.

    Seeds all RNGs, loads the model (optionally from a checkpoint), then for
    each batch: renders the input poses as Gaussian heatmaps, refines them
    with the network (flip-test averaged when enabled), maps the coordinates
    back to original-frame space, and writes annotated crops/frames under
    ``./test_result``.
    """
    # For reproducibility: seed every RNG the pipeline touches.
    random_seed = 2020
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    args = parse_args()
    reset_config(config, args)

    # model loading
    model = get_pose_net(config, is_train=True)
    model = model.to("cuda" if torch.cuda.is_available() else "cpu")
    logging.info(f"Model on CUDA: {torch.cuda.is_available()}")

    if config.MODEL.CHECKPOINT is not None:
        info = load_checkpoint(config.MODEL.CHECKPOINT)
        if info is not None:
            _, model_dic, _, _ = info
            try:
                model.load_state_dict(model_dic)
                logging.info('Model Loaded.\n')
            except Exception as e:
                # Chain the original error so the actual shape mismatch shows.
                raise FileNotFoundError('Model shape is different. Plz check.') from e

    dataset = TestDatset(config, args.video_path, args.detection_json, None, (0, 0))
    loader = DataLoader(
        dataset,
        batch_size=8,
        shuffle=False,
        drop_last=False,
    )

    cnt = 0
    vis = True
    total_size = len(loader)

    model.eval()
    with torch.no_grad():
        for i, data in tqdm(enumerate(loader)):
            if i == 1000:  # hard cap on processed batches
                break

            # Progress log roughly every 10% (max(...,1) guards against
            # ZeroDivisionError when the loader has fewer than 10 batches).
            if i % max(total_size // 10, 1) == 0:
                logging.info(f'{i/total_size*100:2.2f}%   [{str(i).zfill(len(str(total_size)))} | {total_size}]')

            imgs, coords, valids, scores, crop_infos, frame_ids = data

            # Render input poses as heatmaps and refine them with the model.
            input_pose_hms = render_gaussian_heatmap(config, coords, config.MODEL.IMAGE_SIZE, config.MODEL.INPUT_SIGMA, valids)
            heatmap_outs = model(imgs.cuda().float(), input_pose_hms.cuda().float())
            predicts = extract_coordinate(config, heatmap_outs, config.MODEL.NUM_JOINTS)

            if config.TEST.FLIP_TEST:
                # Mirror images/poses, run the model again, un-mirror the
                # prediction and average it with the straight pass.
                flip_imgs = np.flip(imgs.cpu().numpy(), 3).copy()
                flip_imgs = torch.from_numpy(flip_imgs).cuda()
                flip_input_pose_coords = coords.clone()
                flip_input_pose_coords[:,:,0] = config.MODEL.IMAGE_SIZE[1] - 1 - flip_input_pose_coords[:,:,0]
                flip_input_pose_valids = valids.clone()
                for (q, w) in config.kps_symmetry:
                    flip_input_pose_coords_w, flip_input_pose_coords_q = flip_input_pose_coords[:,w,:].clone(), flip_input_pose_coords[:,q,:].clone()
                    flip_input_pose_coords[:,q,:], flip_input_pose_coords[:,w,:] = flip_input_pose_coords_w, flip_input_pose_coords_q
                    flip_input_pose_valids_w, flip_input_pose_valids_q = flip_input_pose_valids[:,w].clone(), flip_input_pose_valids[:,q].clone()
                    flip_input_pose_valids[:,q], flip_input_pose_valids[:,w] = flip_input_pose_valids_w, flip_input_pose_valids_q

                flip_input_pose_hms = render_gaussian_heatmap(config, flip_input_pose_coords, config.MODEL.IMAGE_SIZE, config.MODEL.INPUT_SIGMA, flip_input_pose_valids)
                flip_heatmap_outs = model(flip_imgs.cuda().float(), flip_input_pose_hms.cuda().float())
                flip_coords = extract_coordinate(config, flip_heatmap_outs.float(), config.MODEL.NUM_JOINTS)

                flip_coords[:,:,0] = config.MODEL.IMAGE_SIZE[1] - 1 - flip_coords[:,:,0]
                for (q, w) in config.kps_symmetry:
                    flip_coord_w, flip_coord_q = flip_coords[:,w,:].clone(), flip_coords[:,q,:].clone()
                    flip_coords[:,q,:], flip_coords[:,w,:] = flip_coord_w, flip_coord_q

                predicts += flip_coords
                predicts /= 2

            kps_result = np.zeros((len(imgs), config.MODEL.NUM_JOINTS, 3))
            area_save = np.zeros(len(imgs))

            # Normalize the max-over-joints heatmap to [0, 1] and upsample it
            # to the crop size so it can be alpha-blended onto the image.
            visualize_pred_heatmaps, _ = torch.max(heatmap_outs, dim=1)
            visualize_pred_max = torch.max(visualize_pred_heatmaps)
            visualize_pred_min = torch.min(visualize_pred_heatmaps)
            visualize_pred_heatmaps = (visualize_pred_heatmaps-visualize_pred_min)/(visualize_pred_max-visualize_pred_min)
            visualize_pred_heatmaps = torch.reshape(visualize_pred_heatmaps, shape=(imgs.shape[0], 1, *config.MODEL.OUTPUT_SIZE))
            visualize_pred_heatmaps = torch.nn.functional.interpolate(visualize_pred_heatmaps, size=config.MODEL.IMAGE_SIZE, mode='bilinear').permute(0, 2, 3, 1)

            for j in range(len(predicts)):
                visualize_pred_heatmap = visualize_pred_heatmaps[j].detach().cpu().numpy() * 254
                visualize_pred_heatmap = visualize_pred_heatmap.astype('uint8')
                visualize_pred_heatmap = cv2.applyColorMap(visualize_pred_heatmap, cv2.COLORMAP_JET)

                kps_result[j, :, :2] = predicts[j]
                kps_result[j, :, 2]  = valids[j]

                crop_info = crop_infos[j, :]
                area = (crop_info[2] - crop_info[0]) * (crop_info[3] - crop_info[1])

                # BUGFIX: compare per-joint *inside* np.any (was
                # `np.any(x) > 0.9`, which reduced to a bool first and
                # therefore never exceeded 0.9 meaningfully).
                if vis and np.any(kps_result[j,:,2] > 0.9) and area > 96**2:
                    tmpimg = imgs[j].detach().clone().permute(1, 2, 0).numpy()
                    tmpimg = denormalize_input(tmpimg, config)
                    tmpimg = tmpimg.astype('uint8')
                    tmpkps = np.zeros((3,config.MODEL.NUM_JOINTS))
                    tmpkps[:2,:] = kps_result[j,:,:2].transpose(1,0)
                    tmpkps[2,:] = kps_result[j,:,2]
                    _tmpimg = tmpimg.copy()
                    _tmpimg = vis_keypoints(config, _tmpimg, tmpkps)

                    # Blend the predicted heatmap over the refined-pose image.
                    alpha = 0.4
                    _tmpimg = cv2.addWeighted(
                        _tmpimg,
                        1.0 - alpha,
                        visualize_pred_heatmap,
                        alpha,
                        0
                    )

                    # Draw the *input* pose (all joints marked valid) on a copy.
                    tmpkps = np.zeros((3,config.MODEL.NUM_JOINTS))
                    tmpkps[:2,:] = coords[j,:,:2].transpose(1,0)
                    tmpkps[2,:] = 1

                    _tmpimg_orig = tmpimg.copy()
                    _tmpimg_orig = vis_keypoints(config, _tmpimg_orig, tmpkps)

                    path = os.path.join('./test_result', str('cropped_pred').zfill(4))
                    os.makedirs(path, exist_ok=True)
                    cv2.imwrite(os.path.join(path, str(i * imgs.shape[0] + j) + '_output.jpg'), _tmpimg)

                    path = os.path.join('./test_result', str('cropped_orig').zfill(4))
                    os.makedirs(path, exist_ok=True)
                    cv2.imwrite(os.path.join(path, str(i * imgs.shape[0] + j) + '_output.jpg'), _tmpimg_orig)

                # Map both refined and input coordinates from crop space back
                # to original-frame space.
                for k in range(config.MODEL.NUM_JOINTS):
                    kps_result[j, k, 0] = kps_result[j, k, 0] / config.MODEL.IMAGE_SIZE[1] * (\
                    crop_infos[j][2] - crop_infos[j][0]) + crop_infos[j][0]
                    kps_result[j, k, 1] = kps_result[j, k, 1] / config.MODEL.IMAGE_SIZE[0] * (\
                    crop_infos[j][3] - crop_infos[j][1]) + crop_infos[j][1]

                    # for mapping back to original
                    coords[j, k, 0] = coords[j, k, 0] / config.MODEL.IMAGE_SIZE[1] * (\
                    crop_infos[j][2] - crop_infos[j][0]) + crop_infos[j][0]
                    coords[j, k, 1] = coords[j, k, 1] / config.MODEL.IMAGE_SIZE[0] * (\
                    crop_infos[j][3] - crop_infos[j][1]) + crop_infos[j][1]

                area_save[j] = (crop_infos[j][2] - crop_infos[j][0]) * (crop_infos[j][3] - crop_infos[j][1])

            if vis:
                # Re-derive the normalized heatmaps (output resolution) for
                # full-frame visualization.
                visualize_pred_heatmaps, _ = torch.max(heatmap_outs, dim=1)
                visualize_pred_max = torch.max(visualize_pred_heatmaps)
                visualize_pred_min = torch.min(visualize_pred_heatmaps)
                visualize_pred_heatmaps = (visualize_pred_heatmaps-visualize_pred_min)/(visualize_pred_max-visualize_pred_min)
                visualize_pred_heatmaps = torch.reshape(visualize_pred_heatmaps, shape=(imgs.shape[0], 1, *config.MODEL.OUTPUT_SIZE))

                for j in range(len(predicts)):
                    if np.any(kps_result[j,:,2] > 0.9):
                        # Seek to the source frame and read it for drawing.
                        dataset.cap.set(1, int(frame_ids[j].data))
                        _, tmpimg = dataset.cap.read()
                        tmpimg = tmpimg.astype('uint8')

                        tmpkps_pred = np.zeros((3, config.MODEL.NUM_JOINTS))
                        tmpkps_pred[:2,:] = kps_result[j, :, :2].transpose(1,0)
                        tmpkps_pred[2,:] = kps_result[j, :, 2]

                        tmpkps_orig = np.zeros((3, config.MODEL.NUM_JOINTS))
                        tmpkps_orig[:2,:] = coords[j, :, :2].transpose(1,0)
                        tmpkps_orig[2,:] = scores[j]

                        tmpimg_pred = vis_keypoints(config, tmpimg, tmpkps_pred, kp_thresh=0.1)
                        tmpimg_orig = vis_keypoints(config, tmpimg, tmpkps_orig, kp_thresh=0.1)

                        path_orig = os.path.join('test_result', str('evaluate_orig'))
                        path_pred = os.path.join('test_result', str('evaluate_pred'))
                        os.makedirs(path_orig, exist_ok=True)
                        os.makedirs(path_pred, exist_ok=True)

                        cv2.imwrite(os.path.join(path_orig, str(cnt) + '.jpg'), tmpimg_orig)
                        cv2.imwrite(os.path.join(path_pred, str(cnt) + '.jpg'), tmpimg_pred)
                        cnt += 1

    end = time.time()
    logging.info('Test Start\n')
    # BUGFIX: was `dataset.cap.relase()` (typo) — an AttributeError at runtime.
    dataset.cap.release()