Example #1
def main():
    args = _parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    test_output_dir = get_output_dir(training=False)
    roidb, dataset, _, _, _ = get_roidb_and_dataset(None)
    run_mpii_eval(test_output_dir, roidb, dataset)
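
Most examples on this page share the same three-step flow: merge a YAML file into the global cfg, apply KEY VALUE overrides from the command line, then validate the result. Below is a minimal sketch of that pattern; the argument names are illustrative, and it assumes a Detectron-style core.config module exporting cfg, cfg_from_file, cfg_from_list and assert_and_infer_cfg.

import argparse

from core.config import assert_and_infer_cfg, cfg, cfg_from_file, cfg_from_list


def load_config():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg_file', type=str, default=None,
                        help='Optional YAML config file')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='KEY VALUE pairs to override individual options')
    args = parser.parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)  # merge the YAML file into the global cfg
    if args.opts:
        cfg_from_list(args.opts)      # then apply command-line overrides
    assert_and_infer_cfg()            # finally validate the merged config
    return cfg
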
Example #2
def main():
    args = _parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    test_output_dir = get_output_dir(training=False)
    json_data, _, _, _, _ = get_roidb_and_dataset(None, include_gt=True)
    run_posetrack_tracking(test_output_dir, json_data)
Example #3
def main():
    parser = argparse.ArgumentParser(
        description='Classification model testing')
    parser.add_argument('--config_file',
                        type=str,
                        default=None,
                        help='Optional config file for params')
    parser.add_argument(
        '--store_vis',
        # argparse's type=bool treats any non-empty string as True, so a flag is safer
        action='store_true',
        help='Store a CAM style visualization.')
    parser.add_argument('opts',
                        help='see configs.py for all options',
                        default=None,
                        nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    if args.config_file is not None:
        cfg_from_file(args.config_file)
    if args.opts is not None:
        cfg_from_list(args.opts)

    assert_and_infer_cfg()

    # run testing for both
    # Only storing full fc7 features for test because that's the only one
    # we will analyze
    test_net(full_label_fname='test_fullLbl', store_vis=args.store_vis)
    test_net(full_label_fname='train_fullLbl')
Example #4
    def __init__(self, args):
        # assert False, 'merge config'
        cfg_from_file(args.config)
        cfg.TRAIN.IMS_PER_BATCH = 1
        self._cur = 0
        if 'SEGDISP' in args.network:
            self.load_image = self.load_segdisp_image
            to_test = to_test_segdisp
        else:
            self.load_image = self.load_semseg_image
            to_test = to_test_semseg()
        if args.dataset == 'cityscapes':
            self.input_size = args.input_size
            self.aug_scale = args.aug_scale
            self.label_root = os.path.join(
                os.getcwd(),
                'lib/datasets/data/cityscapes/annotations/val.txt')
            # self.label_root = os.path.join(os.getcwd(),'lib/datasets/data/cityscapes/label_info_fine/test.txt')
            #self.label_root = os.path.join(os.getcwd(),'citycapes/label_info/onlytrain_label_citycapes_right.txt')
            self.num_class = 19
            self.image_shape = [1024, 2048]
        self.load_listname(args)

        # map trainIds back to original label ids / colors
        self.transLabel = {label.trainId: label.id for label in labels}
        self.transColor = {label.trainId: label.color for label in labels}
Example #5
def set_configs(args):
    if not torch.cuda.is_available():
        sys.exit("Need a CUDA device to run the code.")

    if args.cuda or cfg.NUM_GPUS > 0:
        cfg.CUDA = True
    else:
        raise ValueError("Need Cuda device to run !")

    if args.dataset == "vhico":
        cfg.TRAIN.DATASETS = ('vhico_train', )
        cfg.VAL.DATASETS = ('vhico_val', )
        cfg.TEST.DATASETS = ('vhico_test', )
        cfg.UNSEEN.DATASETS = ('vhico_unseen', )
    else:
        raise ValueError("Unexpected args.dataset: {}".format(args.dataset))

    cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    if args.debug:
        cfg.DEBUG = True

    cfg.NUM_GPUS = args.num_gpus
    cfg.VIDEO_FRAME = args.video_frame

    cfg.SOLVER.BACKBONE_LR_SCALAR = args.backbone_lr_scalar

    cfg.FAST_RCNN.ROI_BOX_HEAD = args.roi_box_head
    cfg.DROPOUT = args.dropout

    cfg.OBJ_LOSS = args.obj_loss
    cfg.COM_WEIGHT = args.com_weight
    cfg.HUMAN_OBJ_SPATIAL = args.human_obj_spatial
    cfg.WEIGHT_REG = args.weight_reg
    cfg.L2_WEIGHT = args.l2_weight
    cfg.VIDEO_LOSS = args.video_loss
    cfg.VIDEO_WEIGHT = args.video_weight
    cfg.BINARY_LOSS = args.binary_loss

    cfg.EVAL_SUBSET = args.eval_subset
    cfg.EVAL_MAP = args.eval_map

    cfg.SAVE_MODEL_ITER = 100
    cfg.IOU = args.iou
    cfg.vis = args.vis
    cfg.vis_video = args.vis_video
    cfg.load_ckpt = args.load_ckpt

    ### Overwrite some solver settings from command line arguments
    if args.optimizer is not None:
        cfg.SOLVER.TYPE = args.optimizer
    if args.lr is not None:
        cfg.SOLVER.BASE_LR = args.lr
    if args.lr_decay_gamma is not None:
        cfg.SOLVER.GAMMA = args.lr_decay_gamma
    assert_and_infer_cfg()

    return cfg
Example #6
def main():
    args = get_arguments(sys.argv[1:])

    # Reading the config
    cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    # seed
    if args.seed is not None:
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)

    ngpus_per_node = torch.cuda.device_count()
    args.world_size = int(os.environ["WORLD_SIZE"])
    args.world_size = ngpus_per_node * args.world_size
    args.dist_url = "tcp://127.0.0.1:{}".format(find_free_port())
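    # One worker process per local GPU is spawned below; dist_url is presumably
    # the rendezvous address consumed by init_process_group inside main_worker
    # (not shown in this excerpt).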

    print("World size: ", args.world_size, " / URL: {}".format(args.dist_url))
    print("# GPUs per node: ", ngpus_per_node)
    mp.spawn(main_worker,
             nprocs=ngpus_per_node,
             args=(ngpus_per_node, args, cfg))
Example #7
def main():
    args = _parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    test_output_dir = get_output_dir(training=False)
    json_data, _, _, _, _ = get_roidb_and_dataset(None, include_gt=True)
    #     run_posetrack_tracking(test_output_dir, json_data)
    ##################### add by jianbo #############
    score_ap, score_mot, apAll, preAll, recAll, mota = run_posetrack_tracking(
        test_output_dir, json_data)
    import os, json
    from core.config import get_log_dir_path
    tmp_dic = {
        "total_AP": score_ap.tolist(),
        "total_MOTA": score_mot.tolist(),
        "apAll": apAll.tolist(),
        "preAll": preAll.tolist(),
        "recAll": recAll.tolist(),
        "mota": mota.tolist()
    }
    dir_path = get_log_dir_path()
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    with open(os.path.join(dir_path, "eval.json"), "w") as f:
        json.dump(tmp_dic, f)
Example #8
def main():
    c2_utils.import_detectron_ops()
    parser = argparse.ArgumentParser(
        description='Classification model testing')
    parser.add_argument('--config_file',
                        type=str,
                        default=None,
                        required=True,
                        help='Config file for params (required)')
    parser.add_argument('opts',
                        help='see config.py for all options',
                        default=None,
                        nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    if args.config_file is not None:
        cfg_from_file(args.config_file)
    if args.opts is not None:
        cfg_from_list(args.opts)

    assert_and_infer_cfg()
    print_cfg()

    test(args)
Example #9
    def __init__(self):
        args = parse_args()
        # Historical code: this was originally trained on the Baidu ApolloScape dataset.
        dataset = WAD_CVPR2018(args.dataset_dir)
        cfg.MODEL.NUM_CLASSES = len(
            dataset.eval_class) + 1  # with a background class
        print('load cfg from file: {}'.format(args.cfg_file))
        cfg_from_file(osp.join(this_dir, args.cfg_file))
        if args.nms_soft:
            cfg.TEST.SOFT_NMS.ENABLED = True
        else:
            cfg.TEST.NMS = args.nms

        cfg.RESNETS.IMAGENET_PRETRAINED = False  # Don't need to load imagenet pretrained weights
        assert_and_infer_cfg()

        maskRCNN = Generalized_RCNN()
        maskRCNN.cuda()

        if args.load_ckpt:
            load_name = osp.join(this_dir, args.load_ckpt)
            print("loading checkpoint %s" % (load_name))
            checkpoint = torch.load(load_name,
                                    map_location=lambda storage, loc: storage)
            net_utils.load_ckpt(maskRCNN, checkpoint['model'])

        maskRCNN = mynn.DataParallel(maskRCNN,
                                     cpu_keywords=['im_info', 'roidb'],
                                     minibatch=True,
                                     device_ids=[0])
        maskRCNN.eval()
        self.model = maskRCNN
        self.dataset = dataset
Example #10
def main():
    parser = argparse.ArgumentParser(
        description='Classification model training')
    parser.add_argument('--test_net',
                        type=bool,
                        default=True,
                        help='Test trained model on test data')
    parser.add_argument('--node_id', type=int, default=0, help='Node id')
    parser.add_argument('--config_file',
                        type=str,
                        default=None,
                        required=True,
                        help='Config file for params (required)')
    parser.add_argument('opts',
                        help='see config.py for all options',
                        default=None,
                        nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    if args.config_file is not None:
        cfg_from_file(args.config_file)
    if args.opts is not None:
        cfg_from_list(args.opts)

    assert_and_infer_cfg()
    print_cfg()

    train(args)
Example #11
    def __init__(self, args):
        # assert False, 'merge config'
        cfg_from_file(args.config)
        cfg.TRAIN.IMS_PER_BATCH = 1
        args.aug_scale = cfg.TRAIN.SCALES
        args.input_size = cfg.SEM.INPUT_SIZE
        self.input_size = cfg.SEM.INPUT_SIZE
        print('test scale:', args.aug_scale)
        self._cur = 0
        if args.network == 'Generalized_SEGDISP':
            self.load_image = self.load_segdisp_image
            to_test = to_test_segdisp
        elif args.network == 'Generalized_SEMSEG':
            self.load_image = self.load_semseg_image
            to_test = to_test_semseg
        else:
            self.load_image = self.load_semseg_image
            to_test = to_test_semseg(args)
        if 'cityscape' in args.dataset:
            self.input_size = args.input_size
            self.aug_scale = args.aug_scale
            if 'train_on_val' in args.dataset:
                self.label_root = os.path.join(
                    os.getcwd(),
                    'lib/datasets/data/cityscapes/annotations/Cityscape_disp_SegFlow_train.txt')
            else:
                self.label_root = os.path.join(
                    os.getcwd(),
                    'lib/datasets/data/cityscapes/annotations/val.txt')
            # self.label_root = os.path.join(os.getcwd(), 'lib/datasets/data/cityscapes/label_info_fine/test.txt')
            # self.label_root = os.path.join(os.getcwd(), 'citycapes/label_info/onlytrain_label_citycapes_right.txt')
            self.num_class = 19
            self.image_shape = [1024, 2048]
        self.load_listname(args)
        self.pretrained_model = args.premodel
        # map trainIds back to original label ids / colors
        self.transLabel = {label.trainId: label.id for label in labels}
        self.transColor = {label.trainId: label.color for label in labels}
Example #12
def main():
    """main function"""

    if not torch.cuda.is_available():
        sys.exit("Need a CUDA device to run the code.")

    args = parse_args()
    print('Called with args:')
    print(args)

    dataset = WAD_CVPR2018(args.dataset_dir)
    cfg.MODEL.NUM_CLASSES = len(
        dataset.eval_class) + 1  # with a background class

    print('load cfg from file: {}'.format(args.cfg_file))
    cfg_from_file(args.cfg_file)

    cfg.RESNETS.IMAGENET_PRETRAINED = False  # Don't need to load imagenet pretrained weights
    assert_and_infer_cfg()

    maskRCNN = Generalized_RCNN()
    maskRCNN.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        print("loading checkpoint %s" % (load_name))
        checkpoint = torch.load(load_name,
                                map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])

    maskRCNN = mynn.DataParallel(maskRCNN,
                                 cpu_keywords=['im_info', 'roidb'],
                                 minibatch=True,
                                 device_ids=[0])  # only support single GPU

    maskRCNN.eval()
    imglist = misc_utils.get_imagelist_from_dir(dataset.test_image_dir)
    num_images = len(imglist)

    output_dir = os.path.join(('/').join(args.load_ckpt.split('/')[:-2]),
                              'Images')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for i in tqdm(xrange(num_images)):
        im = cv2.imread(imglist[i])
        assert im is not None
        timers = defaultdict(Timer)
        im_name, _ = os.path.splitext(os.path.basename(imglist[i]))
        args.current_im_name = im_name
        cls_boxes, cls_segms, prediction_row = im_detect_all(args,
                                                             maskRCNN,
                                                             im,
                                                             dataset,
                                                             timers=timers)

        with open(os.path.join(output_dir, im_name + '.txt'), 'w') as thefile:
            for item in prediction_row:
                thefile.write("%s\n" % item)
Example #13
def main():
    args = parse_args()
    print('load cfg from file: {}'.format(args.cfg_file))
    cfg_from_file(args.cfg_file)
    if args.nms_soft:
        cfg.TEST.SOFT_NMS.ENABLED = True
    else:
        cfg.TEST.NMS = args.nms

    if args.nms_soft:
        output_dir = os.path.join(
            ('/').join(args.load_ckpt.split('/')[:-2]),
            'Images_' + str(cfg.TEST.SCALE) + '_SOFT_NMS')
    elif args.nms:
        output_dir = os.path.join(
            ('/').join(args.load_ckpt.split('/')[:-2]),
            'Images_' + str(cfg.TEST.SCALE) + '_NMS_%.2f' % args.nms)
    else:
        output_dir = os.path.join(('/').join(args.load_ckpt.split('/')[:-2]),
                                  'Images_' + str(cfg.TEST.SCALE))

    if cfg.TEST.BBOX_AUG.ENABLED:
        output_dir += '_TEST_AUG'
    # if args.cls_boxes_confident_threshold < 0.5:
    output_dir += '_cls_boxes_confident_threshold_%.1f' % args.cls_boxes_confident_threshold

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    args.output_vis_dir = os.path.join(output_dir, 'Image_Vis')
    if not os.path.exists(args.output_vis_dir):
        os.makedirs(args.output_vis_dir)

    args.output_img_dir = os.path.join(output_dir, 'Image_Masks')
    if not os.path.exists(args.output_img_dir):
        os.makedirs(args.output_img_dir)

    args.output_list_dir = os.path.join(output_dir, 'List_Masks')
    if not os.path.exists(args.output_list_dir):
        os.makedirs(args.output_list_dir)
    print('Called with args:')
    print(args)

    if args.range is None:
        args.test_net_file, _ = os.path.splitext(__file__)
        dataset = WAD_CVPR2018(args.dataset_dir)

        img_produced = os.listdir(args.output_list_dir)
        imglist_all = misc_utils.get_imagelist_from_dir(dataset.test_image_dir)
        imglist = [
            x for x in imglist_all
            if x.split('/')[-1][:-4] + '.txt' not in img_produced
        ]
        num_images = len(imglist)
        multi_gpu_test_net_on_dataset(args, num_images)
    else:
        test_net_on_dataset_multigpu(args)
Example #14
def main():
    args = _parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.opts is not None:
        cfg_from_list(args.opts)
    assert_and_infer_cfg()
    test_output_dir = get_output_dir(training=False)
    json_data, _, _, _, _ = get_roidb_and_dataset(None, include_gt=True)
    run_posetrack_tracking(test_output_dir, json_data)
Example #15
def main():
    config_file = 'deploy_config.yaml'
    cfg_from_file(config_file)
    assert_and_infer_cfg()
    add_configure()

    i3d = init_net()
    cap = cv2.VideoCapture('test.mp4')
    while True:
        action_recognition(i3d, cap)
Example #16
    def __init__(self):
        cfg_from_file("maskrcnn/configs/baselines/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x.yaml")
        cfg.RESNETS.IMAGENET_PRETRAINED = False  # Don't need to load imagenet pretrained weights
        assert_and_infer_cfg()

        maskRCNN = Generalized_RCNN()
        maskRCNN.cuda()
        pretrained_path = "maskrcnn/data/X-152-32x8d-FPN-IN5k.pkl"
        print("loading detectron weights %s" % pretrained_path)
        load_detectron_weight(maskRCNN, pretrained_path)
        maskRCNN.eval()         # Note this step

        self.maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
                                          minibatch=True, device_ids=[0])  # only support single GPU
Example #17
def main(args):

    cfg = config.cfg_from_file(args.cfg)

    log_dir = os.path.join('logs/nyu', args.name)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    checkpoint_dir = os.path.join('checkpoints/nyu', args.name)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    tmp_dir = os.path.join('tmp/nyu', args.name)
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)

    cfg.LOG_DIR = log_dir
    cfg.CHECKPOINT_DIR = checkpoint_dir
    cfg.TMP_DIR = tmp_dir

    solver = DeepV2DTrainer(cfg)
    ckpt = None

    if args.restore is not None:
        solver.train(args.tfrecords, cfg, stage=2, restore_ckpt=args.restore, num_gpus=args.num_gpus)

    else:
        for stage in [1, 2]:
            ckpt = solver.train(args.tfrecords, cfg, stage=stage, ckpt=ckpt, num_gpus=args.num_gpus)
            tf.reset_default_graph()
Example #18
def main(args):

    if args.cfg is None:
        if 'nyu' in args.model:
            args.cfg = 'cfgs/nyu.yaml'
        elif 'scannet' in args.model:
            args.cfg = 'cfgs/scannet.yaml'
        elif 'kitti' in args.model:
            args.cfg = 'cfgs/kitti.yaml'
        else:
            args.cfg = 'cfgs/nyu.yaml'

    cfg = config.cfg_from_file(args.cfg)
    is_calibrated = not args.uncalibrated

    # build the DeepV2D graph
    deepv2d = DeepV2D(cfg,
                      args.model,
                      use_fcrn=args.fcrn,
                      is_calibrated=is_calibrated,
                      mode=args.mode)

    with tf.Session() as sess:
        deepv2d.set_session(sess)

        # call deepv2d on a video sequence
        images, intrinsics = load_test_sequence(args.sequence)

        if is_calibrated:
            depths, poses = deepv2d(images,
                                    intrinsics,
                                    viz=True,
                                    iters=args.n_iters)
        else:
            depths, poses = deepv2d(images, viz=True, iters=args.n_iters)
Example #19
def make_predictions(args):
    """ Run inference over the test images """

    np.random.seed(1234)
    cfg = config.cfg_from_file(args.cfg)

    db = KittiRaw(args.dataset_dir)
    scale = db.args['scale']
    crop = db.args['crop']
 
    deepv2d = DeepV2D(cfg, args.model, use_fcrn=False, mode='keyframe')

    with tf.Session() as sess:
        deepv2d.set_session(sess)

        predictions = []
        for (images, intrinsics) in db.test_set_iterator():
            depth_predictions, _ = deepv2d(images, intrinsics, iters=args.n_iters)
        
            keyframe_depth = depth_predictions[0]
            keyframe_image = images[0]

            pred = process_for_evaluation(keyframe_depth, scale, crop)
            predictions.append(pred.astype(np.float32))

            if args.viz:
                image_and_depth = vis.create_image_depth_figure(keyframe_image, keyframe_depth)
                cv2.imshow('image', image_and_depth/255.0)
                cv2.waitKey(10)

        return predictions
Example #20
def make_predictions(args):

    cfg = config.cfg_from_file(args.cfg)
    deepv2d = DeepV2D(cfg, args.model, use_fcrn=True, mode=args.mode)

    with tf.Session() as sess:
        deepv2d.set_session(sess)

        depth_predictions, pose_predictions = [], []
        depth_groundtruth, pose_groundtruth = [], []
        db = ScanNet(args.dataset_dir)

        for test_id, test_blob in enumerate(db.test_set_iterator()):
            images, intrinsics = test_blob['images'], test_blob['intrinsics']
            depth_pred, poses_pred = deepv2d(images, intrinsics)

            # use keyframe depth for evaluation
            depth_predictions.append(depth_pred[0])

            # BA-Net evaluates pose as the relative transformation between two frames
            delta_pose = poses_pred[1] @ np.linalg.inv(poses_pred[0])
            pose_predictions.append(delta_pose)

            depth_groundtruth.append(test_blob['depth'])
            pose_groundtruth.append(test_blob['pose'])

    predictions = (depth_predictions, pose_predictions)
    groundtruth = (depth_groundtruth, pose_groundtruth)
    return groundtruth, predictions
Example #21
def main(args):

    cfg = config.cfg_from_file(args.cfg)

    log_dir = os.path.join('logs/nyu', args.name)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    checkpoint_dir = os.path.join('checkpoints/nyu', args.name)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    if not os.path.exists(args.tmp_dir):
        os.makedirs(args.tmp_dir)

    cfg.LOG_DIR = log_dir
    cfg.CHECKPOINT_DIR = checkpoint_dir
    cfg.TMP_DIR = args.tmp_dir

    solver = DeepV2DTrainer(cfg)
    ckpt = None

    for stage in [1, 2]:
        ckpt = solver.train(args.tfrecords, cfg, stage=stage, ckpt=ckpt)
        tf.reset_default_graph()
Example #22
def main(args):
    cfg = config.cfg_from_file(args.cfg)
    net = DeepV2DSLAM(INPUT_DIMS, cfg)

    images, orb_poses, intrinsics = load_slam_sequence(args.sequence)
    fig1 = plt.figure()
    fig2, (ax1, ax2) = plt.subplots(1,2)
    ax = fig1.add_subplot(111, projection='3d')

    with tf.Session() as sess:
        net.restore(sess, args.model)
        net.set_fcrn_weights(sess)
        net.set_intrinsics(intrinsics)

        for image in images:
            ##### Update the tracker #####
            start = time.time()
            net.update(image)
            stop = time.time()
            print("Iteration Time: %f" % (stop-start))

            ##### Display the results #####
            ax.cla()
            ax1.cla()
            ax2.cla()
            ax1.imshow(net.keyframe_image[...,::-1]/255.0)
            ax2.imshow(net.keyframe_depth[...,0])
            plot_trajectory(ax, net.poses, name='DeepV2D')
            plot_trajectory(ax, orb_poses[:len(net.poses)+1], name='RGB-D ORB-SLAM')
            ax.legend()
            plt.pause(0.05)
Example #23
    def __init__(self, cfg_file, exp_dir=None):
        # Override the default configs
        cfg_from_file(cfg_file)
        if cfg.EXP_DIR != '':
            exp_dir = cfg.EXP_DIR
        if exp_dir is None:
            model_id = time.strftime('%Y%m%d_%H%M%S',
                                     time.localtime(time.time()))
            self.experiment_dir = '../experiments/{}'.format(model_id)
            if not os.path.exists(self.experiment_dir):
                os.makedirs(self.experiment_dir)
        else:
            if not os.path.exists(exp_dir):
                raise ValueError(
                    'ExperimentDir({}) does not exist.'.format(exp_dir))
            self.experiment_dir = exp_dir
Example #24
def main(args):

    if args.dataset == 'kitti':
        cfg = config.cfg_from_file('cfgs/kitti.yaml')
 
        model = 'models/kitti.ckpt'
        slam = DeepV2DSLAM_KITTI(cfg, model, n_keyframes=args.n_keyframes)

        dataset_dir = '/media/datadrive/data/KITTI/raw'
        db = KittiRaw(dataset_dir)

        if args.sequence is None:
            args.sequence = '2011_09_26_drive_0002'

        
    else:
        cfg = config.cfg_from_file('cfgs/nyu.yaml')
        model = 'models/nyu_scannet.ckpt'
        slam = DeepV2DSLAM(cfg, model, n_keyframes=args.n_keyframes, rate=args.rate, use_fcrn=True)

        if args.dataset == 'scannet':
            dataset_dir = 'data/slam/scannet/'
            db = ScanNet(dataset_dir)

        elif args.dataset == 'nyu':
            dataset_dir = 'data/slam/nyu/'
            db = NYUv2(dataset_dir)

        elif args.dataset == 'tum':
            dataset_dir = 'data/slam/tum'
            db = TUM_RGBD(dataset_dir)

        else:
            raise ValueError("Dataset must be one of [kitti, scannet, nyu, tum]")

        if args.sequence is None:
            args.sequence = os.listdir(dataset_dir)[0]


    with tf.Session() as sess:

        # initialize the tracker and restore weights
        slam.set_session(sess)
        slam.start_visualization(args.cinematic, args.render_path, args.clear_points)

        for (image, intrinsics) in db.iterate_sequence(args.sequence):
            slam(image, intrinsics)
Example #25
def main():
    parser = argparse.ArgumentParser(description='Visualize predictions')
    parser.add_argument('--config_file', type=str, default=None, required=True,
                        help='Config file for params (required)')
    parser.add_argument('opts', help='see config.py for all options',
                        default=None, nargs=argparse.REMAINDER)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    if args.config_file is not None:
        cfg_from_file(args.config_file)
    if args.opts is not None:
        cfg_from_list(args.opts)

    assert_and_infer_cfg()
    print_cfg()
    visualize_location(args)
Example #26
def main(args):

    cfg = config.cfg_from_file(args.cfg)
    images = load_video(args.video)
    
    deepv2d = DeepV2D(cfg, args.model, mode=args.mode, image_dims=[None, images.shape[1], images.shape[2]],
        use_fcrn=True, is_calibrated=False, use_regressor=False)
   
    with tf.Session() as sess:
        deepv2d.set_session(sess)
        depths, poses = deepv2d(images, viz=True, iters=args.n_iters)
Example #27
    def __init__(self, args):
        # assert False, 'merge config'
        cfg_from_file(args.config)
        cfg.TRAIN.IMS_PER_BATCH = 1
        args.aug_scale = cfg.TEST.SCALES
        args.input_size = cfg.SEM.INPUT_SIZE
        self.input_size = cfg.SEM.INPUT_SIZE
        print('test scale:', args.aug_scale)
        self._cur = 0
        self.load_image = self.load_semseg_image
        if 'steel' in args.dataset:
            self.input_size = args.input_size
            self.aug_scale = args.aug_scale
            if 'train' in args.dataset:
                self.label_root = os.path.join(os.getcwd(), 'lib/datasets/data/steel/annotations/train.txt')
            elif 'test' in args.dataset:
                self.label_root = os.path.join(os.getcwd(), 'lib/datasets/data/steel/annotations/test.txt')
            else:
                self.label_root = os.path.join(os.getcwd(), 'lib/datasets/data/steel/annotations/val.txt')
            self.num_class = 5
            self.image_shape = [256, 1600]
        self.load_listname(args)
        self.pretrained_model = args.premodel
Example #28
    def __init__(self, args, quiet=False):
        self.args = args
        self.quiet = quiet

        # config
        # Reading the config
        if isinstance(args.cfg_file, str) and os.path.isfile(args.cfg_file):

            cfg_from_file(args.cfg_file)
            if args.set_cfgs is not None:
                cfg_from_list(args.set_cfgs)

        self.start_epoch = 0
        self.best_score = -1e16
        self.checkpoint = Checkpoint(args.snapshot_dir, max_n=5)

        if not quiet:
            #self.model_id = "%s" % args.run
            logdir = os.path.join(args.logdir, 'train')
            logdir_val = os.path.join(args.logdir, 'val')

            self.writer = SummaryWriter(logdir)
            self.writer_val = SummaryWriter(logdir_val)
Example #29
    def __init__(self, args):
        # assert False, 'merge config'
        cfg_from_file(args.config)
        cfg.TRAIN.IMS_PER_BATCH = 1
        args.aug_scale = cfg.TRAIN.SCALES
        print("config_name: ", args.config)
        print("trainset:", cfg.SEM.INPUT_SIZE)
        args.input_size = cfg.SEM.INPUT_SIZE
        self.input_size = cfg.SEM.INPUT_SIZE
        print('test scale:', args.aug_scale)
        self.resize_h = 0
        self.resize_w = 0
        self._cur = 0

        #    to_test = to_test_semseg(args)
        if 'cityscape' in args.dataset:
            self.input_size = args.input_size
            self.aug_scale = args.aug_scale
            if 'train_on_val' in args.dataset:
                self.label_root = os.path.join(
                    os.getcwd(),
                    'lib/datasets/data/cityscapes/annotations/Cityscape_disp_SegFlow_train.txt'
                )
            elif 'train' in args.dataset:
                self.label_root = os.path.join(
                    os.getcwd(),
                    'lib/datasets/data/cityscapes/annotations/train.txt')
            else:
                self.label_root = os.path.join(
                    os.getcwd(),
                    'lib/datasets/data/cityscapes/annotations/val.txt')
            self.num_class = 19
            self.image_shape = [1024, 2048]

        self.load_listname(args)
Example #30
def main(args):
    cfg = config.cfg_from_file(args.cfg)
    net = DeepV2D(INPUT_DIMS, cfg)

    with tf.Session() as sess:
        net.restore(sess, args.model)

        data_blob = load_test_sequence(args.sequence)
        depths = net.forward(data_blob, iters=5)
        depth = np.squeeze(depths[-1])

        fig, (ax1, ax2) = plt.subplots(2, 1)
        keyframe = data_blob['images'][0]
        keyframe = keyframe[...,::-1]/255.0
        ax1.imshow(keyframe)
        ax2.imshow(depth)
        ax1.axis('off')
        ax2.axis('off')
        plt.show()
Example #31
def main(args):

    np.random.seed(1234)
    cfg = config.cfg_from_file(args.cfg)
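    # input volume: one keyframe plus args.n_frames additional source frames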
    INPUT_DIMS = [args.n_frames + 1, cfg.INPUT.HEIGHT, cfg.INPUT.WIDTH]
    net = DeepV2D(INPUT_DIMS, cfg, use_fcrn=args.fcrn_init)

    init_mode = 'constant'
    predictions = []
    with tf.Session() as sess:
        net.restore(sess, args.model)

        if args.fcrn_init:
            net.set_fcrn_weights(sess)
            init_mode = 'fcrn'

        if args.viz:
            fig, (ax1, ax2) = plt.subplots(1, 2)

        test_path = 'nyu_data/nyu'
        for sequence in sorted(os.listdir(test_path)):
            data_blob = load_test_sequence(os.path.join(test_path, sequence),
                                           args.n_frames)
            depth_predictions = net.forward(data_blob,
                                            iters=args.n_iters,
                                            init_mode=init_mode)
            depth = np.squeeze(depth_predictions[-1])

            if args.viz:
                ax1.cla()
                ax2.cla()
                keyframe = data_blob['images'][0]
                keyframe = keyframe[..., ::-1] / 255.0
                ax1.imshow(keyframe)
                ax2.imshow(depth)
                ax1.axis('off')
                ax2.axis('off')
                plt.pause(0.05)

            predictions.append(depth)

    predictions = np.stack(predictions, axis=0)
    np.save(args.prediction_file, predictions)
Example #32
def main():
    """Main function"""

    args = parse_args()
    print('Called with args:')
    print(args)

    if not torch.cuda.is_available():
        sys.exit("Need a CUDA device to run the code.")

    if args.cuda or cfg.NUM_GPUS > 0:
        cfg.CUDA = True
    else:
        raise ValueError("Need Cuda device to run !")

    if args.dataset == "coco2017":
        cfg.TRAIN.DATASETS = ('coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 81
    elif args.dataset == "keypoints_coco2017":
        cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 2
    else:
        raise ValueError("Unexpected args.dataset: {}".format(args.dataset))

    cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    ### Adaptively adjust some configs ###
    original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
    original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
    original_num_gpus = cfg.NUM_GPUS
    if args.batch_size is None:
        args.batch_size = original_batch_size
    cfg.NUM_GPUS = torch.cuda.device_count()
    assert (args.batch_size % cfg.NUM_GPUS) == 0, \
        'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
    cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
    effective_batch_size = args.iter_size * args.batch_size
    print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size))

    print('Adaptive config changes:')
    print('    effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size))
    print('    NUM_GPUS:             %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
    print('    IMS_PER_BATCH:        %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH))

    ### Adjust learning based on batch size change linearly
    # For iter_size > 1, gradients are `accumulated`, so lr is scaled based
    # on batch_size instead of effective_batch_size
    old_base_lr = cfg.SOLVER.BASE_LR
    cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
    print('Adjust BASE_LR linearly according to batch_size change:\n'
          '    BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))

    ### Adjust solver steps
    step_scale = original_batch_size / effective_batch_size
    old_solver_steps = cfg.SOLVER.STEPS
    old_max_iter = cfg.SOLVER.MAX_ITER
    cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS))
    cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5)
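    # e.g. if original_batch_size is 16 and effective_batch_size is 32,
    # step_scale = 0.5, so STEPS and MAX_ITER are halved and the total number
    # of images seen stays roughly the same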
    print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
          '    SOLVER.STEPS: {} --> {}\n'
          '    SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS,
                                                  old_max_iter, cfg.SOLVER.MAX_ITER))

    # Scale FPN rpn_proposals collect size (post_nms_topN) in `collect` function
    # of `collect_and_distribute_fpn_rpn_proposals.py`
    #
    # post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5)
    if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN:
        cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch
        print('Scale FPN rpn_proposals collect size directly proportional to the change of IMS_PER_BATCH:\n'
              '    cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))

    if args.num_workers is not None:
        cfg.DATA_LOADER.NUM_THREADS = args.num_workers
    print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)

    ### Overwrite some solver settings from command line arguments
    if args.optimizer is not None:
        cfg.SOLVER.TYPE = args.optimizer
    if args.lr is not None:
        cfg.SOLVER.BASE_LR = args.lr
    if args.lr_decay_gamma is not None:
        cfg.SOLVER.GAMMA = args.lr_decay_gamma
    assert_and_infer_cfg()

    timers = defaultdict(Timer)

    ### Dataset ###
    timers['roidb'].tic()
    roidb, ratio_list, ratio_index = combined_roidb_for_training(
        cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
    timers['roidb'].toc()
    roidb_size = len(roidb)
    logger.info('{:d} roidb entries'.format(roidb_size))
    logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)

    # Effective training sample size for one epoch
    train_size = roidb_size // args.batch_size * args.batch_size
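    # rounded down to a multiple of batch_size; drop_last=True below discards
    # the final partial batch, so this matches what the sampler actually yields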

    batchSampler = BatchSampler(
        sampler=MinibatchSampler(ratio_list, ratio_index),
        batch_size=args.batch_size,
        drop_last=True
    )
    dataset = RoiDataLoader(
        roidb,
        cfg.MODEL.NUM_CLASSES,
        training=True)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_sampler=batchSampler,
        num_workers=cfg.DATA_LOADER.NUM_THREADS,
        collate_fn=collate_minibatch)
    dataiterator = iter(dataloader)

    ### Model ###
    maskRCNN = Generalized_RCNN()

    if cfg.CUDA:
        maskRCNN.cuda()

    ### Optimizer ###
    gn_param_nameset = set()
    for name, module in maskRCNN.named_modules():
        if isinstance(module, nn.GroupNorm):
            gn_param_nameset.add(name+'.weight')
            gn_param_nameset.add(name+'.bias')
    gn_params = []
    gn_param_names = []
    bias_params = []
    bias_param_names = []
    nonbias_params = []
    nonbias_param_names = []
    nograd_param_names = []
    for key, value in dict(maskRCNN.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                bias_params.append(value)
                bias_param_names.append(key)
            elif key in gn_param_nameset:
                gn_params.append(value)
                gn_param_names.append(key)
            else:
                nonbias_params.append(value)
                nonbias_param_names.append(key)
        else:
            nograd_param_names.append(key)
    assert (gn_param_nameset - set(nograd_param_names) - set(bias_param_names)) == set(gn_param_names)

    # Learning rate of 0 is a dummy value to be set properly at the start of training
    params = [
        {'params': nonbias_params,
         'lr': 0,
         'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
        {'params': bias_params,
         'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
         'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0},
        {'params': gn_params,
         'lr': 0,
         'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}
    ]
    # names of the parameters in each parameter group
    param_names = [nonbias_param_names, bias_param_names, gn_param_names]

    if cfg.SOLVER.TYPE == "SGD":
        optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
    elif cfg.SOLVER.TYPE == "Adam":
        optimizer = torch.optim.Adam(params)

    ### Load checkpoint
    if args.load_ckpt:
        load_name = args.load_ckpt
        logging.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])
        if args.resume:
            args.start_step = checkpoint['step'] + 1
            if 'train_size' in checkpoint:  # For backward compatibility
                if checkpoint['train_size'] != train_size:
                    print('train_size value: %d different from the one in checkpoint: %d'
                          % (train_size, checkpoint['train_size']))

            # reorder the params in optimizer checkpoint's params_groups if needed
            # misc_utils.ensure_optimizer_ckpt_params_order(param_names, checkpoint)

            # There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.
            # However it's fixed on master.
            # optimizer.load_state_dict(checkpoint['optimizer'])
            misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
        del checkpoint
        torch.cuda.empty_cache()

    if args.load_detectron:  #TODO resume for detectron weights (load sgd momentum values)
        logging.info("loading Detectron weights %s", args.load_detectron)
        load_detectron_weight(maskRCNN, args.load_detectron)

    lr = optimizer.param_groups[0]['lr']  # lr of non-bias parameters, for command line outputs.

    maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
                                 minibatch=True)

    ### Training Setups ###
    args.run_name = misc_utils.get_run_name() + '_step'
    output_dir = misc_utils.get_output_dir(args, args.run_name)
    args.cfg_filename = os.path.basename(args.cfg_file)

    if not args.no_save:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        blob = {'cfg': yaml.dump(cfg), 'args': args}
        with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
            pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)

        if args.use_tfboard:
            from tensorboardX import SummaryWriter
            # Set the Tensorboard logger
            tblogger = SummaryWriter(output_dir)

    ### Training Loop ###
    maskRCNN.train()

    CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)

    # Set index for decay steps
    decay_steps_ind = None
    for i in range(1, len(cfg.SOLVER.STEPS)):
        if cfg.SOLVER.STEPS[i] >= args.start_step:
            decay_steps_ind = i
            break
    if decay_steps_ind is None:
        decay_steps_ind = len(cfg.SOLVER.STEPS)

    training_stats = TrainingStats(
        args,
        args.disp_interval,
        tblogger if args.use_tfboard and not args.no_save else None)
    try:
        logger.info('Training starts !')
        step = args.start_step
        for step in range(args.start_step, cfg.SOLVER.MAX_ITER):

            # Warm up
            if step < cfg.SOLVER.WARM_UP_ITERS:
                method = cfg.SOLVER.WARM_UP_METHOD
                if method == 'constant':
                    warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
                elif method == 'linear':
                    alpha = step / cfg.SOLVER.WARM_UP_ITERS
                    warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
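                    # linear ramp: the factor goes from WARM_UP_FACTOR at step 0
                    # to 1.0 at step WARM_UP_ITERS, so lr rises to BASE_LR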
                else:
                    raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
                lr_new = cfg.SOLVER.BASE_LR * warmup_factor
                net_utils.update_learning_rate(optimizer, lr, lr_new)
                lr = optimizer.param_groups[0]['lr']
                assert lr == lr_new
            elif step == cfg.SOLVER.WARM_UP_ITERS:
                net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
                lr = optimizer.param_groups[0]['lr']
                assert lr == cfg.SOLVER.BASE_LR

            # Learning rate decay
            if decay_steps_ind < len(cfg.SOLVER.STEPS) and \
                    step == cfg.SOLVER.STEPS[decay_steps_ind]:
                logger.info('Decay the learning rate on step %d', step)
                lr_new = lr * cfg.SOLVER.GAMMA
                net_utils.update_learning_rate(optimizer, lr, lr_new)
                lr = optimizer.param_groups[0]['lr']
                assert lr == lr_new
                decay_steps_ind += 1

            training_stats.IterTic()
            optimizer.zero_grad()
            for inner_iter in range(args.iter_size):
                try:
                    input_data = next(dataiterator)
                except StopIteration:
                    dataiterator = iter(dataloader)
                    input_data = next(dataiterator)

                for key in input_data:
                    if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
                        input_data[key] = list(map(Variable, input_data[key]))

                net_outputs = maskRCNN(**input_data)
                training_stats.UpdateIterStats(net_outputs, inner_iter)
                loss = net_outputs['total_loss']
                loss.backward()
            optimizer.step()
            training_stats.IterToc()

            training_stats.LogIterStats(step, lr)

            if (step+1) % CHECKPOINT_PERIOD == 0:
                save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)

        # ---- Training ends ----
        # Save last checkpoint
        save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)

    except (RuntimeError, KeyboardInterrupt):
        del dataiterator
        logger.info('Save ckpt on exception ...')
        save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
        logger.info('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)

    finally:
        if args.use_tfboard and not args.no_save:
            tblogger.close()
Example #33
def main():
    """main function"""

    if not torch.cuda.is_available():
        sys.exit("Need a CUDA device to run the code.")

    args = parse_args()
    print('Called with args:')
    print(args)

    assert args.image_dir or args.images
    assert bool(args.image_dir) ^ bool(args.images)

    if args.dataset.startswith("coco"):
        dataset = datasets.get_coco_dataset()
        cfg.MODEL.NUM_CLASSES = len(dataset.classes)
    elif args.dataset.startswith("keypoints_coco"):
        dataset = datasets.get_coco_dataset()
        cfg.MODEL.NUM_CLASSES = 2
    else:
        raise ValueError('Unexpected dataset name: {}'.format(args.dataset))

    print('load cfg from file: {}'.format(args.cfg_file))
    cfg_from_file(args.cfg_file)

    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    assert bool(args.load_ckpt) ^ bool(args.load_detectron), \
        'Exactly one of --load_ckpt and --load_detectron should be specified.'
    cfg.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS = False  # Don't need to load imagenet pretrained weights
    assert_and_infer_cfg()

    maskRCNN = Generalized_RCNN()

    if args.cuda:
        maskRCNN.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        print("loading checkpoint %s" % (load_name))
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])

    if args.load_detectron:
        print("loading detectron weights %s" % args.load_detectron)
        load_detectron_weight(maskRCNN, args.load_detectron)

    maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
                                 minibatch=True, device_ids=[0])  # only support single GPU

    maskRCNN.eval()
    if args.image_dir:
        imglist = misc_utils.get_imagelist_from_dir(args.image_dir)
    else:
        imglist = args.images
    num_images = len(imglist)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    for i in xrange(num_images):
        print('img', i)
        im = cv2.imread(imglist[i])
        assert im is not None

        timers = defaultdict(Timer)

        cls_boxes, cls_segms, cls_keyps = im_detect_all(maskRCNN, im, timers=timers)

        im_name, _ = os.path.splitext(os.path.basename(imglist[i]))
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            dataset=dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2
        )

    if args.merge_pdfs and num_images > 1:
        merge_out_path = '{}/results.pdf'.format(args.output_dir)
        if os.path.exists(merge_out_path):
            os.remove(merge_out_path)
        command = "pdfunite {}/*.pdf {}".format(args.output_dir,
                                                merge_out_path)
        subprocess.call(command, shell=True)
Example #34
            p_tensor.copy_(torch.Tensor(src_blobs[d_name]))


def resnet_weights_name_pattern():
    pattern = re.compile(r"conv1_w|conv1_gn_[sb]|res_conv1_.+|res\d_\d_.+")
    return pattern


if __name__ == '__main__':
    """Testing"""
    from pprint import pprint
    import sys
    sys.path.insert(0, '..')
    from modeling.model_builder import Generalized_RCNN
    from core.config import cfg, cfg_from_file

    cfg.MODEL.NUM_CLASSES = 81
    cfg_from_file('../../cfgs/res50_mask.yml')
    net = Generalized_RCNN()

    # pprint(list(net.state_dict().keys()), width=1)

    mapping, orphans = net.detectron_weight_mapping
    state_dict = net.state_dict()

    for k in mapping.keys():
        assert k in state_dict, '%s' % k

    rest = set(state_dict.keys()) - set(mapping.keys())
    assert len(rest) == 0
Example #35
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    if args.exp_dir is not None:
        cfg.EXP_DIR = args.exp_dir

    cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(cfg)

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    output_dir_name = 'test'
    if args.datasets: