Example #1
    def __init__(self, modelpath):
        self.model_path = modelpath
        self.satellite_names = [u'BG', u'Planes', u'Ships', u'Helicopter', u'Vehicles', u'Bridges', u'Buildings',
                       u'Parking Lots', u'Satellite Dish', u'Solar Panels', u'Storage Tank', u'Swimming Pool',
                       u'Sports Stadium/Field', u'Shipping Containers', u'Crane', u'Train', u'Mil Vehicles',
                       u'Missiles/Missile Systems', u'Comms Towers']

        self.args = parser()
        update_config(self.args.cfg)
        if self.args.set_cfg_list:
            update_config_from_list(self.args.set_cfg_list)
        # Use just the first GPU for demo
        self.context = [mx.gpu(int(config.gpus[0]))]
        if not os.path.isdir(config.output_path):
            os.mkdir(config.output_path)

        logger, output_path = create_logger(config.output_path, self.args.cfg, config.dataset.image_set)
        # Pack db info
        self.db_info = EasyDict()
        self.db_info.name = 'coco'
        self.db_info.result_path = 'data/demo'

        self.db_info.classes = self.satellite_names
        self.db_info.num_classes = len(self.db_info.classes)

        # Create the model
        sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
        self.sym_inst = sym_def(n_proposals=400, test_nbatch=1)
        self.sym = self.sym_inst.get_symbol_rcnn(config, is_train=False)
        self.model_prefix = os.path.join(output_path, self.args.save_prefix)
        start = timeit.default_timer()

        self.arg_params, self.aux_params = load_param(self.model_prefix, config.TEST.TEST_EPOCH,
                                            convert=True, process=True)
        stop = timeit.default_timer()
def main():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    context = [mx.gpu(int(gpu)) for gpu in config.gpus.split(',')]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Create roidb
    roidb, imdb = load_proposal_roidb(config.dataset.dataset, config.dataset.test_image_set, config.dataset.root_path,
                                      config.dataset.dataset_path,
                                      proposal=config.dataset.proposal, only_gt=True, flip=False,
                                      result_path=config.output_path,
                                      proposal_path=config.proposal_path, get_imdb=True)

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)
    print(output_path)
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                        convert=True, process=True)

    sym_inst = eval('{}.{}'.format(config.symbol, config.symbol))
    if config.TEST.EXTRACT_PROPOSALS:
        imdb_proposal_extraction_wrapper(sym_inst, config, imdb, roidb, context, arg_params, aux_params, args.vis)
    else:
        imdb_detection_wrapper(sym_inst, config, imdb, roidb, context, arg_params, aux_params, args.vis)
Example #3
def main():
    args = parser()
    update_config(args.cfg)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{'image': args.im_path, 'width': width, 'height': height, 'flipped': False}]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)

    # Create the model and initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                        convert=True, process=True)

    # Get the symbol definition
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = [u'BG', u'person', u'bicycle', u'car', u'motorcycle', u'airplane',
                       u'bus', u'train', u'truck', u'boat', u'traffic light', u'fire hydrant',
                       u'stop sign', u'parking meter', u'bench', u'bird', u'cat', u'dog', u'horse', u'sheep', u'cow',
                       u'elephant', u'bear', u'zebra', u'giraffe', u'backpack', u'umbrella', u'handbag', u'tie',
                       u'suitcase', u'frisbee', u'skis', u'snowboard', u'sports\nball', u'kite', u'baseball\nbat',
                       u'baseball glove', u'skateboard', u'surfboard', u'tennis racket', u'bottle', u'wine\nglass',
                       u'cup', u'fork', u'knife', u'spoon', u'bowl', u'banana', u'apple', u'sandwich', u'orange',
                       u'broccoli', u'carrot', u'hot dog', u'pizza', u'donut', u'cake', u'chair', u'couch',
                       u'potted plant', u'bed', u'dining table', u'toilet', u'tv', u'laptop', u'mouse', u'remote',
                       u'keyboard', u'cell phone', u'microwave', u'oven', u'toaster', u'sink', u'refrigerator', u'book',
                       u'clock', u'vase', u'scissors', u'teddy bear', u'hair\ndrier', u'toothbrush']
    db_info.num_classes = len(db_info.classes)

    # Perform detection for each scale in parallel
    p_args = []
    for s in config.TEST.SCALES:
        p_args.append([s, context, config, sym_def, roidb, db_info, arg_params, aux_params])
    pool = Pool(len(config.TEST.SCALES))
    all_detections = pool.map(scale_worker, p_args)

    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    all_detections = tester.aggregate(all_detections, vis=True, cache_name=None, vis_path='./data/demo/',
                                          vis_name='demo_detections')
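
Note: the scale_worker passed to pool.map above is not shown on this page. As a rough sketch, each worker presumably repeats the per-scale steps spelled out by the sequential examples further down (e.g. Example #8), assuming the same MNIteratorTest/Tester API:

def scale_worker(arguments):
    # Hypothetical per-scale worker; unpacks the argument list built in p_args above
    s, context, config, sym_def, roidb, db_info, arg_params, aux_params = arguments

    # Build the symbol and a test iterator for this scale
    sym_inst = sym_def(n_proposals=400, test_nbatch=1)
    sym = sym_inst.get_symbol_rcnn(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb, config=config, batch_size=1, nGPUs=1, threads=1,
                               crop_size=None, test_scale=s, num_classes=db_info.num_classes)

    # Bind a module and load the pretrained weights
    sym_inst.infer_shape(dict(test_iter.provide_data_single))
    mod = mx.mod.Module(symbol=sym, context=context,
                        data_names=[k[0] for k in test_iter.provide_data_single],
                        label_names=None)
    mod.bind(test_iter.provide_data, test_iter.provide_label, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Run detection at this scale and return the raw per-class results
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)
    tester.set_scale(s)
    return tester.get_detections(vis=False, evaluate=False, cache_name=None)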
Example #4
def get_config():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    import yaml
    from easydict import EasyDict as edict
    with open(args.cfg) as f:
        exp_config = edict(yaml.safe_load(f))
    return exp_config
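
A quick, hedged usage sketch for the helper above: the returned EasyDict mirrors the YAML file, so nested sections can be read with attribute access (the exact field names depend on the config; the ones below follow the configs used elsewhere on this page):

exp_config = get_config()
# nested YAML sections become nested EasyDicts, so dot access works
print(exp_config.dataset.image_set)
print(exp_config.TEST.SCALES)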
Example #5
    def __init__(self, modelpath):
        self.model_path = modelpath
        

        #self.args = parser()

        self.cfg = '/sniper/service/models/SNIPER/sniper_utils/configs/faster/sniper_res101_e2e_mask_pred_satellite.yml'
        self.save_prefix = "SNIPER"
        

        update_config(self.cfg)

        self.satellite_names = [u'BG', u'Planes', u'Ships', u'Helicopter', u'Vehicles', u'Bridges', u'Buildings',
                       u'Parking Lots', u'Satellite Dish', u'Solar Panels', u'Storage Tank', u'Swimming Pool',
                       u'Sports Stadium/Field', u'Shipping Containers', u'Crane', u'Train', u'Mil Vehicles',
                       u'Missiles/Missile Systems', u'Comms Towers']
        # self.satellite_names = [u'BG', u'Planes', u'Ships', u'Helicopter', u'Vehicles', u'Buildings',
        #                u'Parking Lots',  u'Storage Tank', u'Swimming Pool',
        #                u'Sports Stadium/Field', u'Shipping Containers', u'Crane',  u'Comms Towers']

        assert config.dataset.NUM_CLASSES == len(self.satellite_names), "Incorrect specification of classes"


        # Use just the first GPU for demo
        self.context = [mx.gpu(int(config.gpus[0]))]
        config.output_path = "/sniper/service/checkpoint/sniper_res_101_bn_mask_satellite_18"
        if not os.path.isdir(config.output_path):
            os.mkdir(config.output_path)

        logger, output_path = create_logger(config.output_path, self.cfg, config.dataset.image_set)
        # Pack db info
        self.db_info = EasyDict()
        self.db_info.name = 'coco'
        self.db_info.result_path = 'data/demo'

        self.db_info.classes = self.satellite_names
        self.db_info.num_classes = len(self.db_info.classes)

        # Create the model
        sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
        self.sym_inst = sym_def(n_proposals=400, test_nbatch=1)
        self.sym = self.sym_inst.get_symbol_rcnn(config, is_train=False)
        self.model_prefix = os.path.join(output_path, self.save_prefix)
        config.TEST.TEST_EPOCH = 30
        self.arg_params, self.aux_params = load_param(self.model_prefix, config.TEST.TEST_EPOCH,
                                            convert=True, process=True)
        
        print("Loading {}_{}.params".format(self.model_prefix, config.TEST.TEST_EPOCH))
def main():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Create roidb
    #print(config)

    roidb, imdb = load_proposal_roidb(config.dataset.dataset, config.dataset.test_image_set, config.dataset.root_path,
                                      config.dataset.dataset_path,
                                      proposal=config.dataset.proposal, only_gt=True, flip=False,
                                      result_path=config.output_path,
                                      proposal_path=config.proposal_path, get_imdb=True)
    #roidb = roidb[:100]
    #check_point = torch.load('output/faster_rcnn_1220_0_19000.pth')

    load_name = 'output/nofix_3_15000.pth'
    #load_name = 'output/faster_rcnn_jwyang.pth'

    # faster-rcnn
    fasterRCNN = resnet(config.dataset.NUM_CLASSES, 101, pretrained=True, class_agnostic=config.CLASS_AGNOSTIC)
    # init weight
    fasterRCNN.create_architecture()
    print("load checkpoint %s" % (load_name))
    checkpoint = torch.load(load_name)

    from collections import OrderedDict
    new_state_dict = OrderedDict()
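    # Checkpoints saved from an nn.DataParallel-wrapped model prefix every parameter
    # name with 'module.'; strip that prefix so the keys match the bare fasterRCNN below.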
    for k, v in checkpoint['model'].items():
        if k[0:6] == 'module':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v

    fasterRCNN.load_state_dict(new_state_dict)

    fasterRCNN.cuda()
    fasterRCNN = nn.DataParallel(fasterRCNN)
    fasterRCNN.eval()
    if config.TEST.EXTRACT_PROPOSALS:
        imdb_proposal_extraction_wrapper(sym_inst, config, imdb, roidb, context, arg_params, aux_params, args.vis)
    else:
        imdb_detection_wrapper(fasterRCNN, config, imdb, roidb)
Example #7
def main():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # Use just the first GPU for demo
    context = [mx.gpu(1)]
    #context = [mx.gpu(int(gpu)) for gpu in config.gpus.split(',')]
    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    with open(trainpath + trainsetfile) as f:
        count = 1
        cnt = 0
        annoid = 0
        for line in f:
            cnt += 1
            #if cnt > 1000:
            #    break
            #print line
            addtxtpath = os.path.join(annoaddpath, line.strip() + '_person' + '.txt')
            if os.path.exists(addtxtpath):
                print(addtxtpath)
                continue
            # line + .jpg
            imagepath = os.path.join(datapath, line.strip() + '.jpg')
            # no obstacle annotation for this image currently, so drop it
            txtpath = os.path.join(annopath, line.strip() + '.txt')
            if not os.path.exists(txtpath):
                print(txtpath)
                continue
            #print imagepath
            if not os.path.exists(imagepath):
                imagepath = os.path.join(datapath2, line.strip() + '.jpg')
                if not os.path.exists(imagepath):
                    imagepath = os.path.join(datapath3, line.strip() + '.jpg')
            if not os.path.exists(imagepath):
                continue

            

            #im = cv2.imread(imagepath)
                
            #height, width, _ = im.shape
            height = 1200
            width = 1920
            #print cnt
            if cnt % 1000 == 0:
                print(cnt)
            #width, height = Image.open(imagepath).size

            # Pack image info
            roidb = [{'image': imagepath, 'width': width, 'height': height, 'flipped': False}]

            # Creating the Logger
            logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)

            # Pack db info
            db_info = EasyDict()
            db_info.name = 'coco'
            db_info.result_path = 'data/demo'

            # Categories the detector trained for:
            
            db_info.classes = [u'BG', u'person', u'bicycle', u'car', u'motorcycle', u'airplane',
                        u'bus', u'train', u'truck', u'boat', u'traffic light', u'fire hydrant',
                        u'stop sign', u'parking meter', u'bench', u'bird', u'cat', u'dog', u'horse', u'sheep', u'cow',
                        u'elephant', u'bear', u'zebra', u'giraffe', u'backpack', u'umbrella', u'handbag', u'tie',
                        u'suitcase', u'frisbee', u'skis', u'snowboard', u'sports\nball', u'kite', u'baseball\nbat',
                        u'baseball glove', u'skateboard', u'surfboard', u'tennis racket', u'bottle', u'wine\nglass',
                        u'cup', u'fork', u'knife', u'spoon', u'bowl', u'banana', u'apple', u'sandwich', u'orange',
                        u'broccoli', u'carrot', u'hot dog', u'pizza', u'donut', u'cake', u'chair', u'couch',
                        u'potted plant', u'bed', u'dining table', u'toilet', u'tv', u'laptop', u'mouse', u'remote',
                        u'keyboard', u'cell phone', u'microwave', u'oven', u'toaster', u'sink', u'refrigerator', u'book',
                        u'clock', u'vase', u'scissors', u'teddy bear', u'hair\ndrier', u'toothbrush']
            '''
            db_info.classes = [u'BG', u'car', u'bus', u'truck', u'person', u'bicycle', u'tricycle', u'block']
            '''
            db_info.num_classes = len(db_info.classes)

            # Create the model
            sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
            sym_inst = sym_def(n_proposals=400, test_nbatch=1)
            sym = sym_inst.get_symbol_rcnn(config, is_train=False)
            test_iter = MNIteratorTest(roidb=roidb, config=config, batch_size=1, nGPUs=1, threads=1,
                                    crop_size=None, test_scale=config.TEST.SCALES[0],
                                    num_classes=db_info.num_classes)
            # Create the module
            shape_dict = dict(test_iter.provide_data_single)
            sym_inst.infer_shape(shape_dict)
            mod = mx.mod.Module(symbol=sym,
                                context=context,
                                data_names=[k[0] for k in test_iter.provide_data_single],
                                label_names=None)
            mod.bind(test_iter.provide_data, test_iter.provide_label, for_training=False)

            # Initialize the weights
            model_prefix = os.path.join(output_path, args.save_prefix)
            arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                                convert=True, process=True)
            mod.init_params(arg_params=arg_params, aux_params=aux_params)

            # Create the tester
            tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)

            # Sequentially do detection over scales
            # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
            all_detections= []
            for s in config.TEST.SCALES:
                # Set tester scale
                tester.set_scale(s)
                # Perform detection
                all_detections.append(tester.get_detections(vis=False, evaluate=False, cache_name=None))

            # Aggregate results from multiple scales and perform NMS
            tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
            file_name, out_extension = os.path.splitext(os.path.basename(imagepath))
            #print('>>> all detections {}'.format(all_detections))
            # index of the last '/' in the relative path (-1 if there is none)
            last_position = line.rfind("/")
            dirpath = os.path.join(annoaddpath, line.strip()[0:last_position])
            if not os.path.isdir(dirpath):
                os.makedirs(dirpath)
            all_detections = tester.aggregate(all_detections, vis=True, cache_name=None, vis_path='/home/luyujie/addAnno_txt_vis',
                                                vis_name='{}'.format(file_name), vis_ext=out_extension, addtxtpath = os.path.join(annoaddpath, line.strip() + '_person' + '.txt'))

            s = str(cnt).zfill(12)
            newimgpath = os.path.join(outputpath, 'images/train2014', 'COCO_train2014_' + s + '.jpg')
            #shutil.copy(imagepath, newimgpath)

    return all_detections
Example #8
def main():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{'image': args.im_path, 'width': width, 'height': height, 'flipped': False}]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = [u'BG', u'person', u'bicycle', u'car', u'motorcycle', u'airplane',
                       u'bus', u'train', u'truck', u'boat', u'traffic light', u'fire hydrant',
                       u'stop sign', u'parking meter', u'bench', u'bird', u'cat', u'dog', u'horse', u'sheep', u'cow',
                       u'elephant', u'bear', u'zebra', u'giraffe', u'backpack', u'umbrella', u'handbag', u'tie',
                       u'suitcase', u'frisbee', u'skis', u'snowboard', u'sports\nball', u'kite', u'baseball\nbat',
                       u'baseball glove', u'skateboard', u'surfboard', u'tennis racket', u'bottle', u'wine\nglass',
                       u'cup', u'fork', u'knife', u'spoon', u'bowl', u'banana', u'apple', u'sandwich', u'orange',
                       u'broccoli', u'carrot', u'hot dog', u'pizza', u'donut', u'cake', u'chair', u'couch',
                       u'potted plant', u'bed', u'dining table', u'toilet', u'tv', u'laptop', u'mouse', u'remote',
                       u'keyboard', u'cell phone', u'microwave', u'oven', u'toaster', u'sink', u'refrigerator', u'book',
                       u'clock', u'vase', u'scissors', u'teddy bear', u'hair\ndrier', u'toothbrush']
    db_info.num_classes = len(db_info.classes)

    # Create the model
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
    sym_inst = sym_def(n_proposals=400, test_nbatch=1)
    sym = sym_inst.get_symbol_rcnn(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb, config=config, batch_size=1, nGPUs=1, threads=1,
                               crop_size=None, test_scale=config.TEST.SCALES[0],
                               num_classes=db_info.num_classes)
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(symbol=sym,
                        context=context,
                        data_names=[k[0] for k in test_iter.provide_data_single],
                        label_names=None)
    mod.bind(test_iter.provide_data, test_iter.provide_label, for_training=False)

    # Initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                        convert=True, process=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Create the tester
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)

    # Sequentially do detection over scales
    # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
    all_detections= []
    for s in config.TEST.SCALES:
        # Set tester scale
        tester.set_scale(s)
        # Perform detection
        all_detections.append(tester.get_detections(vis=False, evaluate=False, cache_name=None))

    # Aggregate results from multiple scales and perform NMS
    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    file_name, out_extension = os.path.splitext(os.path.basename(args.im_path))
    all_detections = tester.aggregate(all_detections, vis=True, cache_name=None, vis_path='./data/demo/',
                                          vis_name='{}_detections'.format(file_name), vis_ext=out_extension)
    return all_detections
def main():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # Use just the first GPU for demo
    context = [mx.gpu(5)]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{
        'image': args.im_path,
        'width': width,
        'height': height,
        'flipped': False
    }]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg,
                                        config.dataset.image_set)

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = [
        u'BG', u'car', u'van', u'bus', u'truck', u'forklift', u'person',
        u'person-sitting', u'bicycle', u'motor', u'open-tricycle',
        u'close-tricycle', u'water-block', u'cone-block', u'other-block',
        u'crash-block', u'triangle-block', u'warning-block', u'small-block',
        u'small-block', u'large-block', u'bicycle-group', u'person-group',
        u'motor-group', u'parked-bicycle', u'parked-motor', u'cross-car'
    ]
    db_info.num_classes = len(db_info.classes)

    # Create the model
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
    sym_inst = sym_def(n_proposals=400, test_nbatch=1)
    sym = sym_inst.get_symbol_rcnn(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb,
                               config=config,
                               batch_size=1,
                               nGPUs=1,
                               threads=1,
                               crop_size=None,
                               test_scale=config.TEST.SCALES[0],
                               num_classes=db_info.num_classes)
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(
        symbol=sym,
        context=context,
        data_names=[k[0] for k in test_iter.provide_data_single],
        label_names=None)
    mod.bind(test_iter.provide_data,
             test_iter.provide_label,
             for_training=False)

    # Initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix,
                                        config.TEST.TEST_EPOCH,
                                        convert=True,
                                        process=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Create the tester
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)

    # Sequentially do detection over scales
    # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
    all_detections = []
    for s in config.TEST.SCALES:
        # Set tester scale
        tester.set_scale(s)
        # Perform detection
        all_detections.append(
            tester.get_detections(vis=False, evaluate=False, cache_name=None))

    # Aggregate results from multiple scales and perform NMS
    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    file_name, out_extension = os.path.splitext(os.path.basename(args.im_path))
    all_detections = tester.aggregate(
        all_detections,
        vis=True,
        cache_name=None,
        vis_path='./data/demo/',
        vis_name='{}_detections'.format(file_name),
        vis_ext=out_extension)
    return all_detections
Example #10
def main():
    args = parser()
    update_config(args.cfg)

    # Use just the first GPU for demo
    if args.use_gpu:
        context = [mx.gpu(int(config.gpus[0]))]
    else:
        context = [mx.cpu()]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{
        'image': args.im_path,
        'width': width,
        'height': height,
        'flipped': False
    }]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg,
                                        config.dataset.image_set)

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = [
        'Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter',
        'Passenger Vehicle', 'Small Car', 'Bus', 'Pickup Truck',
        'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box',
        'Truck Tractor', 'Trailer', 'Truck w/Flatbed', 'Truck w/Liquid',
        'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car',
        'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat',
        'Sailboat', 'Tugboat', 'Barge', 'Fishing Vessel', 'Ferry', 'Yacht',
        'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane',
        'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane',
        'Dump Truck', 'Haul Truck', 'Scraper/Tractor',
        'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader',
        'Hut/Tent', 'Shed', 'Building', 'Aircraft Hangar', 'Damaged Building',
        'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad',
        'Storage Tank', 'Shipping container lot', 'Shipping Container',
        'Pylon', 'Tower'
    ]
    db_info.num_classes = len(db_info.classes)

    # Create the model
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
    sym_inst = sym_def(n_proposals=400, test_nbatch=1)
    sym = sym_inst.get_symbol_rcnn(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb,
                               config=config,
                               batch_size=1,
                               nGPUs=1,
                               threads=1,
                               crop_size=None,
                               test_scale=config.TEST.SCALES[args.scale_index],
                               num_classes=db_info.num_classes)
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(
        symbol=sym,
        context=context,
        data_names=[k[0] for k in test_iter.provide_data_single],
        label_names=None)
    mod.bind(test_iter.provide_data,
             test_iter.provide_label,
             for_training=False)

    # Initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix,
                                        config.TEST.TEST_EPOCH,
                                        convert=True,
                                        process=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Create the tester
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)

    # Set tester scale
    # print("args.chip_size * config.TEST.SCALES[args.scale_index]",args.chip_size * config.TEST.SCALES[args.scale_index], args.chip_size ,config.TEST.SCALES[args.scale_index])
    tester.set_scale(config.TEST.SCALES[args.scale_index])
    # Perform detection

    res = tester.get_detections(vis=False, evaluate=False, cache_name=None)
    folder_name = os.path.dirname(args.im_path)
    file_name = os.path.join(folder_name, str(args.scale_index) + ".pkl")
    with open(file_name, 'wb') as handle:
        pickle.dump(res, handle)
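
The per-scale pickles written above presumably get merged in a later step. A minimal sketch of such a merge, reusing the aggregation pattern from the other examples on this page (the file layout, helper name, and aggregate argument defaults are assumptions, not part of the original code):

def aggregate_saved_scales(folder_name, config, db_info, roidb):
    # hypothetical helper: load the per-scale detections dumped by main() above
    all_detections = []
    for scale_index in range(len(config.TEST.SCALES)):
        with open(os.path.join(folder_name, str(scale_index) + ".pkl"), 'rb') as handle:
            all_detections.append(pickle.load(handle))

    # same aggregation/NMS step as the single-process examples
    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    return tester.aggregate(all_detections, vis=False, cache_name=None)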
Example #11
def generate_detections():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{
        'image': args.im_path,
        'width': width,
        'height': height,
        'flipped': False
    }]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg,
                                        config.dataset.image_set)

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = classes = [
        'Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter',
        'Passenger Vehicle', 'Small Car', 'Bus', 'Pickup Truck',
        'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box',
        'Truck Tractor', 'Trailer', 'Truck w/Flatbed', 'Truck w/Liquid',
        'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car',
        'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat',
        'Sailboat', 'Tugboat', 'Barge', 'Fishing Vessel', 'Ferry', 'Yacht',
        'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane',
        'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane',
        'Dump Truck', 'Haul Truck', 'Scraper/Tractor',
        'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader',
        'Hut/Tent', 'Shed', 'Building', 'Aircraft Hangar', 'Damaged Building',
        'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad',
        'Storage Tank', 'Shipping container lot', 'Shipping Container',
        'Pylon', 'Tower'
    ]
    db_info.num_classes = len(db_info.classes)

    # Create the model
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
    sym_inst = sym_def(n_proposals=4000, test_nbatch=1)
    sym = sym_inst.get_symbol_rcnn(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb,
                               config=config,
                               batch_size=1,
                               nGPUs=1,
                               threads=1,
                               crop_size=None,
                               test_scale=config.TEST.SCALES[0],
                               num_classes=db_info.num_classes)
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(
        symbol=sym,
        context=context,
        data_names=[k[0] for k in test_iter.provide_data_single],
        label_names=None)
    mod.bind(test_iter.provide_data,
             test_iter.provide_label,
             for_training=False)

    # Initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix,
                                        config.TEST.TEST_EPOCH,
                                        convert=True,
                                        process=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Create the tester
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)

    # Sequentially do detection over scales
    # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
    all_detections = []
    for s in config.TEST.SCALES:
        # Set tester scale
        tester.set_scale(s)
        # Perform detection
        all_detections.append(
            tester.get_detections(vis=False, evaluate=False, cache_name=None))

    # Aggregate results from multiple scales and perform NMS
    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    file_name, out_extension = os.path.splitext(os.path.basename(args.im_path))
    all_detections = tester.aggregate(
        all_detections,
        vis=True,
        cache_name=None,
        vis_path='./data/demo/',
        vis_name='{}_detections'.format(file_name),
        vis_ext=out_extension)
    return all_detections
Example #12
def main():
    args = parser()
    update_config(args.cfg)

    im = cv2.cvtColor(cv2.imread(args.im_path), cv2.COLOR_BGR2RGB)

    arr = np.array(im)
    origin_width, origin_height, _ = arr.shape
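    # NOTE: numpy's arr.shape is (rows, cols, channels), so 'origin_width' here actually
    # holds the row count and 'origin_height' the column count; the padding and chipping
    # code below follows the same convention.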
    # portion = smart_chipping(origin_width, origin_height)
    # portion = 1120
    portion = 1080

    cwn, chn = (portion, portion)
    wn, hn = (int(origin_width / cwn), int(origin_height / chn))
    padding_y = int(
        math.ceil(float(origin_height) / chn) * chn - origin_height)
    padding_x = int(math.ceil(float(origin_width) / cwn) * cwn - origin_width)
    print("padding_y,padding_x, origin_height, origin_width", padding_y,
          padding_x, origin_height, origin_width)
    # top, bottom, left, right - border width in number of pixels in corresponding directions
    im = cv2.copyMakeBorder(im,
                            0,
                            padding_x,
                            0,
                            padding_y,
                            cv2.BORDER_CONSTANT,
                            value=[0, 0, 0])
    # the section below could be optimized. but basically the idea is to re-calculate all the values
    arr = np.array(im)
    width, height, _ = arr.shape
    cwn, chn = (portion, portion)
    wn, hn = (int(width / cwn), int(height / chn))
    img_name_folder = args.output_folder
    width_chip_num, height_chip_num = chip_image(im, (portion, portion),
                                                 img_name_folder)

    num_preds = int(5000 * math.ceil(float(portion) / 400))
    boxes, scores, classes = generate_detections(width_chip_num,
                                                 height_chip_num, num_preds,
                                                 portion)
    print("boxes shape is", boxes.shape, "wn, hn", wn, hn, "width, height",
          width, height)
    bfull = boxes.reshape((wn, hn, num_preds, 4))
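    # bfull[i, j] holds the boxes predicted on chip (i, j) in chip-local coordinates;
    # the loop below shifts each chip's boxes by its offset to map them back onto the
    # full image, then clips them to the original extent.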

    for i in range(wn):
        for j in range(hn):
            bfull[i, j, :, 0] += j * cwn
            bfull[i, j, :, 2] += j * cwn

            bfull[i, j, :, 1] += i * chn
            bfull[i, j, :, 3] += i * chn

            # clip values
            bfull[i, j, :, 0] = np.clip(bfull[i, j, :, 0], 0, origin_height)
            bfull[i, j, :, 2] = np.clip(bfull[i, j, :, 2], 0, origin_height)

            bfull[i, j, :, 1] = np.clip(bfull[i, j, :, 1], 0, origin_width)
            bfull[i, j, :, 3] = np.clip(bfull[i, j, :, 3], 0, origin_width)

    bfull = bfull.reshape((hn * wn, num_preds, 4))
    scores = scores.reshape((hn * wn, num_preds))
    classes = classes.reshape((hn * wn, num_preds))

    # only display boxes with confidence > .5
    bs = bfull[scores > 0.5]
    cs = classes[scores > 0.5]
    draw_bboxes(arr, bs,
                cs).save(os.path.join(args.output_folder, "prediction.jpg"))

    score_thres = 0.1
    # if bs.shape[0] > scoring_line_threshold:
    # too many predictions, we should trim the low confidence ones
    with open(args.output_file, 'w') as f:
        for i in range(bfull.shape[0]):
            for j in range(bfull[i].shape[0]):
                # box should be xmin ymin xmax ymax
                box = bfull[i, j]
                class_prediction = classes[i, j]
                score_prediction = scores[i, j]
                if int(class_prediction) != 0 and score_prediction >= score_thres:
                    f.write('%d %d %d %d %d %f \n' %
                            (box[0], box[1], box[2], box[3],
                             int(class_prediction), score_prediction))

    print('done')
Example #13
def main():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{'image': args.im_path, 'width': width, 'height': height, 'flipped': False}]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = [u'BG', u'person', u'bicycle', u'car', u'motorcycle', u'airplane',
                       u'bus', u'train', u'truck', u'boat', u'traffic light', u'fire hydrant',
                       u'stop sign', u'parking meter', u'bench', u'bird', u'cat', u'dog', u'horse', u'sheep', u'cow',
                       u'elephant', u'bear', u'zebra', u'giraffe', u'backpack', u'umbrella', u'handbag', u'tie',
                       u'suitcase', u'frisbee', u'skis', u'snowboard', u'sports\nball', u'kite', u'baseball\nbat',
                       u'baseball glove', u'skateboard', u'surfboard', u'tennis racket', u'bottle', u'wine\nglass',
                       u'cup', u'fork', u'knife', u'spoon', u'bowl', u'banana', u'apple', u'sandwich', u'orange',
                       u'broccoli', u'carrot', u'hot dog', u'pizza', u'donut', u'cake', u'chair', u'couch',
                       u'potted plant', u'bed', u'dining table', u'toilet', u'tv', u'laptop', u'mouse', u'remote',
                       u'keyboard', u'cell phone', u'microwave', u'oven', u'toaster', u'sink', u'refrigerator', u'book',
                       u'clock', u'vase', u'scissors', u'teddy bear', u'hair\ndrier', u'toothbrush']
    db_info.num_classes = len(db_info.classes)

    # Create the model
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
    sym_inst = sym_def(n_proposals=400, test_nbatch=1)
    sym = sym_inst.get_symbol_rcnn(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb, config=config, batch_size=1, nGPUs=1, threads=1,
                               crop_size=None, test_scale=config.TEST.SCALES[0],
                               num_classes=db_info.num_classes)
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(symbol=sym,
                        context=context,
                        data_names=[k[0] for k in test_iter.provide_data_single],
                        label_names=None)
    mod.bind(test_iter.provide_data, test_iter.provide_label, for_training=False)

    # Initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                        convert=True, process=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Create the tester
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)

    # Sequentially do detection over scales
    # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
    all_detections= []
    for s in config.TEST.SCALES:
        # Set tester scale
        tester.set_scale(s)
        # Perform detection
        all_detections.append(tester.get_detections(vis=False, evaluate=False, cache_name=None))

    # Aggregate results from multiple scales and perform NMS
    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    file_name, out_extension = os.path.splitext(os.path.basename(args.im_path))
    all_detections = tester.aggregate(all_detections, vis=True, cache_name=None, vis_path='./data/demo/',
                                          vis_name='{}_detections'.format(file_name), vis_ext=out_extension)
    return all_detections
Example #14
    arg_parser = ArgumentParser('SNIPER training module')
    arg_parser.add_argument('--cfg', dest='cfg', help='Path to the config file',
                            default='configs/faster/sniper_res101_e2e.yml', type=str)
    arg_parser.add_argument('--display', dest='display', help='Number of epochs between displaying loss info',
                            default=100, type=int)
    arg_parser.add_argument('--momentum', dest='momentum', help='BN momentum', default=0.995, type=float)
    arg_parser.add_argument('--save_prefix', dest='save_prefix', help='Prefix used for snapshotting the network',
                            default='SNIPER', type=str)

    return arg_parser.parse_args()


if __name__ == '__main__':

    args = parser()
    update_config(args.cfg)
    context = [mx.gpu(int(gpu)) for gpu in config.gpus.split(',')]
    nGPUs = len(context)
    batch_size = nGPUs * config.TRAIN.BATCH_IMAGES

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Create roidb
    image_sets = [iset for iset in config.dataset.image_set.split('+')]
    roidbs = [load_proposal_roidb(config.dataset.dataset, image_set, config.dataset.root_path,
        config.dataset.dataset_path,
        proposal=config.dataset.proposal, append_gt=True, flip=config.TRAIN.FLIP,
        result_path=config.output_path,
        proposal_path=config.proposal_path, load_mask=config.TRAIN.WITH_MASK)
        for image_set in image_sets]
Example #15
def main():
    
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)
    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo/batch_results'

    assert args.dataset in ['coco', 'dota', 'satellite']
    if args.dataset == 'coco':
        db_info.classes = coco_names
    elif args.dataset == 'dota':
        db_info.classes = dota_names
    elif args.dataset == 'satellite':
        db_info.classes = satellite_names

    db_info.num_classes = len(db_info.classes)

    roidb = []

    for img in os.listdir(args.img_dir_path):

        start = time.time()

        im_path = os.path.join(args.img_dir_path,img)

        # Get image dimensions
        width, height = Image.open(im_path).size

        # Pack image info
        #roidb = [{'image': im_path, 'width': width, 'height': height, 'flipped': False}]
        r = {'image': im_path, 'width': width, 'height': height, 'flipped': False}
        roidb.append(r)

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)
    
    #print("Creating the Model")
    # Create the model
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
    sym_inst = sym_def(n_proposals=400, test_nbatch=1)
    sym = sym_inst.get_symbol_rcnn(config, is_train=False)
    #print("Defining Test Iter")
    test_iter = MNIteratorTest(roidb=roidb, config=config, batch_size=args.batch_size, nGPUs=1, threads=1,
                                   crop_size=None, test_scale=config.TEST.SCALES[0],
                                   num_classes=db_info.num_classes)
    
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(symbol=sym,
                        context=context,
                        data_names=[k[0] for k in test_iter.provide_data_single],
                        label_names=None)
    # TODO: just to test the change of order, for refactor
    mod.bind(test_iter.provide_data, test_iter.provide_label, for_training=False)

    # Initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                            convert=True, process=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Create the tester
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=args.batch_size)

    # Sequentially do detection over scales
    # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
    all_detections= []
    all_masks = []
    print("Scales=",config.TEST.SCALES)
    print("Jobs=",config.TEST.CONCURRENT_JOBS)
    print("BATCH_IMAGES=",config.TEST.BATCH_IMAGES)
    if config.TEST.CONCURRENT_JOBS == 1:
        for s in config.TEST.SCALES:
            # Set tester scale
            tester.set_scale(s)
            # Perform detection
            detections, masks = tester.get_detections(vis=False, vis_path="./data/demo_batch/viz", evaluate=False, cache_name=None)
            all_detections.append(detections) # length = 19
            all_masks.append(masks)
            #all_detections.append(tester.get_detections(vis=False, evaluate=False, cache_name=None))

    # Aggregate results from multiple scales and perform NMS
    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=args.batch_size)
    file_name, out_extension = os.path.splitext(os.path.basename(im_path))
    all_detections, all_masks = tester.aggregateSingle(all_detections, all_masks, vis=True, cache_name=None, vis_path='./data/demo_batch/batch_results',
                                          vis_name='{}_detections'.format(file_name), vis_ext=out_extension)
Example #16
def main():
    ###################################################################################################
    # Arguments that need to be set
    # path to the image set
    mypath = './data/openimages/images/validation/'
    # adjust the number of iterations to fit the memory limit; a greater value consumes less memory but is slower
    num_iter = 1
    # write bboxes with confidence greater than the threshold to the output file
    confidence_thred = 0.
    # set output file
    submit_file_name = open('test_output/Mango_output.csv', 'w')
    # set the class names; these must match the 'classes' array in the training file exactly
    #classes = get_class_name()
    classes = [
        '__background__',
        'Mango'  # '/m/0fldg'
    ]
    ####################################################################################################

    csvwriter = csv.writer(submit_file_name, delimiter=',')
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    onlyfiles = get_image_name(classes, mypath.split('/')[4])
    num_files = len(onlyfiles)
    batch_size = num_files // num_iter  # integer division so range() below receives an int
    class_symbol = get_class_symbol()
    for i in range(num_iter):
        #if i < 8:
        #    continue
        im_path = []
        im_name = []
        for j in range(batch_size):
            im_path.append(mypath + onlyfiles[i * batch_size + j])
            im_name.append(onlyfiles[i * batch_size + j].split('.')[0])
        roidb = []

        for path in im_path:
            width, height = Image.open(path).size
            roidb.append({
                'image': path,
                'width': width,
                'height': height,
                'flipped': False
            })

        # Creating the Logger
        logger, output_path = create_logger(config.output_path, args.cfg,
                                            config.dataset.image_set)

        # Pack db info
        db_info = EasyDict()
        db_info.name = 'coco'
        db_info.result_path = 'data/demo'

        # Categories the detector trained for:
        db_info.classes = classes
        db_info.num_classes = len(db_info.classes)

        # Create the model
        sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
        #sym_inst = sym_def(n_proposals=400, test_nbatch=1)
        sym_inst = sym_def(n_proposals=400)
        sym = sym_inst.get_symbol_rcnn(config,
                                       is_train=False,
                                       num_classes=len(classes))
        test_iter = MNIteratorTest(roidb=roidb,
                                   config=config,
                                   batch_size=1,
                                   nGPUs=1,
                                   threads=1,
                                   crop_size=None,
                                   test_scale=config.TEST.SCALES[0],
                                   num_classes=db_info.num_classes)
        # Create the module
        shape_dict = dict(test_iter.provide_data_single)
        sym_inst.infer_shape(shape_dict)
        mod = mx.mod.Module(
            symbol=sym,
            context=context,
            data_names=[k[0] for k in test_iter.provide_data_single],
            label_names=None)
        mod.bind(test_iter.provide_data,
                 test_iter.provide_label,
                 for_training=False)

        # Initialize the weights
        model_prefix = os.path.join(output_path, args.save_prefix)
        arg_params, aux_params = load_param(model_prefix,
                                            config.TEST.TEST_EPOCH,
                                            convert=True,
                                            process=True)

        mod.init_params(arg_params=arg_params, aux_params=aux_params)

        # Create the tester
        tester = Tester(mod,
                        db_info,
                        roidb,
                        test_iter,
                        cfg=config,
                        batch_size=1)

        # Sequentially do detection over scales
        # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
        all_detections = []
        for s in config.TEST.SCALES:
            # Set tester scale
            tester.set_scale(s)
            # Perform detection
            all_detections.append(
                tester.get_detections(vis=False,
                                      evaluate=False,
                                      cache_name=None))

        # Aggregate results from multiple scales and perform NMS
        tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
        file_name, out_extension = os.path.splitext(os.path.basename(path))
        all_detections = tester.aggregate(all_detections,
                                          vis=False,
                                          cache_name=None,
                                          vis_path='./data/demo/',
                                          vis_name=None,
                                          vis_ext=out_extension)
        for j in range(len(im_name)):
            box_pred = []
            for k in range(1, len(classes)):
                if all_detections[k][j].shape[0] != 0:
                    for l in range(all_detections[k][j].shape[0]):
                        if all_detections[k][j][l][4] > confidence_thred:
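                            # each detection row appears to be [xmin, ymin, xmax, ymax, score];
                            # the corners are normalized by image width/height (Open Images
                            # submission style) before being written to the CSV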
                            one_box = [
                                class_symbol[k],
                                str(all_detections[k][j][l][4]),
                                str(min(all_detections[k][j][l][0] / roidb[j]['width'], 1.0)),
                                str(min(all_detections[k][j][l][1] / roidb[j]['height'], 1.0)),
                                str(min(all_detections[k][j][l][2] / roidb[j]['width'], 1.0)),
                                str(min(all_detections[k][j][l][3] / roidb[j]['height'], 1.0))
                            ]
                            box_pred.append(' '.join(one_box))
            csvwriter.writerow([im_name[j], ' '.join(box_pred)])
    submit_file_name.close()
Example #17
def main():
    args = parser()
    update_config(args.cfg)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{
        'image': args.im_path,
        'width': width,
        'height': height,
        'flipped': False
    }]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg,
                                        config.dataset.image_set)

    # Create the model and initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix,
                                        config.TEST.TEST_EPOCH,
                                        convert=True,
                                        process=True)

    # Get the symbol definition
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = [
        u'BG', u'person', u'bicycle', u'car', u'motorcycle', u'airplane',
        u'bus', u'train', u'truck', u'boat', u'traffic light', u'fire hydrant',
        u'stop sign', u'parking meter', u'bench', u'bird', u'cat', u'dog',
        u'horse', u'sheep', u'cow', u'elephant', u'bear', u'zebra', u'giraffe',
        u'backpack', u'umbrella', u'handbag', u'tie', u'suitcase', u'frisbee',
        u'skis', u'snowboard', u'sports\nball', u'kite', u'baseball\nbat',
        u'baseball glove', u'skateboard', u'surfboard', u'tennis racket',
        u'bottle', u'wine\nglass', u'cup', u'fork', u'knife', u'spoon',
        u'bowl', u'banana', u'apple', u'sandwich', u'orange', u'broccoli',
        u'carrot', u'hot dog', u'pizza', u'donut', u'cake', u'chair', u'couch',
        u'potted plant', u'bed', u'dining table', u'toilet', u'tv', u'laptop',
        u'mouse', u'remote', u'keyboard', u'cell phone', u'microwave', u'oven',
        u'toaster', u'sink', u'refrigerator', u'book', u'clock', u'vase',
        u'scissors', u'teddy bear', u'hair\ndrier', u'toothbrush'
    ]
    db_info.num_classes = len(db_info.classes)

    # Perform detection for each scale in parallel
    p_args = []
    for s in config.TEST.SCALES:
        p_args.append([
            s, context, config, sym_def, roidb, db_info, arg_params, aux_params
        ])
    pool = Pool(len(config.TEST.SCALES))
    all_detections = pool.map(scale_worker, p_args)

    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    all_detections = tester.aggregate(all_detections,
                                      vis=True,
                                      cache_name=None,
                                      vis_path='./data/demo/',
                                      vis_name='demo_detections')
Example #18
                            default='configs/faster/sniper_res_e2e.yml', type=str)
    arg_parser.add_argument('--display', dest='display', help='Number of epochs between displaying loss info',
                            default=100, type=int)
    arg_parser.add_argument('--momentum', dest='momentum', help='BN momentum', default=0.995, type=float)
    arg_parser.add_argument('--save_prefix', dest='save_prefix', help='Prefix used for snapshotting the network',
                            default='SNIPER', type=str)
    arg_parser.add_argument('--set', dest='set_cfg_list', help='Set the configuration fields from command line',
                            default=None, nargs=argparse.REMAINDER)

    return arg_parser.parse_args()


if __name__ == '__main__':

    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    context = [mx.gpu(int(gpu)) for gpu in config.gpus.split(',')]
    nGPUs = len(context)
    batch_size = nGPUs * config.TRAIN.BATCH_IMAGES
    print("batch size is", batch_size)

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Create roidb
    image_sets = [iset for iset in config.dataset.image_set.split('+')]
    roidbs = [load_proposal_roidb(config.dataset.dataset, image_set, config.dataset.root_path,
        config.dataset.dataset_path,
Example #19
import os
import argparse
import sys
import logging
import pprint
import cv2

sys.path.insert(0, 'lib')
from configs.faster.default_configs import config, update_config
import numpy as np
# get config
os.environ['PYTHONUNBUFFERED'] = '1'
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
os.environ['MXNET_ENABLE_GPU_P2P'] = '0'
cur_path = os.path.abspath(os.path.dirname(__file__))
update_config(cur_path + '/configs/faster/res101_mx_3k.yml')

import mxnet as mx
from symbols import *

from bbox.bbox_transform import bbox_pred, clip_boxes
from demo.module import MutableModule
from demo.linear_classifier import train_model, classify_rois
from demo.vis_boxes import vis_boxes
from demo.image import resize, transform
from demo.load_model import load_param
from demo.tictoc import tic, toc
from demo.nms import nms
import pickle
from symbols.faster.resnet_mx_101_e2e_3k_demo import resnet_mx_101_e2e_3k_demo, checkpoint_callback