def get_roidb(imdb_name, rpn_file=None):
    #imdb = get_imdb(imdb_name)
    imdb = caltech('train', args.dataset_path)
    print 'Loaded dataset `{:s}` for training'.format(imdb.name)
    imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
    print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
    if rpn_file is not None:
        imdb.config['rpn_file'] = rpn_file
    roidb = get_training_roidb(imdb)
    return roidb, imdb
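
# Hedged usage sketch (not in the original file): get_roidb assumes cfg has
# already been populated (e.g. via cfg_from_file) and that a parsed `args`
# namespace with a dataset_path attribute exists, as the caltech() call above
# requires. The helper name below is illustrative only.
def _example_build_training_roidb():
    roidb, imdb = get_roidb('caltech_train')
    print('{:d} roidb entries for {:s}'.format(len(roidb), imdb.name))
    return roidb, imdb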

    def evaluate_detections(self, all_boxes, output_dir):
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True
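
    # Hedged usage sketch (not part of the original class): a typical test
    # driver flips competition_mode before evaluation so result files are
    # written without a random salt and kept on disk afterwards. The method
    # name below is illustrative only.
    def example_evaluate_for_submission(self, all_boxes, output_dir):
        self.competition_mode(True)   # use_salt=False, cleanup=False
        self.evaluate_detections(all_boxes, output_dir)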


if __name__ == '__main__':
    from datasets.caltech import caltech
    d = caltech('train', '/wwx/faster_rcnn_pytorch/data/VOCdevkit')
    res = d.roidb
    from IPython import embed; embed()
    print pretrained_caffemodel
    max_iterations = 45000

    cfg_from_file(cfg_file)
    cfg.GPU_ID = 0

    # set up global caffe mode
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    caffe.set_mode_gpu()
    caffe.set_device(0)

    # set up train imdb
    #imdb = pascal_voc_person('person_trainval','2007')
    #imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
    
    imdb = caltech(train=1)
    
    roidb = get_training_roidb(imdb)
    print '{:d} roidb entries'.format(len(roidb))
    
    # setup output result directory
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)

    # train net
    train_net(train_solver, roidb, output_dir,
              pretrained_model=pretrained_caffemodel,
              max_iters=max_iterations)
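
    # Note (illustrative, not from the original script): `train_solver`,
    # `pretrained_caffemodel` and `cfg_file` are used above without their
    # definitions being shown; in a typical Faster R-CNN training script they
    # are plain paths set near the top of __main__, e.g. (hypothetical values):
    #
    #   train_solver          = 'models/pascal_voc_person/VGG16/solver.prototxt'
    #   pretrained_caffemodel = 'data/imagenet_models/VGG16.v2.caffemodel'
    #   cfg_file              = 'models/pascal_voc_person/VGG16/vgg16_faster_rcnn.yml'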

Example #4
for year in ['2007', '2012']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up inria_<split> using selective search "fast" mode
inria_devkit_path = '/opt/data/ds/pedestrians/INRIA-faster'
for split in ['train', 'test']:
    name = '{}_{}'.format('inria', split)
    __sets[name] = (lambda split=split: inria(split, inria_devkit_path))


caltech_devkit_path = '/opt/data/ds/pedestrians/CALTECH-faster'
for split in ['train', 'test']:
    name = '{}_{}'.format('caltech', split)
    __sets[name] = (lambda split=split: caltech(split, caltech_devkit_path))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))
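
# Why the lambdas above freeze `split=split, year=year` as default arguments:
# Python closures bind names, not values, so a bare `lambda: pascal_voc(split,
# year)` created inside the loop would see only the final split/year once the
# loop has finished. Binding defaults captures each iteration's value. A
# self-contained illustration (plain Python, no project code involved):
def _demo_late_binding():
    late = [lambda: s for s in ['train', 'val', 'test']]
    bound = [lambda s=s: s for s in ['train', 'val', 'test']]
    assert [f() for f in late] == ['test', 'test', 'test']
    assert [f() for f in bound] == ['train', 'val', 'test']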

def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()
               .format(self._devkit_path, self._get_comp_id(),
                       self._image_set, output_dir)
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True

if __name__ == '__main__':
    from datasets.caltech import caltech
    d = caltech('train', '/home/deeplearning_2/congyin.2017/lab2/py-faster-rcnn/data/VOCdevkit')
    res = d.roidb
    from IPython import embed; embed()
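    # Hedged note (not from the original file): in py-faster-rcnn a roidb entry
    # such as res[0] above is a dict whose typical keys include 'boxes',
    # 'gt_classes', 'gt_overlaps' and 'flipped'; inspecting them inside the
    # embed() session is a quick sanity check of the loaded annotations, e.g.
    #   res[0]['boxes'].shape   # (num_objects, 4)
    #   res[0]['gt_classes']    # per-object class indices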
for year in ['2017']:
    for split in ['train', 'val', 'trainval', 'test', 'all']:
        name = 'cam2_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: cam2(split, year))

# Set up sun_2012_<split>
for year in ['2012']:
    for split in ['taste', 'all', 'test', 'train']:
        name = 'sun_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: sun(split, year))

# Set up caltech_2009_<split>
for year in ['2009']:
    for split in ['val', 'train', 'test', 'all', 'taste', 'medium']:
        name = 'caltech_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: caltech(split, year))

# Set up kitti_2013_<split> # TODO
for year in ['2013']:
    for split in ['val', 'train', 'test', 'all']:
        name = 'kitti_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: kitti(split, year))

# Set up inria_2005_<split> # TODO
for year in ['2005']:
    for split in ['val', 'train', 'test', 'all']:
        name = 'inria_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: inria(split, year))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()
Example #7
# Set up voc_<year>_<split> using selective search "fast" mode
for year in ['2007', '2012']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

__sets['caltech_train'] = (lambda: caltech())

def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()

def list_imdbs():
    """List all registered imdbs."""
    return __sets.keys()
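
# Hedged usage sketch (not part of the original factory module): once the
# registrations above have run, datasets are constructed lazily by name; the
# helper name below is illustrative only.
def _example_factory_usage():
    print(sorted(list_imdbs()))         # e.g. ['caltech_train', 'coco_2014_train', ...]
    imdb = get_imdb('caltech_train')    # the registered lambda runs caltech() only now
    print('loaded {:s} with {:d} images'.format(imdb.name, imdb.num_images))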
Example #8
import argparse
import pprint
import time, os, sys

if __name__ == '__main__':

    gpu_id = 0
    prototxt = 'models/pascal_voc_person/VGG16/test.prototxt'
    caffemodel = 'output/faster_rcnn_end2end/caltech/stride_7_iter_20000_model/vgg16_faster_rcnn_iter_20000.caffemodel'
    cfg_file = sys.path[4] + '/' + 'models/pascal_voc_person/VGG16/vgg16_faster_rcnn.yml'
    
    prototxt = sys.path[4] + '/' + prototxt
    caffemodel = sys.path[4] + '/' + caffemodel


    cfg_from_file(cfg_file)
    cfg.GPU_ID = gpu_id

    # RPN test settings
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1
    cfg.TEST.RPN_POST_NMS_TOP_N = 400


    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(caffemodel))[0]

    imdb = caltech(test=1)
    imdb_boxes = imdb_proposals(net, imdb)
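
    # Hedged follow-up sketch (not in the original script): the proposals
    # returned by imdb_proposals are usually pickled to disk so a later Fast
    # R-CNN training stage can pick them up via imdb.config['rpn_file'].
    # The output filename below is a hypothetical example.
    import cPickle
    rpn_file = os.path.join('output', net.name + '_rpn_proposals.pkl')
    with open(rpn_file, 'wb') as f:
        cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
    print('Wrote RPN proposals to {:s}'.format(rpn_file))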
Example #9
"""Factory method for easily getting imdbs by name."""

__sets = {}

from datasets.caltech import caltech
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
import numpy as np


# Set up caltech_<version>_<split>
for version in ["all", "reasonable", "person_class"]:
    for split in ['train', 'val', 'trainval', 'test']:   # I need an 'all' split in here, meaning it will contain both data1
        name = 'caltech_{}_{}'.format(version, split)
        __sets[name] = (lambda split=split, version=version: caltech(version, split))

# Set up voc_<year>_<split> using selective search "fast" mode
for year in ['2007', '2012', "0712"]:        # I need a combined (2007+2012) set in here
    for split in ['train', 'val', 'trainval', 'test']:   # I need an 'all' split in here, meaning it will contain both data1
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))
Example #10
from __future__ import print_function

__sets = {}
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
from datasets.imagenet import imagenet
from datasets.vg import vg
from datasets.caltech import caltech
import numpy as np

# Set up caltech_<version>_<split>
caltech_devkit_path = "/data1/Datasets/caltech_pedestrian/caltech_convert"
for version in ["all", "reasonable", "person"]:
    for split in ["train", "val", "trainval", "test"]:
        name = 'caltech_{}_{}'.format(version, split)
        __sets[name] = (lambda split=split, version=version: caltech(
            version, split, devkit_path=caltech_devkit_path))

# Set up voc_<year>_<split>
for year in ['2007', '2012']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up voc_<year>_<split>
for year in ['2007', '2012']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

caltech_path = '/home/ubuntu/abhishek/attempt2/py-faster-rcnn/data/caltech'
for split in ['train', 'test']:
    name = '{}_{}'.format('caltech', split)
    __sets[name] = (lambda split=split: caltech(split, None, caltech_path))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()

def list_imdbs():
    """List all registered imdbs."""
    return __sets.keys()
__sets = {}

from datasets.inria import inria
from datasets.eth import eth
from datasets.caltech import caltech
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
import numpy as np
''' add other dataset '''

# Set up caltech_<version>_<split>
for version in ["all", "reasonable", "person"]:
    for split in ["train", "val", "trainval", "test"]:
        name = 'caltech_{}_{}'.format(version, split)
        __sets[name] = (
            lambda split=split, version=version: caltech(version, split))

# Set up voc_<year>_<split> using selective search "fast" mode
for year in ['2007', '2012', '0712']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))
        current_frames = 0
        for set_num in target_frames:
            target_path = os.path.join(output_path, set_num)
            if not os.path.exists(target_path):
                os.makedirs(target_path)
            for v_num, file_list in target_frames[set_num].items():
                current_frames += detection_to_file(
                    target_path, v_num, file_list, detect, total_frames,
                    current_frames)
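        # Hedged note (the detection_to_file helper itself is not shown in this
        # excerpt): judging from the call site it writes one result text file
        # per video under target_path -- the per-set/per-video layout the
        # Caltech evaluation toolbox expects, with one
        # "frame, x, y, w, h, score" line per detection -- and returns the
        # number of frames written, which is accumulated into current_frames.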

    def evaluate_detections(self, all_boxes, output_dir):
        self._write_caltech_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)
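
    # Hedged note (not from the original file): evaluate_detections expects the
    # standard py-faster-rcnn `all_boxes` layout, i.e. all_boxes[cls][img] is an
    # (N, 5) array of [x1, y1, x2, y2, score] for class index `cls` in image
    # index `img`. An empty skeleton for an imdb would be built roughly as
    # (assuming numpy is imported as np):
    #
    #   all_boxes = [[np.zeros((0, 5), dtype=np.float32)
    #                 for _ in xrange(imdb.num_images)]
    #                for _ in xrange(imdb.num_classes)]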


if __name__ == '__main__':

    from datasets.caltech import caltech
    d = caltech("trainval")
    res = d.roidb
    from IPython import embed
    embed()
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    from datasets.caltech import caltech
    d = caltech('all', '2012')
    res = d.roidb
    from IPython import embed
    embed()
__sets = {}

import datasets
from datasets.caltech import caltech
from datasets.kaist_rgb import kaist_rgb
from datasets.kaist_thermal import kaist_thermal
from datasets.kaist_fusion import kaist_fusion
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco

import numpy as np

# set up caltech
imageset = 'test'
name = 'caltech_{}'.format(imageset)
__sets[name] = (lambda imageset=imageset: caltech('test'))

imageset = 'train04'
name = 'caltech_{}'.format(imageset)
__sets[name] = (lambda imageset=imageset: caltech('train04'))

imageset = 'test-all'
name = 'kaist_{}'.format(imageset)
__sets[name] = (lambda imageset=imageset: kaist_rgb('test-all'))

imageset = 'train-all02'
name = 'kaist_{}'.format(imageset)
__sets[name] = (lambda imageset=imageset: kaist_rgb('train-all02'))

imageset = 'test-all-thermal'
name = 'kaist_{}'.format(imageset)
Example #16
    def __init__(self, image_set, year, devkit_path=None, shuffled=None):
        
        imdb.__init__(self, 'merged_' + image_set)
        self._year = year
        self._image_set = image_set
        self._anno_set_dir = image_set
        if "val" in image_set:
            self._image_set_dir = "val"
            if "val1" in image_set:
                self._anno_set_dir = "val1"
            if "val2" in image_set:
                self._anno_set_dir = "val2"
        elif "train" in image_set:
            self._anno_set_dir = "train"
        elif "test" in image_set:
            self._anno_set_dir = "test"
        
        if image_set == "train":
            self.imdbs = [imagenet(image_set),\
                        coco(image_set, '2015'),\
                        cam2(image_set,'2017'),\
                        sun(image_set,'2012'),\
                        caltech(image_set,'2009'),\
                        kitti(image_set,'2013'),\
                        inria(image_set,'2005'),\
                        pascal_voc(image_set,'2007'),\
                        pascal_voc(image_set,'2012')]
        elif image_set == "test":
            self.imdbs = [imagenet('val'),\
                        coco('test-dev', '2015'),\
                        cam2('all','2017'),\
                        sun('test','2012'),\
                        caltech('test','2009'),\
                        kitti('val','2013'),\
                        inria('all','2005'),\
                        pascal_voc('test','2007')]

        self.roidbs = [None for _ in range(len(self.imdbs))]
        for idx,imdb in enumerate(self.imdbs):
            self.roidbs[idx] = get_training_roidb(imdb)

        self._devkit_path = self._get_default_path() if devkit_path is None \
                            else devkit_path
        self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
        self._classes = ('__background__', # always index 0
                         'voc',
                         'imagenet',
                         'caltech',
                         'coco',
                         'sun',
                         'kitti',
                         'inria',
                         'cam2')
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = '.jpg'
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        self._roidb_handler = self.selective_search_roidb
        self._salt = str(uuid.uuid4())
        self._comp_id = 'comp4'

        # PASCAL specific config options
        self.config = {'cleanup'     : True,
                       'use_salt'    : True,
                       'use_diff'    : False,
                       'matlab_eval' : False,
                       'rpn_file'    : None,
                       'min_size'    : 2}

        assert os.path.exists(self._devkit_path), \
                'VOCdevkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)
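
    # Hedged usage sketch (not part of the original class): the constructor
    # above suggests usage along these lines, where `merged` is the class this
    # __init__ belongs to (named after imdb.__init__(self, 'merged_' + image_set))
    # and the per-dataset roidbs built in the loop above are exposed as
    # self.roidbs:
    #
    #   d = merged('train', '2007')
    #   for sub_imdb, sub_roidb in zip(d.imdbs, d.roidbs):
    #       print('{:s}: {:d} roidb entries'.format(sub_imdb.name, len(sub_roidb)))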