Example #1
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

inria_devkit_path = '/home/deboc/py-faster-rcnn/data/INRIA_Person_devkit'
for split in ['train', 'test']:
    name = '{}_{}'.format('inria', split)
    __sets[name] = (lambda split=split: inria(split, inria_devkit_path))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    return __sets.keys()
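For context, a minimal usage sketch of the factory above, assuming it lives in datasets/factory.py as in py-faster-rcnn; the attribute names follow the upstream imdb base class:

from datasets.factory import get_imdb, list_imdbs

print(list_imdbs())              # every registered name, e.g. 'coco_2014_train', 'inria_test'
imdb = get_imdb('inria_train')   # only now is inria('train', inria_devkit_path) constructed
print(imdb.name, imdb.num_images)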
Example #2
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
for year in ['2014']:
    for split in ['train', 'val', 'minival', 'valminusminival']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

# Set up coco_2015_<split>
for year in ['2015']:
    for split in ['test', 'test-dev']:
        name = 'coco_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: coco(split, year))

inria_devkit_path = '/home/deboc/py-faster-rcnn/data/INRIA_Person_devkit'
for split in ['train', 'test']:
    name = '{}_{}'.format('inria', split)
    __sets[name] = (lambda split=split: inria(split, inria_devkit_path))

def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()

def list_imdbs():
    """List all registered imdbs."""
    return __sets.keys()
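A note on the recurring lambda split=split, year=year idiom in these registries: the default arguments capture the loop variables at definition time; without them, Python's late-binding closures would make every entry resolve to the last split and year. A standalone sketch with a stand-in constructor:

_registry = {}
for year in ['2007', '2012']:
    for split in ['train', 'test']:
        # defaults freeze the current loop values; the tuple stands in for pascal_voc(split, year)
        _registry['voc_{}_{}'.format(year, split)] = (
            lambda split=split, year=year: ('voc', split, year))

assert _registry['voc_2007_train']() == ('voc', 'train', '2007')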
Example #3
for year in ['2009']:
    for split in ['val', 'train', 'test', 'all', 'taste', 'medium']:
        name = 'caltech_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: caltech(split, year))

# Set up kitti_2013_<split> # TODO
for year in ['2013']:
    for split in ['val', 'train', 'test', 'all']:
        name = 'kitti_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: kitti(split, year))

# Set up inria_2005_<split> # TODO
for year in ['2005']:
    for split in ['val', 'train', 'test', 'all']:
        name = 'inria_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: inria(split, year))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    return __sets.keys()


def create_unite(names):
Example #4
from fast_rcnn.config import cfg, cfg_from_file, get_output_dir
from fast_rcnn.train import get_training_roidb  # provides get_training_roidb() used below
from datasets.caltech import caltech
from datasets.inria import inria
import datasets.imdb
import caffe
import numpy as np
import sys

if __name__ == '__main__':


    #set up global variable 'cfg' for train
    train_solver = sys.path[4] + '/' + 'models/pascal_voc_person/VGG16/solver.prototxt'
    pretrained_caffemodel = sys.path[4] + '/' + 'data/imagenet_models/VGG16.caffemodel'
    max_iterations = 4000
    train_imdb = inria(train = 1)
    roidb = get_training_roidb(train_imdb)
    
    # set up global varibles for validation
    validation_network = sys.path[4]+'/'+ 'models/pascal_voc_person/VGG16/test.prototxt'
    validation_imdb = inria(test = 1)

    # set up global caffe mode
    cfg_file = sys.path[4] + '/' + 'models/pascal_voc_person/VGG16/vgg16_faster_rcnn.yml'
    
    if 1:
        train_solver = 'models/pascal_voc_person/VGG16/solver.prototxt'
        pretrained_caffemodel = 'data/imagenet_models/VGG16.caffemodel'
        validation_network = 'models/pascal_voc_person/VGG16/test.prototxt'
        cfg_file = 'models/pascal_voc_person/VGG16/vgg16_faster_rcnn.yml'
    
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True


if __name__ == '__main__':
    from datasets.inria import inria
    d = inria('all', '2005')
    res = d.roidb
    from IPython import embed
    embed()
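The training script above stops before actually launching training; in upstream py-faster-rcnn the remaining wiring would look roughly like the sketch below, reusing the variables defined above (train_net, get_training_roidb and get_output_dir are the upstream helpers; treat the exact calls as an approximation):

from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg_from_file, get_output_dir

cfg_from_file(cfg_file)                    # load the YAML config before building the roidb
roidb = get_training_roidb(train_imdb)     # adds flipped examples etc. according to cfg
output_dir = get_output_dir(train_imdb, None)
train_net(train_solver, roidb, output_dir,
          pretrained_model=pretrained_caffemodel,
          max_iters=max_iterations)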
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
import numpy as np
''' add other dataset '''

for version in ["all", "reasonable", "person"]:
    for split in ["train", "val", "trainval", "test"]:
        name = 'eth_{}_{}'.format(version, split)
        __sets[name] = (
            lambda split=split, version=version: eth(version, split))

for version in ["all", "reasonable", "person"]:
    for split in ["train", "val", "trainval", "test"]:
        name = 'inria_{}_{}'.format(version, split)
        __sets[name] = (
            lambda split=split, version=version: inria(version, split))

# Set up caltech_<version>_<split>
for version in ["all", "reasonable", "person"]:
    for split in ["train", "val", "trainval", "test"]:
        name = 'caltech_{}_{}'.format(version, split)
        __sets[name] = (
            lambda split=split, version=version: caltech(version, split))

# Set up voc_<year>_<split> using selective search "fast" mode
for year in ['2007', '2012', '0712']:
    for split in ['train', 'val', 'trainval', 'test']:
        name = 'voc_{}_{}'.format(year, split)
        __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))

# Set up coco_2014_<split>
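The registry pattern extends naturally to additional datasets; a hypothetical sketch (the mydataset module and class are placeholders, not part of the snippets above):

from datasets.mydataset import mydataset   # placeholder imdb subclass

for split in ['train', 'test']:
    name = 'mydataset_{}'.format(split)
    __sets[name] = (lambda split=split: mydataset(split))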
Example #7
from fast_rcnn.config import cfg, cfg_from_file
from fast_rcnn.test import test_net
from datasets.inria import inria
import caffe
import argparse
import pprint
import time, os, sys

if __name__ == '__main__':
    gpu_id = 0
    prototxt = 'models/pascal_voc_person/VGG16/test.prototxt'
    caffemodel = 'output/faster_rcnn_end2end/inria/vgg16_faster_rcnn_iter_3000.caffemodel'
    cfg_file = sys.path[
        4] + '/' + 'models/pascal_voc_person/VGG16/vgg16_faster_rcnn.yml'

    prototxt = sys.path[4] + '/' + prototxt
    caffemodel = sys.path[4] + '/' + caffemodel

    comp_mode = False
    visable = True
    max_image = 10000

    cfg_from_file(cfg_file)
    cfg.GPU_ID = gpu_id

    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    net.name = os.path.splitext(os.path.basename(caffemodel))[0]

    imdb = inria(test=1)
    test_net(net, imdb, max_per_image=max_image, vis=visable)
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        self._write_caltech_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['matlab_eval']:
            self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                os.remove(filename)

    def competition_mode(self, on):
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True

if __name__ == '__main__':

    from datasets.inria import inria
    d = inria("trainval")
    res = d.roidb
    from IPython import embed
    embed()
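For reference, evaluate_detections and competition_mode are normally driven by py-faster-rcnn's test harness rather than called by hand; a minimal sketch, where all_boxes and output_dir are assumed to come from test_net:

imdb = inria("trainval")
imdb.competition_mode(False)   # keep salted result files and clean them up after evaluation
# all_boxes[cls][image] is an N x 5 array of [x1, y1, x2, y2, score] detections
imdb.evaluate_detections(all_boxes, output_dir)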
Example #9
    def __init__(self, image_set, year, devkit_path=None, shuffled=None):
        
        imdb.__init__(self, 'merged_' + image_set)
        self._year = year
        self._image_set = image_set
        self._anno_set_dir = image_set
        if "val" in image_set:
            self._image_set_dir = "val"
            if "val1" in image_set:
                self._anno_set_dir = "val1"
            if "val2" in image_set:
                self._anno_set_dir = "val2"
        elif "train" in image_set:
            self._anno_set_dir = "train"
        elif "test" in image_set:
            self._anno_set_dir = "test"
        
        if image_set == "train":
            self.imdbs = [imagenet(image_set),\
                        coco(image_set, '2015'),\
                        cam2(image_set,'2017'),\
                        sun(image_set,'2012'),\
                        caltech(image_set,'2009'),\
                        kitti(image_set,'2013'),\
                        inria(image_set,'2005'),\
                        pascal_voc(image_set,'2007'),\
                        pascal_voc(image_set,'2012')]
        elif image_set == "test":
            self.imdbs = [imagenet('val'),\
                        coco('test-dev', '2015'),\
                        cam2('all','2017'),\
                        sun('test','2012'),\
                        caltech('test','2009'),\
                        kitti('val','2013'),\
                        inria('all','2005'),\
                        pascal_voc('test','2007')]

        self.roidbs = [None for _ in range(len(self.imdbs))]
        for idx,imdb in enumerate(self.imdbs):
            self.roidbs[idx] = get_training_roidb(imdb)

        self._devkit_path = self._get_default_path() if devkit_path is None \
                            else devkit_path
        self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
        self._classes = ('__background__', # always index 0
                         'voc',
                         'imagenet',
                         'caltech',
                         'coco',
                         'sun',
                         'kitti',
                         'inria',
                         'cam2')
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = '.jpg'
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        self._roidb_handler = self.selective_search_roidb
        self._salt = str(uuid.uuid4())
        self._comp_id = 'comp4'

        # PASCAL specific config options
        self.config = {'cleanup'     : True,
                       'use_salt'    : True,
                       'use_diff'    : False,
                       'matlab_eval' : False,
                       'rpn_file'    : None,
                       'min_size'    : 2}

        assert os.path.exists(self._devkit_path), \
                'VOCdevkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)
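Finally, a sketch of how the per-dataset roidbs collected in self.roidbs are typically flattened into one list for training, mirroring the combined_roidb pattern in py-faster-rcnn's train tool (the helper name is an assumption):

def flatten_roidbs(roidbs):
    # Concatenate the per-dataset roidbs into a single training roidb.
    merged = list(roidbs[0])
    for r in roidbs[1:]:
        merged.extend(r)
    return merged

# e.g. roidb = flatten_roidbs(self.roidbs) once all the imdbs have been loaded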