Example #1
    def load_data(self):
        """
        """
        dataset = '-'.join([self.image_set, self.year])

        voc = dataset_meta[dataset]
        workdir, filepath, datadir = self._valid_path_append(
            self.path, '', voc['file'], voc['subdir'])

        if not os.path.exists(filepath):
            self.fetch_dataset(voc['url'], voc['file'], filepath, voc['size'])
            with tarfile.open(filepath) as f:
                f.extractall(workdir)

        # define the path structure of the dataset
        config = dict()
        config['root'] = datadir
        config['index_path'] = os.path.join(datadir, 'ImageSets', 'Main',
                                            self.image_set + '.txt')
        config['image_path'] = os.path.join(datadir, 'JPEGImages')
        config['annot_path'] = os.path.join(datadir, 'Annotations')
        config['file_ext'] = ".jpg"

        # cache file name, keyed on image set, year, and image size limits
        # (the original format string had only two placeholders and silently
        # dropped MAX_SIZE and MIN_SIZE)
        cache_name = 'pascal_{}-{}_{}x{}.pkl'.format(
            self.image_set, self.year, self.MAX_SIZE, self.MIN_SIZE)

        cache_dir = get_data_cache_dir(datadir, subdir='pascalvoc_cache')
        config['cache_path'] = os.path.join(cache_dir, cache_name)
        config['use_diff'] = False

        return config
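The returned config is a plain dict of filesystem paths. Below is a minimal consumption sketch; the `loader` variable and the iteration code are illustrative assumptions, not part of the original (PASCAL VOC pairs each JPEG with an XML annotation of the same base name):

import os

# `loader` is assumed to be an instance of the dataset class above
config = loader.load_data()

# read the image ids listed for this image set
with open(config['index_path']) as f:
    image_ids = [line.strip() for line in f if line.strip()]

# build full paths to each image and its annotation file
for image_id in image_ids:
    image_file = os.path.join(config['image_path'], image_id + config['file_ext'])
    annot_file = os.path.join(config['annot_path'], image_id + '.xml')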
Example #2
    def __init__(self, set_name, repo_dir,
                 media_params, target_size,
                 archive_dir=None,
                 target_conversion='ascii_to_binary',
                 index_file=None,
                 shuffle=False, reshuffle=False,
                 datum_dtype=np.uint8, target_dtype=np.int32,
                 onehot=True, nclasses=None, subset_percent=100,
                 ingest_params=None,
                 alphabet=None):
        if onehot is True and nclasses is None:
            raise ValueError('nclasses must be specified for one-hot labels')
        if target_conversion not in self._converters_:
            raise ValueError('Unknown target type %s' % target_conversion)

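        # record the dataset name and resolve the repository, ingested
        # archive, and index-file locations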
        self.set_name = set_name
        repo_dir = os.path.expandvars(os.path.expanduser(repo_dir))
        if not os.path.exists(repo_dir):
            raise IOError('Directory not found: %s' % repo_dir)
        self.macro_start = 0
        self.repo_dir = repo_dir
        parent_dir = os.path.split(repo_dir)[0]
        self.archive_prefix = 'archive-'
        if archive_dir is None:
            self.archive_dir = get_data_cache_dir(set_name + '-ingested')
        else:
            self.archive_dir = os.path.expandvars(os.path.expanduser(archive_dir))
        self.item_count = ct.c_int(0)
        self.bsz = self.be.bsz
        self.buffer_id = 0
        self.start_idx = 0
        self.media_params = media_params
        self.shape = media_params.get_shape()
        self.datum_size = media_params.datum_size()
        self.target_size = target_size
        self.target_conversion = self._converters_[target_conversion]
        if index_file is None:
            self.index_file = os.path.join(parent_dir, set_name + '-index.csv')
        else:
            self.index_file = index_file
        self.shuffle = shuffle
        self.reshuffle = reshuffle
        self.datum_dtype = datum_dtype
        self.target_dtype = target_dtype
        self.onehot = onehot
        self.nclasses = nclasses
        self.subset_percent = int(subset_percent)
        self.ingest_params = ingest_params
        if alphabet is None:
            self.alphabet = None
        else:
            # ctypes needs bytes for c_char_p; encode str input (Python 3)
            if isinstance(alphabet, str):
                alphabet = alphabet.encode()
            self.alphabet = ct.c_char_p(alphabet)
        self.load_library()
        self.alloc()
        self.start()
        atexit.register(self.stop)
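The constructor starts the native loader and then registers its own `stop` with `atexit`, so the background loader is shut down even if the caller never stops it explicitly. The same pattern in isolation (the `FakeLoader` class is a hypothetical stand-in, for illustration only):

import atexit

class FakeLoader(object):
    # hypothetical stand-in for the native data loader above
    def start(self):
        print('loader started')

    def stop(self):
        print('loader stopped')

loader = FakeLoader()
loader.start()
atexit.register(loader.stop)  # invoked automatically at normal interpreter exit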
Example #3
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args(gen_be=False)

# hyperparameters
assert args.batch_size == 1, "Faster-RCNN only supports batch size 1"
assert 'train' in args.manifest

rpn_rois_per_img = 256  # number of rois to sample to train rpn
frcn_rois_per_img = 128  # number of rois to sample to train frcn

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
be.enable_winograd = 4  # default to winograd 4 for fast autotune

# directory to store VGG weights
cache_dir = get_data_cache_dir(args.data_dir, subdir='pascalvoc_cache')

# build data loader
# get config file for PASCALVOC
config = PASCALVOC(args.manifest['train'], args.manifest_root,
                   width=args.width, height=args.height,
                   rois_per_img=rpn_rois_per_img, inference=False)
config['subset_fraction'] = float(args.subset_pct / 100.0)

train_set = faster_rcnn.build_dataloader(config, frcn_rois_per_img)

# build the Faster-RCNN model
model = faster_rcnn.build_model(train_set, frcn_rois_per_img, inference=False)

# set up the cost weights for the different branches
weights = 1.0 / rpn_rois_per_img
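Weighting by 1.0 / rpn_rois_per_img turns a summed per-ROI loss into a mean over the 256 sampled ROIs. A small numeric check of that equivalence (plain NumPy, not part of the original script):

import numpy as np

rpn_rois_per_img = 256
per_roi_loss = np.random.rand(rpn_rois_per_img)  # one loss value per sampled ROI

# scaling the summed loss by 1/N equals averaging over the N samples
weights = 1.0 / rpn_rois_per_img
assert np.isclose(weights * per_roi_loss.sum(), per_roi_loss.mean())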
Example #4
args = parser.parse_args()

assert args.model_file is not None, "Model file required for Faster-RCNN testing"
assert 'val' in args.manifest, "Path to validation manifest file required"

# hyperparameters
assert args.batch_size == 1, "Faster-RCNN only supports batch size 1"
rpn_rois_per_img = 256
frcn_rois_per_img = 128

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# build data loader
cache_dir = get_data_cache_dir(args.data_dir, subdir='pascalvoc_cache')
config = PASCALVOC(args.manifest['val'],
                   args.manifest_root,
                   width=args.width,
                   height=args.height,
                   rois_per_img=rpn_rois_per_img,
                   inference=True)

valid_set = faster_rcnn.build_dataloader(config, frcn_rois_per_img)

num_classes = valid_set.num_classes

# build the Faster-RCNN network
(model, proposalLayer) = faster_rcnn.build_model(valid_set,
                                                 frcn_rois_per_img,
                                                 inference=True)
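A plausible next step, not shown in this snippet: load the trained weights named on the command line before running inference. This assumes neon's standard `Model.load_params` API:

# load the trained Faster-RCNN weights from the command-line model file
model.load_params(args.model_file)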
Example #5
parser.add_argument('--height', type=int, help='image height')
parser.add_argument('--width', type=int, help='image width')
parser.add_argument('--subset_pct', type=float, default=100.0,
                    help='percentage of the full training data set to use')
parser.add_argument('--ssd_config', action='append', required=True, help='path to ssd json file')
parser.add_argument('--lr_scale', type=float, default=1.0, help='scale lr by this amount')
parser.add_argument('--image_sample_dir', type=str, help='path to save image samples')
parser.add_argument('--num_images', type=int, help='number of images to save')
parser.add_argument('--lr_step', type=int, action='append', help='epochs to step lr')

args = parser.parse_args(gen_be=False)
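# each --ssd_config argument is expected as a key:path pair; the keys
# 'train' and 'val' are looked up below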
if args.ssd_config:
    args.ssd_config = {k: v for k, v in [ss.split(':') for ss in args.ssd_config]}

# directory to store VGG weights
cache_dir = get_data_cache_dir(args.data_dir, subdir='ssd_cache')
with open(args.ssd_config['train']) as f:
    train_config = json.load(f, object_pairs_hook=OrderedDict)
with open(args.ssd_config['val']) as f:
    val_config = json.load(f, object_pairs_hook=OrderedDict)

if args.batch_size == 0:
    args.batch_size = train_config["batch_size"]

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=args.batch_size,
                 device_id=args.device_id,
                 compat_mode='caffe',
                 rng_seed=1,
                 deterministic_update=True,
                 deterministic=True, max_devices=args.max_devices)
be.enable_winograd = 0
Example #6
                    help='path to save image samples')
parser.add_argument('--num_images', type=int, help='number of images to save')
parser.add_argument('--lr_step',
                    type=int,
                    action='append',
                    help='epochs to step lr')

args = parser.parse_args(gen_be=False)
if args.ssd_config:
    args.ssd_config = {
        k: v
        for k, v in [ss.split(':') for ss in args.ssd_config]
    }

# directory to store VGG weights
cache_dir = get_data_cache_dir(args.data_dir, subdir='ssd_cache')
with open(args.ssd_config['train']) as f:
    train_config = json.load(f, object_pairs_hook=OrderedDict)
with open(args.ssd_config['val']) as f:
    val_config = json.load(f, object_pairs_hook=OrderedDict)

if args.batch_size == 0:
    args.batch_size = train_config["batch_size"]

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=args.batch_size,
                 device_id=args.device_id,
                 compat_mode='caffe',
                 rng_seed=1,
                 deterministic_update=True,
                 deterministic=True, max_devices=args.max_devices)
be.enable_winograd = 0