def get_config():
    """Build the TrainConfig for the SVHN DoReFa run, logging under train_log/."""
    logger.set_logger_dir(
        os.path.join('train_log', 'svhn-dorefa-{}'.format(args.dorefa)))

    # Training data mixes the 'train' and 'extra' SVHN splits.
    train_ds = RandomMixData(
        [dataset.SVHNDigit('train'), dataset.SVHNDigit('extra')])
    test_ds = dataset.SVHNDigit('test')

    train_ds = AugmentImageComponent(train_ds, [
        imgaug.Resize((40, 40)),
        imgaug.Brightness(30),
        imgaug.Contrast((0.5, 1.5)),
    ])
    train_ds = BatchData(train_ds, 128)
    train_ds = MultiProcessRunnerZMQ(train_ds, 5)

    # Test pipeline: resize only, keep the last partial batch.
    test_ds = AugmentImageComponent(test_ds, [imgaug.Resize((40, 40))])
    test_ds = BatchData(test_ds, 128, remainder=True)

    return TrainConfig(
        data=QueueInput(train_ds),
        callbacks=[
            ModelSaver(),
            InferenceRunner(
                test_ds,
                [ScalarStats('cost'), ClassificationError('wrong_tensor')]),
        ],
        model=Model(),
        max_epoch=200,
    )
def get_data(train_or_test):
    """Return the SVHN dataflow for the given split name ('train' or 'test')."""
    is_train = train_or_test == 'train'
    pp_mean = dataset.SVHNDigit.get_per_pixel_mean()

    if is_train:
        ds = RandomMixData(
            [dataset.SVHNDigit('train'), dataset.SVHNDigit('extra')])
        augmentors = [
            imgaug.CenterPaste((40, 40)),
            imgaug.Brightness(10),
            imgaug.Contrast((0.8, 1.2)),
            # this is slow. without it, can only reach 1.9% error
            imgaug.GaussianDeform(
                [(0.2, 0.2), (0.2, 0.8), (0.8, 0.8), (0.8, 0.2)],
                (40, 40), 0.2, 3),
            imgaug.RandomCrop((32, 32)),
            imgaug.MapImage(lambda x: x - pp_mean),
        ]
    else:
        ds = dataset.SVHNDigit('test')
        # Test images only get mean subtraction; no geometric augmentation.
        augmentors = [imgaug.MapImage(lambda x: x - pp_mean)]

    ds = AugmentImageComponent(ds, augmentors)
    # Keep the last partial batch only for evaluation.
    ds = BatchData(ds, 128, remainder=not is_train)
    if is_train:
        ds = PrefetchData(ds, 5, 5)
    return ds
def get_config():
    """Assemble the TrainConfig: SVHN train+extra for training, test for inference."""
    logger.auto_set_dir()

    train_ds = RandomMixData(
        [dataset.SVHNDigit('train'), dataset.SVHNDigit('extra')])
    test_ds = dataset.SVHNDigit('test')

    # NOTE: GaussianDeform is deliberately omitted here — helpful but slow;
    # only worth enabling with lots of cpus.
    train_ds = AugmentImageComponent(train_ds, [
        imgaug.Resize((40, 40)),
        imgaug.Brightness(30),
        imgaug.Contrast((0.5, 1.5)),
    ])
    train_ds = BatchData(train_ds, 128)
    train_ds = PrefetchDataZMQ(train_ds, 5)

    test_ds = AugmentImageComponent(test_ds, [imgaug.Resize((40, 40))])
    test_ds = BatchData(test_ds, 128, remainder=True)

    return TrainConfig(
        dataflow=train_ds,
        callbacks=[
            ModelSaver(),
            InferenceRunner(
                test_ds, [ScalarStats('cost'), ClassificationError()]),
        ],
        model=Model(),
        max_epoch=200,
    )
def get_config():
    """TrainConfig for the DoReFa SVHN experiment; log dir is tagged with --dorefa."""
    log_dir = './train_log/svhn_dorefa_adam01_zp_025_' + str(args.dorefa) + '_exp1'
    logger.set_logger_dir(log_dir)

    # Training data mixes the 'train' and 'extra' SVHN splits.
    train_ds = RandomMixData(
        [dataset.SVHNDigit('train'), dataset.SVHNDigit('extra')])
    test_ds = dataset.SVHNDigit('test')

    # NOTE: GaussianDeform is deliberately omitted — helpful but slow;
    # only worth enabling with lots of cpus.
    train_ds = AugmentImageComponent(train_ds, [
        imgaug.Resize((40, 40)),
        imgaug.Brightness(30),
        imgaug.Contrast((0.5, 1.5)),
    ])
    train_ds = BatchData(train_ds, 128)
    train_ds = PrefetchDataZMQ(train_ds, 5)

    test_ds = AugmentImageComponent(test_ds, [imgaug.Resize((40, 40))])
    test_ds = BatchData(test_ds, 128, remainder=True)

    return TrainConfig(
        data=QueueInput(train_ds),
        callbacks=[
            ModelSaver(max_to_keep=2),
            InferenceRunner(
                test_ds,
                [ScalarStats('cost'), ClassificationError('wrong_tensor')]),
        ],
        model=Model(),
        max_epoch=200,
    )
def get_data():
    """Return (train, test) SVHN dataflows; training mixes 'train' and 'extra'."""
    train_ds = RandomMixData(
        [dataset.SVHNDigit('train'), dataset.SVHNDigit('extra')])
    # shuffle=False keeps evaluation order deterministic.
    test_ds = dataset.SVHNDigit('test', shuffle=False)

    train_ds = BatchData(train_ds, 256)
    test_ds = BatchData(test_ds, 128, remainder=True)
    return train_ds, test_ds
def get_data():
    """Return augmented (train, test) SVHN dataflows."""
    train_ds = RandomMixData(
        [dataset.SVHNDigit('train'), dataset.SVHNDigit('extra')])
    # shuffle=False keeps evaluation order deterministic.
    test_ds = dataset.SVHNDigit('test', shuffle=False)

    train_ds = AugmentImageComponent(train_ds, [
        imgaug.Resize((40, 40)),
        imgaug.Brightness(30),
        imgaug.Contrast((0.5, 1.5)),
    ])
    train_ds = BatchData(train_ds, 128)
    train_ds = PrefetchData(train_ds, 5, 5)

    # Test pipeline: resize only, keep the last partial batch.
    test_ds = AugmentImageComponent(test_ds, [imgaug.Resize((40, 40))])
    test_ds = BatchData(test_ds, 128, remainder=True)
    return train_ds, test_ds
def get_data():
    """Return (train, test) SVHN dataflows with DisturbLabel noise on training.

    Training mixes the 'train' and 'extra' splits, randomly perturbs labels
    with probability ``args.prob`` (DisturbLabel), augments, batches, and
    runs loading in parallel worker processes. Test data is resized and
    batched with the last partial batch kept.
    """
    d1 = dataset.SVHNDigit('train')
    d2 = dataset.SVHNDigit('extra')
    data_train = RandomMixData([d1, d2])
    data_train = DisturbLabel(data_train, args.prob)
    # shuffle=False makes evaluation order deterministic, matching the other
    # get_data() variants in this codebase that pass shuffle=False for 'test'.
    data_test = dataset.SVHNDigit('test', shuffle=False)
    augmentors = [
        imgaug.Resize((40, 40)),
        imgaug.Brightness(30),
        imgaug.Contrast((0.5, 1.5)),
    ]
    data_train = AugmentImageComponent(data_train, augmentors)
    data_train = BatchData(data_train, 128)
    data_train = MultiProcessRunner(data_train, 5, 5)
    augmentors = [imgaug.Resize((40, 40))]
    data_test = AugmentImageComponent(data_test, augmentors)
    data_test = BatchData(data_test, 128, remainder=True)
    return data_train, data_test
def get_data():
    """Return augmented (train, test) SVHN dataflows, with GaussianDeform on train."""
    train_ds = RandomMixData(
        [dataset.SVHNDigit('train'), dataset.SVHNDigit('extra')])
    # shuffle=False keeps evaluation order deterministic.
    test_ds = dataset.SVHNDigit('test', shuffle=False)

    train_ds = AugmentImageComponent(train_ds, [
        imgaug.Resize((40, 40)),
        imgaug.Brightness(30),
        imgaug.Contrast((0.5, 1.5)),
        # this is slow. only use it when you have lots of cpus
        imgaug.GaussianDeform(
            [(0.2, 0.2), (0.2, 0.8), (0.8, 0.8), (0.8, 0.2)],
            (40, 40), 0.2, 3),
    ])
    train_ds = BatchData(train_ds, 128)
    train_ds = PrefetchData(train_ds, 5, 5)

    # Test pipeline: resize only, keep the last partial batch.
    test_ds = AugmentImageComponent(test_ds, [imgaug.Resize((40, 40))])
    test_ds = BatchData(test_ds, 128, remainder=True)
    return train_ds, test_ds
'--load', help='load a checkpoint, or a npz (given as the pretrained model)') parser.add_argument('--data', help='ILSVRC dataset dir') parser.add_argument( '--dorefa', required=True, help='number of bits for W,A,G, separated by comma. W="t" means TTQ') parser.add_argument( '--run', help='run on a list of images with the pretrained model', nargs='*') parser.add_argument('--eval', action='store_true') args = parser.parse_args() if args.eval: BATCH_SIZE = 128 data_test = dataset.SVHNDigit('test') augmentors = [ imgaug.Resize((40, 40)), imgaug.Brightness(30), imgaug.Contrast((0.5, 1.5)), ] data_test = AugmentImageComponent(data_test, augmentors) data_test = BatchData(data_test, 128, remainder=True) eval_classification(Model(), get_model_loader(args.load), data_test) sys.exit() BITW, BITA, BITG = map(int, args.dorefa.split(',')) config = get_config() launch_train_with_config(config, SimpleTrainer())