Example #1
def get_config():
    logger.auto_set_dir()

    ds_train = get_data(['ply_data_train%i.h5' % i for i in range(5)])
    ds_train = FixedSizeData(ds_train, 2048 * 5)
    ds_test = get_data(['ply_data_test%i.h5' % i for i in range(2)])
    ds_test = FixedSizeData(ds_test, 250)

    return TrainConfig(
        model=Model(),
        dataflow=ds_train,
        callbacks=[
            ModelSaver(),
            MaxSaver('validation_accuracy'),
            InferenceRunner(ds_test, [
                ScalarStats('total_costs'),
                ScalarStats('accuracy'),
                ScalarStats('cls_costs')
            ]),
        ],
        extra_callbacks=[
            MovingAverageSummary(),
            ProgressBar(['tower0/total_costs', 'tower0/accuracy']),
            MergeAllSummaries(),
            RunUpdateOps()
        ],
        steps_per_epoch=ds_train.size(),
        max_epoch=100,
    )
Example #2
def get_config():
    logger.auto_set_dir()

    data3, wd2id = get_PennTreeBank()
    global VOCAB_SIZE
    VOCAB_SIZE = len(wd2id)
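    # Epoch size for a ptb_producer-style reader (assuming it matches the standard TF PTB
    # tutorial producer): each of the BATCH rows holds data_len // BATCH tokens, the "- 1"
    # leaves room for the one-token shift between inputs and targets, and dividing by
    # SEQ_LEN counts the full windows per epoch.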
    steps_per_epoch = (data3[0].shape[0] // BATCH - 1) // SEQ_LEN

    train_data = TensorInput(lambda: ptb_producer(data3[0], BATCH, SEQ_LEN),
                             steps_per_epoch)
    val_data = TensorInput(lambda: ptb_producer(data3[1], BATCH, SEQ_LEN),
                           (data3[1].shape[0] // BATCH - 1) // SEQ_LEN)

    M = Model()
    return TrainConfig(
        data=train_data,
        model=M,
        callbacks=[
            ModelSaver(),
            HyperParamSetterWithFunc(
                'learning_rate',
                lambda e, x: x * 0.80 if e > 6 else x),
            RunOp(lambda: M.reset_lstm_state()),
            FeedfreeInferenceRunner(val_data, [ScalarStats(['cost'])]),
            CallbackFactory(
                trigger_epoch=lambda self: self.trainer.add_scalar_summary(
                    'validation_perplexity',
                    np.exp(
                        self.trainer.stat_holder.get_stat_now('validation_cost') /
                        SEQ_LEN))),
            RunOp(lambda: M.reset_lstm_state()),
        ],
        max_epoch=70,
    )
Example #3
def log_init(args, model_cls):
    """
        Set the log root according to the args.log_dir and 
        log run info
    """
    logger.set_log_root(log_root=args.log_dir)
    logger.auto_set_dir(action='k')
    logger.info("Arguments: {}".format(args))
    logger.info("Model class is {}".format(model_cls))
    logger.info("TF version: {}".format(tf.__version__))
def get_config():
    logger.auto_set_dir()
    dataset_train = get_data()

    return TrainConfig(
        dataflow=dataset_train,
        callbacks=[ModelSaver(), OnlineTensorboardExport()],
        model=Model(),
        steps_per_epoch=dataset_train.size(),
        max_epoch=50,
    )
Example #5
def get_config():
    logger.auto_set_dir()
    dataset_train = get_data()

    return TrainConfig(
        dataflow=dataset_train,
        callbacks=[
            ModelSaver(),
            OnlineTensorboardExport()
        ],
        model=Model(),
        steps_per_epoch=dataset_train.size(),
        max_epoch=50,
    )
Example #6
def get_config():
    logger.auto_set_dir()

    data3, wd2id = get_PennTreeBank()
    global VOCAB_SIZE
    VOCAB_SIZE = len(wd2id)
    steps_per_epoch = (data3[0].shape[0] // BATCH - 1) // SEQ_LEN

    train_data = TensorInput(
        lambda: ptb_producer(data3[0], BATCH, SEQ_LEN),
        steps_per_epoch)
    val_data = TensorInput(
        lambda: ptb_producer(data3[1], BATCH, SEQ_LEN),
        (data3[1].shape[0] // BATCH - 1) // SEQ_LEN)

    test_data = TensorInput(
        lambda: ptb_producer(data3[2], BATCH, SEQ_LEN),
        (data3[2].shape[0] // BATCH - 1) // SEQ_LEN)

    M = Model()
    return TrainConfig(
        data=train_data,
        model=M,
        callbacks=[
            ModelSaver(),
            HyperParamSetterWithFunc(
                'learning_rate',
                lambda e, x: x * 0.80 if e > 6 else x),
            # reset the recurrent state between training and each inference pass
            RunOp(lambda: M.reset_lstm_state()),
            InferenceRunner(val_data, [ScalarStats(['cost'])]),
            RunOp(lambda: M.reset_lstm_state()),
            InferenceRunner(
                test_data,
                [ScalarStats(['cost'], prefix='test')], tower_name='InferenceTowerTest'),
            RunOp(lambda: M.reset_lstm_state()),
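            # Perplexity below is exp(mean per-token cross-entropy): the 'validation_cost' and
            # 'test_cost' stats are assumed to be summed over a SEQ_LEN window by the Model's
            # 'cost' tensor, so they are divided by SEQ_LEN before exponentiating.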
            CallbackFactory(
                trigger=lambda self:
                [self.trainer.monitors.put_scalar(
                    'validation_perplexity',
                    np.exp(self.trainer.monitors.get_latest('validation_cost') / SEQ_LEN)),
                 self.trainer.monitors.put_scalar(
                     'test_perplexity',
                     np.exp(self.trainer.monitors.get_latest('test_cost') / SEQ_LEN))]
            ),
        ],
        max_epoch=70,
    )
Example #8
def get_config(datadir, batch_size):
    logger.auto_set_dir('n')
    lmdbs = glob.glob(os.path.join(datadir, 'train*.lmdb'))
    ds_train = [
        YoutubeData(lmdb, shape=(128, 128), ego_motion_size=[17, 21, 25])
        for lmdb in lmdbs
    ]
    ds_train = RandomMixData(ds_train)
    ds_train = BatchData(ds_train, BATCH_SIZE)
    ds_train = PrefetchDataZMQ(ds_train, 8)

    lmdbs = glob.glob(os.path.join(datadir, 'val*.lmdb'))
    ds_val = [
        YoutubeData(lmdb, shape=(128, 128), ego_motion_size=[17, 21, 25])
        for lmdb in lmdbs
    ]
    ds_val = RandomMixData(ds_val)
    ds_val = BatchData(ds_val, BATCH_SIZE)
    ds_val = FixedSizeData(ds_val, 100)
    ds_val = PrefetchDataZMQ(ds_val, 8)

    steps_per_epoch = 1000

    return TrainConfig(dataflow=ds_train,
                       callbacks=[
                           ModelSaver(),
                           InferenceRunner(ds_val, [
                               ScalarStats('total_cost'),
                               ScalarStats('PSNR_IMPRO_t%i' % (SEQ_LEN - 1))
                           ])
                       ],
                       extra_callbacks=[
                           MovingAverageSummary(),
                           ProgressBar([
                               'tower0/PSNR_base',
                               'tower0/PSNR_IMPRO_t%i' % (SEQ_LEN - 1),
                               'tower0/PSNR_IMPRO_t%i' % (SEQ_LEN - 1),
                               'tower0/PSNR_IMPRO_t%i' % (SEQ_LEN - 1),
                           ]),
                           MergeAllSummaries(),
                           RunUpdateOps()
                       ],
                       model=Model(),
                       steps_per_epoch=steps_per_epoch,
                       max_epoch=400)
Example #9
def get_config():
    logger.auto_set_dir()
    dataset_train = get_data('train')
    steps_per_epoch = len(dataset_train) * 40
    dataset_val = get_data('val')

    return TrainConfig(
        dataflow=dataset_train,
        callbacks=[
            ModelSaver(),
            ScheduledHyperParamSetter('learning_rate', [(30, 6e-6), (45, 1e-6), (60, 8e-7)]),
            HumanHyperParamSetter('learning_rate'),
            InferenceRunner(dataset_val,
                            BinaryClassificationStats('prediction', 'edgemap4d'))
        ],
        model=Model(),
        steps_per_epoch=steps_per_epoch,
        max_epoch=100,
    )
def get_config(base_dir, meta_dir, batch_size):
    logger.auto_set_dir()
    nr_tower = max(get_nr_gpu(), 1)

    dataset_train = get_data('train', base_dir, meta_dir, batch_size)
    steps_per_epoch = dataset_train.size() * epoch_scale

    return TrainConfig(
        dataflow=dataset_train,
        callbacks=[
            ModelSaver(),
            ScheduledHyperParamSetter('learning_rate', lr_schedule),
            HumanHyperParamSetter('learning_rate'),
            #PeriodicTrigger(CalculateMIoU(CLASS_NUM), every_k_epochs=evaluate_every_n_epoch),
            ProgressBar(["cross_entropy_loss", "cost",
                         "wd_cost"])  #uncomment it to debug for every step
        ],
        model=Model(),
        steps_per_epoch=steps_per_epoch,
        max_epoch=max_epoch,
    )
Example #11
def get_config():
    logger.auto_set_dir()

    data3, wd2id = get_PennTreeBank()
    # global VOCAB_SIZE

    conf = Config()

    # VOCAB_SIZE = len(wd2id)
    steps_per_epoch = (data3[0].shape[0] // conf.batch_size -
                       1) // conf.num_steps

    train_data = TensorInput(
        lambda: ptb_producer(data3[0], conf.batch_size, conf.num_steps),
        steps_per_epoch)
    val_data = TensorInput(
        lambda: ptb_producer(data3[1], conf.batch_size, conf.num_steps),
        (data3[1].shape[0] // conf.batch_size - 1) // conf.num_steps)

    test_data = TensorInput(
        lambda: ptb_producer(data3[2], conf.batch_size, conf.num_steps),
        (data3[2].shape[0] // conf.batch_size - 1) // conf.num_steps)

    def get_learning_rate(epoch, base_lr):
        print("\n\nEpoch, " + repr(epoch) + ", " + repr(base_lr))
        print(base_lr)
        if epoch <= 40:  # conf.nr_epoch_first_stage:
            return base_lr
        elif epoch <= 80:  #conf.nr_epoch_second_stage:
            return base_lr * 0.1
        else:
            return base_lr * 0.01

    M = Model()
    from tensorflow.python import debug as tf_debug
    return TrainConfig(
        data=train_data,
        model=M,
        callbacks=[
            # HookToCallback(tf_debug.LocalCLIDebugHook()),
            ModelSaver(),
            HyperParamSetterWithFunc(
                'learning_rate',  #lambda e, x:  1e-3),
                get_learning_rate),
            # lambda e, x: x * 0.80 if e > 6 else x),
            RunOp(lambda: M.reset_lstm_state()),
            InferenceRunner(val_data, [ScalarStats(['cost'])]),
            RunOp(lambda: M.reset_lstm_state()),
            InferenceRunner(test_data, [ScalarStats(['cost'], prefix='test')],
                            tower_name='InferenceTowerTest'),
            RunOp(lambda: M.reset_lstm_state()),
            CallbackFactory(trigger=lambda self: [
                self.trainer.monitors.put_scalar(
                    'validation_perplexity',
                    np.exp(
                        self.trainer.monitors.get_latest('validation_cost') /
                        conf.num_steps)),
                self.trainer.monitors.put_scalar(
                    'test_perplexity',
                    np.exp(
                        self.trainer.monitors.get_latest('test_cost') /
                        conf.num_steps))
            ]),
        ],
        max_epoch=conf.max_epoch,
    )
Example #12
def get_config():
    logger.auto_set_dir()

    data3, wd2id = get_PennTreeBank()
    # global VOCAB_SIZE

    conf = Config()

    # VOCAB_SIZE = len(wd2id)
    steps_per_epoch = (data3[0].shape[0] // conf.batch_size -
                       1) // conf.num_steps

    train_data = TensorInput(
        lambda: ptb_producer(data3[0], conf.batch_size, conf.num_steps),
        steps_per_epoch)
    val_data = TensorInput(
        lambda: ptb_producer(data3[1], conf.batch_size, conf.num_steps),
        (data3[1].shape[0] // conf.batch_size - 1) // conf.num_steps)

    test_data = TensorInput(
        lambda: ptb_producer(data3[2], conf.batch_size, conf.num_steps),
        (data3[2].shape[0] // conf.batch_size - 1) // conf.num_steps)

    # lr = tf.Variable(name='learning_rate')
    # lr = tf.get_variable('learning_rate', trainable=False)

    # tf.summary.scalar('learning_rate', lr)

    # #global conf
    # def get_learning_rate(epoch, base_lr):
    #     #base_lr = conf.learning_rate
    #     conf = Config()
    #     print("\n\nLR: "+repr(epoch)+" | "+repr(base_lr))
    #     # base_lr=conf.learning_rate1e-3
    #     # print(base_lr)
    #     if epoch <= conf.nr_epoch_first_stage:
    #         return base_lr
    #     elif epoch <= conf.nr_epoch_second_stage:
    #         return base_lr * 0.1
    #     else:
    #         return base_lr * 0.01

    def get_learning_rate(epoch, base_lr):
        #base_lr = conf.learning_rate
        conf = Config()
        print("\n\nLR: " + repr(epoch) + " | " + repr(base_lr))
        # base_lr=conf.learning_rate1e-3
        # print(base_lr)
        if epoch <= 70:  #conf.nr_epoch_first_stage
            return base_lr * 0.99  #0.98
        elif epoch <= 90:  #conf.nr_epoch_second_stage
            return base_lr * 0.11
        else:
            return base_lr * 0.09

    M = Model()
    from tensorflow.python import debug as tf_debug
    return TrainConfig(
        data=train_data,
        model=M,
        callbacks=[
            # HookToCallback(tf_debug.LocalCLIDebugHook()),
            ModelSaver(),
            # ScheduledHyperParamSetter('learning_rate',
            #                           [(1, 0.001), (25, 0.001), (35, 0.0005), (55, 0.0001)]),
            HyperParamSetterWithFunc('learning_rate', get_learning_rate),
            # lambda e, x: x * 0.80 if e > 6 else x),
            RunOp(lambda: M.reset_lstm_state()),
            InferenceRunner(val_data, [ScalarStats(['cost'])]),
            RunOp(lambda: M.reset_lstm_state()),
            InferenceRunner(test_data, [ScalarStats(['cost'], prefix='test')],
                            tower_name='InferenceTowerTest'),
            RunOp(lambda: M.reset_lstm_state()),
            CallbackFactory(trigger=lambda self: [
                self.trainer.monitors.put_scalar(
                    'validation_perplexity',
                    np.exp(
                        self.trainer.monitors.get_latest('validation_cost') /
                        conf.num_steps)),
                self.trainer.monitors.put_scalar(
                    'test_perplexity',
                    np.exp(
                        self.trainer.monitors.get_latest('test_cost') /
                        conf.num_steps))
            ]),
        ],
        max_epoch=conf.max_epoch,
    )
Example #13
import argparse
import imp

from tensorpack.dataflow import *
from tensorpack.utils import logger
from tensorpack.utils.fs import mkdir_p


parser = argparse.ArgumentParser()
parser.add_argument(dest='config')
parser.add_argument('-o', '--output',
                    help='output directory to dump dataset image. If not given, will not dump images.')
parser.add_argument('-s', '--scale',
                    help='scale the image data (maybe by 255)', default=1, type=int)
parser.add_argument('--index',
                    help='index of the image component in datapoint',
                    default=0, type=int)
parser.add_argument('-n', '--number', help='number of images to dump',
                    default=10, type=int)
args = parser.parse_args()
logger.auto_set_dir(action='d')

get_config_func = imp.load_source('config_script', args.config).get_config
config = get_config_func()
config.dataset.reset_state()

if args.output:
    mkdir_p(args.output)
    cnt = 0
    index = args.index
    for dp in config.dataset.get_data():
        imgbatch = dp[index]
        if cnt > args.number:
            break
        for bi, img in enumerate(imgbatch):
            cnt += 1
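            # excerpt truncated here; the original presumably writes each image out, e.g.
            # something like cv2.imwrite(os.path.join(args.output, '%03d.png' % cnt), img * args.scale)
            # (the imwrite call above is an illustrative assumption, not the original code)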
Example #14
    parser_evaluate.add_argument(
        "-best_model",
        dest="best_model",
        default=0,
        type=int,
        help="best model to evaluate",
    )
    parser_evaluate.add_argument("-add_epochs",
                                 dest="add_epochs",
                                 default=0,
                                 type=int,
                                 help="epochs to continue")
    args = parser.parse_args()
    argdict = vars(args)
    name = "seed_{}".format(argdict["seed"])
    logger.auto_set_dir(action="k", name=name)

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    step = int(293142 / args.batch_size)
    if args.command == "train":
        # set seed
        tf.set_random_seed(args.seed)
        random.seed(args.seed)
        np.random.seed(args.seed)
        # train
        ds = getdata("./mdb100/train.mdb", args.batch_size, True)
        dss = getdata("./mdb100/test.mdb", args.batch_size, False)
        config = get_config(ds, dss, args)
        launch_train_with_config(config, SimpleTrainer())
Example #15
    parser.add_argument('--load', help='load model')
    parser.add_argument('--apply', action='store_true')
    parser.add_argument('--data', help='path to the dataset. '
                        'Can be either a LMDB generated by `data_sampler.py` or the original COCO zip.')
    parser.add_argument('--vgg19', help='load model', default="")
    parser.add_argument('--lowres', help='low resolution image as input', default="", type=str)
    parser.add_argument('--output', help='directory for saving predicted high-res image', default=".", type=str)
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.apply:
        apply(args.load, args.lowres, args.output)
    else:
        logger.auto_set_dir()

        if args.load:
            session_init = SaverRestore(args.load)
        else:
            assert os.path.isfile(args.vgg19)
            param_dict = dict(np.load(args.vgg19))
            param_dict = {'VGG19/' + name: value for name, value in six.iteritems(param_dict)}
            session_init = DictRestore(param_dict)

        nr_tower = max(get_num_gpu(), 1)
        data = QueueInput(get_data(args.data))
        model = Model()

        trainer = SeparateGANTrainer(data, model, d_period=3)
Example #16
    parser.add_argument('--output',
                        help='directory for saving the rendering',
                        default=".",
                        type=str)
    args = parser.parse_args()
    print(args)
    parser.print_help()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.apply:
        apply(args.load, args.image, args.style)
    else:
        # Set the logger directory
        logger.auto_set_dir()

        nr_tower = max(get_nr_gpu(), 1)
        # ds_train, ds_valid = QueueInput(get_data(args.image, args.style))
        ds_train, ds_valid = get_data(args.image, args.style)

        ds_train = PrintData(ds_train)
        ds_valid = PrintData(ds_valid)

        ds_train = PrefetchDataZMQ(ds_train, 8)
        ds_valid = PrefetchDataZMQ(ds_valid, 1)

        model = Model()

        if args.load:
            session_init = SaverRestore(args.load)
Example #17
    STOP_GRADIENTS_PARTIAL = args.stopgradpartial
    SG_GAMMA = args.sg_gamma
    SAMLOSS = args.samloss
    EXP3_GAMMA = args.exp_gamma
    SUM_RAND_RATIO = args.sum_rand_ratio
    TRACK_GRADIENTS = args.track_grads
    DO_VALID = args.do_validation

    if STOP_GRADIENTS:
        STOP_GRADIENTS_PARTIAL = True
        SG_GAMMA = 0.0

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    logger.auto_set_dir(log_root=args.log_dir)
    utils.set_dataset_path(path=args.data_dir, auto_download=False)

    logger.info("On Dataset CIFAR{}, Parameters: f= {}, n= {}, w= {}, c= {}, s= {}, batch_size= {}, stopgrad= {}, stopgradpartial= {}, sg_gamma= {}, rand_loss_selector= {}, exp_gamma= {}, sum_rand_ratio= {} do_validation= {} exp_base= {} opt_at= {}".format(\
                NUM_CLASSES, FUNC_TYPE, NUM_UNITS, WIDTH, INIT_CHANNEL, \
                NUM_UNITS_PER_STACK, BATCH_SIZE, STOP_GRADIENTS, \
                STOP_GRADIENTS_PARTIAL, SG_GAMMA, \
                args.samloss, EXP3_GAMMA, SUM_RAND_RATIO, DO_VALID, \
                EXP_BASE, OPTIMAL_AT))

    config = get_config()
    if args.load:
        config.session_init = SaverRestore(args.load)
    if args.gpu:
        config.nr_tower = len(args.gpu.split(','))
    SyncMultiGPUTrainer(config).train()
Example #18
File: edr4.py Project: voidiak/MTRE
    parser_resume.add_argument("-epochs",
                               dest="epochs",
                               required=True,
                               type=int,
                               help="epochs to train for")

    args = parser.parse_args()
    set_gpu(args.gpu)
    if args.command == "train":

        # set seed
        tf.set_random_seed(args.seed)
        random.seed(args.seed)
        np.random.seed(args.seed)

        logger.auto_set_dir(action="k", name=args.name)

        ds = getdata("./mdb/train.mdb", True)

        dss = getdata("./mdb/test.mdb", False)
        config = get_config(ds, dss, args)
        launch_train_with_config(config, SimpleTrainer())

    elif args.command == "resume":
        logger.auto_set_dir(action="k")

        ds = getdata("./mdb/train.mdb", True)
        dss = getdata("./mdb/test.mdb", False)
        resume_config = resume_train(ds, dss, args)
        launch_train_with_config(resume_config, SimpleTrainer())
Example #19
IMAGE_SIZE = 28


def get_data():
    def f(dp):
        # dp = [28x28 image, integer label]; add a channel axis and one-hot encode the label
        im = dp[0][:, :, None]
        onehot = np.eye(10)[dp[1]]
        return [im, onehot]

    train = BatchData(MapData(dataset.Mnist('train'), f), 128)
    test = BatchData(MapData(dataset.Mnist('test'), f), 256)
    return train, test


if __name__ == '__main__':
    logger.auto_set_dir('d')

    def model_func(image):
        """
        Keras model has to be created inside this function to be used with tensorpack.
        """
        M = keras.models.Sequential()
        # input_tensor have to be used here for tensorpack trainer to function properly.
        # Just use inputs[1], inputs[2] if you have multiple inputs.
        M.add(KL.InputLayer(input_tensor=image))
        M.add(KL.Conv2D(32, 3, activation='relu', padding='same'))
        M.add(KL.MaxPooling2D())
        M.add(KL.Conv2D(32, 3, activation='relu', padding='same'))
        M.add(KL.Conv2D(32, 3, activation='relu', padding='same'))
        M.add(KL.MaxPooling2D())
        M.add(KL.Conv2D(32, 3, padding='same', activation='relu'))
Example #20
File: dr4.py Project: voidiak/MTRE
        "-start_epoch",
        dest="start_epoch",
        type=int,
        help="number of the starting epoch",
    )

    args = parser.parse_args()
    set_gpu(args.gpu)
    if args.command == "train":

        # set seed
        tf.set_random_seed(args.seed)
        random.seed(args.seed)
        np.random.seed(args.seed)

        logger.auto_set_dir(action="k")

        ds = getdata("./mdb/train.mdb", True)

        dss = getdata("./mdb/test.mdb", False)
        config = get_config(ds, dss, args)
        launch_train_with_config(config, SimpleTrainer())

    elif args.command == "resume":
        logger.auto_set_dir(action="k")

        ds = getdata("./mdb/train.mdb", True)

        dss = getdata("./mdb/test.mdb", False)
        resume_config = resume_train(ds, dss, args)
        launch_train_with_config(resume_config, SimpleTrainer())
Example #21
                        help='Whether to stop gradients.',
                        type=bool,
                        default=False)
    parser.add_argument('--load', help='load model')
    args = parser.parse_args()
    BATCH_SIZE = args.batch_size
    NUM_UNITS = args.num_units
    WIDTH = args.width
    INIT_CHANNEL = args.init_channel
    OPTIMAL_AT = args.optimalat
    STOP_GRADIENTS = args.stopgrad

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if os.getenv('LOG_DIR') is None:
        logger.auto_set_dir()
    else:
        logger.auto_set_dir(log_root=os.environ['LOG_DIR'])
    if os.getenv('DATA_DIR') is not None:
        os.environ['TENSORPACK_DATASET'] = os.environ['DATA_DIR']
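    # Illustrative invocation (paths and the script name are placeholders, not from the original):
    #   LOG_DIR=/tmp/train_log DATA_DIR=/data/cifar python train.py --gpu 0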
    logger.info("Parameters: n= {}, w= {}, c= {}, o= {}, batch_size={}, stopgrad= {}".format(NUM_UNITS,\
        WIDTH, INIT_CHANNEL, OPTIMAL_AT, BATCH_SIZE, STOP_GRADIENTS))

    config = get_config()
    if args.load:
        config.session_init = SaverRestore(args.load)
    if args.gpu:
        config.nr_tower = len(args.gpu.split(','))
    SyncMultiGPUTrainer(config).train()
Example #22
    parser.add_argument('--batch', help="batch_size", type=int, default=1)
    parser.add_argument('--gpu', help='gpu list', default=0)
    parser.add_argument('--train', help='mode', default=False)
    parser.add_argument('--logname', help="modify the log dir")
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    BATCH = args.batch

    print(args.datadir)
    print(os.path.join(args.datadir, '*.lmdb?'))
    if args.train:
        #auto_set_dir(action=None, name=None)
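        # In tensorpack, auto_set_dir points the logger at "train_log/<script_name>:<name>";
        # the action flag used in other examples here roughly means: 'k' keep the existing dir,
        # 'd' delete it, 'n' make a new time-stamped one, 'b' back it up (semantics may vary
        # across tensorpack versions).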
        logger.auto_set_dir(name=args.logname)
        #how to get data

        lmdbs = glob.glob(os.path.join(args.datadir, '*.lmdb?'))
        ds_train = [Decoderlmdb(lmdb) for lmdb in lmdbs]
        ds_train = RandomMixData(ds_train)
        ds_train = BatchData(ds_train, BATCH)
        ds_train = PrefetchData(ds_train, 100, 1)

        #lmdbs = glob.glob(os.path.join(datadir, 'val*.lmdb'))
        #ds_val = [Decoderlmdb(lmdb) for lmdb in lmdbs]
        #ds_val = RandomMixData(ds_val)
        #ds_val = BatchData(ds_val, BATCH_SIZE)
        #ds_val = FixedSizeData(ds_val, 100)
        #ds_val = PrefetchDataZMQ(ds_val, 8)
        