Ejemplo n.º 1
0
def main():
    """Run the MNIST classification example end to end.

    Parses command-line options, resolves the requested layer type, learning
    rule, and backend, trains the recurrent model on MNIST, evaluates on the
    test split, and pickles the full run log to ``mnist_<timestamp>.pkl``.
    """
    arg_parser = argparse.ArgumentParser(description='mnist classification example')
    arg_parser.add_argument('--seed', dest='seed', type=int, default=None,
                            help='random seed')
    arg_parser.add_argument('--backend', dest='backend', type=str, default='pytorch',
                            help='choice of DL framework')
    arg_parser.add_argument('--epoch', dest='epoch', type=int, default=100,
                            help='epoch to train')
    arg_parser.add_argument('--batch_size', dest='batch_size', type=int, default=128,
                            help='batch size')
    arg_parser.add_argument('--learning_rule', dest='learning_rule', type=str, default='eprop',
                            help='learning rule')
    arg_parser.add_argument('--layer', dest='layer', type=str, default='ALIF',
                            help='type of RNN/RSNN to use')
    arg_parser.add_argument('--hidden', dest='hidden', type=int, default=128,
                            help='number of neurons in a hidden layer')
    arg_parser.add_argument('--firing_thresh', dest='firing_thresh', type=float, default=1.0,
                            help='firing threshhold')
    arg_parser.add_argument('--eprop_mode', dest='eprop_mode', type=str, default='adaptive',
                            help='eprop mode to use')
    arg_parser.add_argument('--reg', dest='reg', action='store_true', default=False,
                            help='enable regularization')
    arg_parser.add_argument('--reg_coeff', dest='reg_coeff', type=float, default=0.00005,
                            help='regularization coefficient')
    arg_parser.add_argument('--reg_target', dest='reg_target', type=int, default=10,
                            help='regularization target')
    opts = arg_parser.parse_args()

    # Resolve the string options into the concrete classes/modules they name.
    layer_cls = {'rnn': BasicRNNModel, 'lif': LIFRNNModel,
                 'alif': ALIFRNNModel}[opts.layer.lower()]
    rule_cls = {'bptt': Backprop, 'eprop': Eprop}[opts.learning_rule.lower()]
    n = {'torch': pytb, 'pytorch': pytb, 'pyt': pytb,
         'tf': tfb, 'tensorflow': tfb}[opts.backend.lower()]

    # Load MNIST and rescale pixel intensities into [0, 1].
    x_train, y_train, x_test, y_test = MNIST().load()
    x_train = x_train / 255.
    x_test = x_test / 255.

    if opts.learning_rule.lower() == 'bptt' and opts.reg:
        # BPTT applies firing-rate regularization through the loss itself;
        # e-prop receives the same settings via the learning-rule kwargs below.
        base_loss = get_loss('categorical_crossentropy', backend=n)
        rate_penalty = get_loss('firing_rate_regularization', backend=n,
                                firing_rate_target=opts.reg_target)

        def loss_with_reg(*, model, x, y_true):
            penalty = rate_penalty(model=model, x=x, y_true=y_true)
            return base_loss(model=model, x=x, y_true=y_true) + opts.reg_coeff * penalty

        loss = loss_with_reg
    else:
        loss = 'categorical_crossentropy'

    rnn = layer_cls(opts.hidden, output_size=10, backend=n,
                    task_type='classification', return_sequence=False,
                    v_th=opts.firing_thresh, seed=opts.seed)
    evaluated_model = Evaluator(model=rnn, loss=loss,
                                metrics=['accuracy', 'firing_rate'])
    algo = rule_cls(evaluated_model,
                    mode=opts.eprop_mode,
                    firing_rate_regularization=opts.reg,
                    c_reg=opts.reg_coeff,
                    f_target=opts.reg_target)
    training_log = Trainer(algo).train(x_train, y_train,
                                       epochs=opts.epoch,
                                       batch_size=opts.batch_size)
    test_result = evaluated_model.evaluate(x_test, y_test, return_nparray=True)
    print('Test: ', test_result)

    # Persist the options plus results, keyed by completion timestamp.
    completion_time = int(time.time())
    task_log = vars(opts)
    task_log['name'] = 'mnist'
    task_log['log'] = training_log
    task_log['completion_time'] = completion_time
    task_log['test_result'] = test_result
    with open(f'mnist_{completion_time}.pkl', 'wb') as f:
        pickle.dump(task_log, f)
Ejemplo n.º 2
0
def get_evaluated_model(layer, hidden, backend, v_th, seed, loss):
    """Build a 10-way classification model via *layer* and wrap it in an Evaluator.

    The model emits only the final-step output (``return_sequence=False``)
    and is scored with the accuracy metric.
    """
    model = layer(
        hidden,
        output_size=10,
        backend=backend,
        task_type='classification',
        return_sequence=False,
        v_th=v_th,
        seed=seed,
    )
    return Evaluator(model=model, loss=loss, metrics=['accuracy'])
Ejemplo n.º 3
0
                     pseudo_angle=angle)
# Surrogate derivative for the spiking nonlinearity, paired with the
# `activation` partial built just above (cut off in this excerpt).
# `bw` and `angle` presumably shape the pseudo-derivative — defined earlier.
d_activation = partial(n.d_heaviside2,
                       v_th=args.firing_thresh,
                       pseudo_bandwidth=bw,
                       pseudo_angle=angle)
# 10-way classifier that returns only the last-step output.
rnn = layer(args.hidden,
            output_size=10,
            backend=n,
            activation=activation,
            d_activation=d_activation,
            task_type='classification',
            return_sequence=False,
            v_th=args.firing_thresh,
            seed=args.seed)
evaluated_model = Evaluator(model=rnn,
                            loss=loss,
                            metrics=['accuracy', 'firing_rate'])
# NOTE(review): `x_pred`, `burnin`, and `sd` are extra kwargs not used by the
# other examples in this file — semantics defined by the learning rule itself.
algo = learning_rule(evaluated_model,
                     mode=args.eprop_mode,
                     firing_rate_regularization=args.reg,
                     c_reg=args.reg_coeff,
                     x_pred=x_test,
                     burnin=200,
                     sd=0,
                     f_target=args.reg_target)
trainer = Trainer(algo)
# Train with the test split as validation data so the log tracks both curves.
training_log = trainer.train(x_train,
                             y_train,
                             epochs=args.epoch,
                             batch_size=args.batch_size,
                             validation_data=(x_test, y_test))
Ejemplo n.º 4
0
def main():
    """Train an MNIST classifier with a Manhattan-style learning rule.

    Parses command-line options, builds the model through the framework
    adaptor matching ``--framework``, trains with the chosen learning rule,
    evaluates on the test split, and pickles the full run log to
    ``mnist_manhattan_<timestamp>.pkl``.

    Raises:
        ValueError: if ``--framework`` names an unsupported framework.
        KeyError: if ``--learning_rule`` names an unknown learning rule.
    """
    parser = argparse.ArgumentParser(
        description='mnist classification example')
    parser.add_argument('--seed',
                        dest='seed',
                        type=int,
                        default=0,
                        help='random seed')
    parser.add_argument('--framework',
                        dest='framework',
                        type=str,
                        default='pytorch',
                        help='choice of DL framework')
    parser.add_argument('--epoch',
                        dest='epoch',
                        type=int,
                        default=100,
                        help='epoch to train')
    parser.add_argument('--batch_size',
                        dest='batch_size',
                        type=int,
                        default=128,
                        help='batch size')
    parser.add_argument('--learning_rule',
                        dest='learning_rule',
                        type=str,
                        default='manhattan',
                        help='learning rule')
    args = parser.parse_args()

    _learning_rules = {
        'bptt':
        Backprop,
        'manhattan':
        ManhattanRule,
        'manhattan_material':
        lambda x: ManhattanMaterialRule(x, material=Material())
    }
    learning_rule = _learning_rules[args.learning_rule.lower()]
    framework = args.framework.lower()
    if framework in ['torch', 'pytorch', 'pyt']:
        n = pytb
        build_function = build_pytorch_model
        adaptor = PytorchAdaptor
    elif framework in ['tf', 'tensorflow']:
        n = tfb
        build_function = build_keras_model
        adaptor = KerasAdaptor
    else:
        # Fail fast with a clear message; previously an unrecognized framework
        # fell through and crashed later with a NameError on `n`.
        raise ValueError(f'unsupported framework: {args.framework}')

    if args.seed is not None:
        n.seed_random(args.seed)

    # generate data: scale pixels into [0, 1] and flatten 28x28 images
    x_train, y_train, x_test, y_test = MNIST().load()
    x_train, x_test = x_train / 255., x_test / 255.
    x_train = x_train.reshape(-1, 28 * 28)
    x_test = x_test.reshape(-1, 28 * 28)

    # Wrap the framework-native model so the trainer sees a uniform interface.
    model = adaptor(build_function())
    loss = 'categorical_crossentropy'
    evaluated_model = Evaluator(model=model, loss=loss, metrics=['accuracy'])
    algo = learning_rule(evaluated_model)
    trainer = Trainer(algo)
    training_log = trainer.train(x_train,
                                 y_train,
                                 epochs=args.epoch,
                                 batch_size=args.batch_size,
                                 validation_data=(x_test, y_test))
    test_result = evaluated_model.evaluate(x_test, y_test, return_nparray=True)
    print('Test: ', test_result)

    # Persist the options plus results, keyed by completion timestamp.
    completion_time = int(time.time())
    task_log = vars(args)
    task_log['name'] = 'mnist_manhattan'
    task_log['log'] = training_log
    task_log['completion_time'] = completion_time
    task_log['test_result'] = test_result
    with open(f'mnist_manhattan_{completion_time}.pkl', 'wb') as f:
        pickle.dump(task_log, f)
Ejemplo n.º 5
0
def main():
    """Train an RNN/RSNN to reproduce a generated sine wave.

    Parses command-line options, generates one sine-wave sequence with the
    selected generator, trains the recurrent model on it, and pickles the
    run log to ``sinewave_<timestamp>.pkl``.
    """
    p = argparse.ArgumentParser(description='Learn sine wave')
    p.add_argument('--type', dest='type', type=int, default=2,
                   help='version of sine wave generator to use')
    p.add_argument('--seed', dest='seed', type=int, default=None,
                   help='random seed')
    p.add_argument('--backend', dest='backend', type=str, default='pytorch',
                   help='choice of DL framework')
    p.add_argument('--epoch', dest='epoch', type=int, default=1000,
                   help='epoch to train')
    p.add_argument('--learning_rule', dest='learning_rule', type=str, default='eprop',
                   help='learning rule')
    p.add_argument('--seqlen', dest='seqlen', type=int, default=1000,
                   help='number of time step of sine wave')
    p.add_argument('--clock', dest='clock', type=int, default=20,
                   help='clock argument of sine wave generation')
    p.add_argument('--size', dest='size', type=int, default=4,
                   help='size argument of sine wave generation')
    p.add_argument('--layer', dest='layer', type=str, default='LIF',
                   help='type of RNN/RSNN to use')
    p.add_argument('--hidden', dest='hidden', type=int, default=600,
                   help='number of neurons in a hidden layer')
    p.add_argument('--firing_thresh', dest='firing_thresh', type=float, default=0.615,
                   help='firing threshhold')
    p.add_argument('--eprop_mode', dest='eprop_mode', type=str, default='symmetric',
                   help='eprop mode to use')
    p.add_argument('--reg', dest='reg', action='store_true', default=False,
                   help='enable regularization')
    p.add_argument('--reg_coeff', dest='reg_coeff', type=float, default=0.00005,
                   help='regularization coefficient')
    p.add_argument('--reg_target', dest='reg_target', type=int, default=10,
                   help='regularization target')
    args = p.parse_args()

    # Resolve the string/int options into the concrete implementations they name.
    signal_fn = {1: sine_signal, 2: sine_signal_v2}[args.type]
    layer_cls = {'rnn': BasicRNNModel, 'lif': LIFRNNModel,
                 'alif': ALIFRNNModel}[args.layer.lower()]
    rule_cls = {'bptt': Backprop, 'eprop': Eprop}[args.learning_rule.lower()]
    n = backend = {'torch': pytb, 'pytorch': pytb, 'pyt': pytb,
                   'tf': tfb, 'tensorflow': tfb}[args.backend.lower()]

    # Generate one sine-wave sequence and add a leading batch dimension.
    _, x, y = signal_fn(seqlen=args.seqlen,
                        size=args.size,
                        clock=args.clock,
                        return_format=args.backend,
                        seed=args.seed)
    x = n.expand_dims(x, 0)
    y = n.expand_dims(y, 0)

    if args.learning_rule.lower() == 'bptt' and args.reg:
        # BPTT applies firing-rate regularization through the loss itself;
        # e-prop receives the same settings via the learning-rule kwargs below.
        mse_loss = get_loss('mse', backend=n)
        rate_penalty = get_loss('firing_rate_regularization', backend=n,
                                firing_rate_target=args.reg_target)

        def loss_with_reg(*, model, x, y_true):
            penalty = rate_penalty(model=model, x=x, y_true=y_true)
            return mse_loss(model=model, x=x, y_true=y_true) + args.reg_coeff * penalty

        loss = loss_with_reg
    else:
        loss = 'mse'

    rnn = layer_cls(args.hidden,
                    v_th=args.firing_thresh,
                    backend=backend,
                    seed=args.seed)
    evaluated_model = Evaluator(model=rnn,
                                loss=loss,
                                metrics=['mse', 'firing_rate'])
    algo = rule_cls(evaluated_model,
                    mode=args.eprop_mode,
                    firing_rate_regularization=args.reg,
                    c_reg=args.reg_coeff,
                    f_target=args.reg_target)
    training_log = Trainer(algo).train(x, y, epochs=args.epoch)

    # Persist the options plus the training log, keyed by completion timestamp.
    completion_time = int(time.time())
    task_log = vars(args)
    task_log['name'] = 'sine_wave'
    task_log['log'] = training_log
    task_log['completion_time'] = completion_time
    with open(f'sinewave_{completion_time}.pkl', 'wb') as f:
        pickle.dump(task_log, f)