Example no. 1 (score: 0)
def run_exp(param):
    """Train a network on ten-class image data and report per-round metrics.

    Loads training/testing splits, batches them, carves off a validation
    set, then runs `param.num_round` update rounds, evaluating the
    train/valid/test splits after each round (progress goes to stderr).

    Args:
        param: experiment configuration; must provide `seed`, `path_data`,
            `batch_size`, `num_round`, `set_round()`, and accept an assigned
            `num_train`.
    """
    np.random.seed(param.seed)
    net = nncfg.create_net(param)
    print('network configure end, start loading data ...')

    # load in data: all ten digit classes for both splits
    train_images, train_labels = load(list(range(10)), 'training',
                                      param.path_data)
    test_images, test_labels = load(list(range(10)), 'testing',
                                    param.path_data)

    # create a batch data
    # nbatch: batch size
    # doshuffle: True, shuffle the data
    # scale: 1.0/256 scale by this factor so all features are in [0,1]
    train_xdata, train_ylabel = nncfg.create_batch(train_images, train_labels,
                                                   param.batch_size, True,
                                                   1.0 / 256.0)
    test_xdata, test_ylabel = nncfg.create_batch(test_images, test_labels,
                                                 param.batch_size, True,
                                                 1.0 / 256.0)

    # split off a 10000-example validation set; from here on, counts are in
    # whole batches (train_xdata's leading axis is the batch axis)
    ntrain = train_xdata.shape[0]
    nvalid = 10000
    assert nvalid % param.batch_size == 0
    # floor division keeps the index an int (was `int(nvalid / batch_size)`)
    nvalid //= param.batch_size
    valid_xdata, valid_ylabel = train_xdata[0:nvalid], train_ylabel[0:nvalid]
    train_xdata, train_ylabel = train_xdata[nvalid:ntrain], train_ylabel[
        nvalid:ntrain]

    # setup one evaluator per data split
    evals = []
    evals.append(
        nnet.NNEvaluator(net, train_xdata, train_ylabel, param, 'train'))
    evals.append(
        nnet.NNEvaluator(net, valid_xdata, valid_ylabel, param, 'valid'))
    evals.append(nnet.NNEvaluator(net, test_xdata, test_ylabel, param, 'test'))

    # set parameters
    param.num_train = train_ylabel.size
    print('loading end,%d train,%d valid,%d test, start update ...' %
          (train_ylabel.size, valid_ylabel.size, test_ylabel.size))

    # By default, `net` has number of updaters equal to 2x the amount of network
    # weights we store, e.g. in the paper they said A, B, a, b, so each has a
    # separate SGHMC updater and also its own hyperparameter updater.
    for it in range(param.num_round):
        param.set_round(it)
        net.update_all(train_xdata, train_ylabel)
        sys.stderr.write('[%d]' % it)
        for ev in evals:
            ev.eval(it, sys.stderr)
        sys.stderr.write('\n')
    print('all update end')
Example no. 2 (score: 0)
def run_exp(param):
    """Configure a network, load the ten-class data, and run training.

    After each of `param.num_round` update rounds the train/valid/test
    evaluators write their metrics to stderr.
    """
    np.random.seed(param.seed)
    net = nncfg.create_net(param)
    print('network configure end, start loading data ...')

    # pull in raw images/labels for every digit class
    digits = range(10)
    images_tr, labels_tr = load(digits, 'training', param.path_data)
    images_te, labels_te = load(digits, 'testing', param.path_data)

    # batch the data: shuffle enabled, pixels scaled by 1/256 into [0, 1]
    xtrain, ytrain = nncfg.create_batch(
        images_tr, labels_tr, param.batch_size, True, 1.0 / 256.0)
    xtest, ytest = nncfg.create_batch(
        images_te, labels_te, param.batch_size, True, 1.0 / 256.0)

    # carve the first 10000 examples (a whole number of batches) off the
    # front of the training set as the validation split
    nbatch_total = xtrain.shape[0]
    assert 10000 % param.batch_size == 0
    nhold = 10000 // param.batch_size
    xvalid, yvalid = xtrain[:nhold], ytrain[:nhold]
    xtrain, ytrain = xtrain[nhold:nbatch_total], ytrain[nhold:nbatch_total]

    # one evaluator per data split
    evals = [
        nnet.NNEvaluator(net, xtrain, ytrain, param, 'train'),
        nnet.NNEvaluator(net, xvalid, yvalid, param, 'valid'),
        nnet.NNEvaluator(net, xtest, ytest, param, 'test'),
    ]

    # record the (post-split) training size on the experiment config
    param.num_train = ytrain.size
    print('loading end,%d train,%d valid,%d test, start update ...' %
          (ytrain.size, yvalid.size, ytest.size))

    for rnd in range(param.num_round):
        param.set_round(rnd)
        net.update_all(xtrain, ytrain)
        sys.stderr.write('[%d]' % rnd)
        for ev in evals:
            ev.eval(rnd, sys.stderr)
        sys.stderr.write('\n')
    print('all update end')
Example no. 3 (score: 0)
def run_exp(param):
    """Train a 2-layer MLP, evaluate per round, and plot the error curve.

    Same pipeline as the other variants but builds the net with
    `nncfg.mlp2layer`, collects every evaluator's per-round results, and
    saves a plot of the error metric to 'test-err.png'.

    Args:
        param: experiment configuration; must provide `seed`, `path_data`,
            `batch_size`, `num_round`, `set_round()`, and accept an assigned
            `num_train`.
    """
    np.random.seed(param.seed)
    net = nncfg.mlp2layer(param)
    print('network configure end, start loading data ...')

    # load in data: all ten digit classes for both splits
    train_images, train_labels = load(range(10), 'training', param.path_data)
    test_images, test_labels = load(range(10), 'testing', param.path_data)

    # create a batch data
    # nbatch: batch size
    # doshuffle: True, shuffle the data
    # scale: 1.0/256 scale by this factor so all features are in [0,1]
    train_xdata, train_ylabel = nncfg.create_batch(train_images, train_labels,
                                                   param.batch_size, True,
                                                   1.0 / 256.0)
    test_xdata, test_ylabel = nncfg.create_batch(test_images, test_labels,
                                                 param.batch_size, True,
                                                 1.0 / 256.0)

    # split off a 10000-example validation set; counts below are in batches
    ntrain = train_xdata.shape[0]
    nvalid = 10000
    assert nvalid % param.batch_size == 0
    # floor division keeps the index an int (was `int(nvalid / batch_size)`)
    nvalid //= param.batch_size
    valid_xdata, valid_ylabel = train_xdata[0:nvalid], train_ylabel[0:nvalid]
    train_xdata, train_ylabel = train_xdata[nvalid:ntrain], train_ylabel[
        nvalid:ntrain]

    # setup one evaluator per data split
    evals = []
    evals.append(
        nnet.NNEvaluator(net, train_xdata, train_ylabel, param, 'train'))
    evals.append(
        nnet.NNEvaluator(net, valid_xdata, valid_ylabel, param, 'valid'))
    evals.append(nnet.NNEvaluator(net, test_xdata, test_ylabel, param, 'test'))

    # set parameters
    param.num_train = train_ylabel.size
    print('loading end,%d train,%d valid,%d test, start update ...' %
          (train_ylabel.size, valid_ylabel.size, test_ylabel.size))

    # collect every evaluator's results each round for plotting afterwards
    modelevals = []
    for it in range(param.num_round):
        param.set_round(it)
        net.update_all(train_xdata, train_ylabel)
        sys.stderr.write('[%d]' % it)
        for ev in evals:
            ev.eval(it, sys.stderr)
            modelevals.append(ev.results)
        sys.stderr.write('\n')

    # one row per round; columns are the three evaluators' results flattened
    # in train/valid/test order
    modelevals = np.array(modelevals).reshape(param.num_round, -1)
    # NOTE(review): the burn-in start (24), stride (10), column index (4),
    # and y-limits are hard-coded to this experiment's num_round and
    # evaluator result layout — confirm before reusing with other configs.
    # If num_round <= 24, `inds` is empty and the plot is blank.
    inds = np.arange(24, param.num_round, 10)
    plt.plot(inds, modelevals[inds, 4], 'x-.', color='purple')
    plt.ylim(0.015, 0.05)
    plt.ylabel('test error')
    plt.xlabel('iteration')
    plt.savefig('test-err.png')

    print('all update end')