Example No. 1
weight_decay = config['weight_decay']
net = []
regularizers = []
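# Dummy random batch in MNIST shape (N, C, H, W); presumably used only so each
# layer can infer its input dimensions when the stack is built.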
inputs = np.random.randn(config['batch_size'], 1, 28, 28)
net += [layers.Convolution(inputs, 16, 5, "conv1")]
regularizers += [
    layers.L2Regularizer(net[-1].weights, weight_decay, 'conv1_l2reg')
]
net += [layers.MaxPooling(net[-1], "pool1")]
net += [layers.ReLU(net[-1], "relu1")]
net += [layers.Convolution(net[-1], 32, 5, "conv2")]
regularizers += [
    layers.L2Regularizer(net[-1].weights, weight_decay, 'conv2_l2reg')
]
net += [layers.MaxPooling(net[-1], "pool2")]
net += [layers.ReLU(net[-1], "relu2")]
# 7x7 feature maps at this point: the 28x28 input is halved by each of the two 2x2 max-poolings (28 -> 14 -> 7)
net += [layers.Flatten(net[-1], "flatten3")]
net += [layers.FC(net[-1], 512, "fc3")]
regularizers += [
    layers.L2Regularizer(net[-1].weights, weight_decay, 'fc3_l2reg')
]
net += [layers.ReLU(net[-1], "relu3")]
net += [layers.FC(net[-1], 10, "logits")]

data_loss = layers.SoftmaxCrossEntropyWithLogits()
loss = layers.RegularizedLoss(data_loss, regularizers)

nn.train(train_x, train_y, valid_x, valid_y, net, loss, config)
nn.evaluate("Test", test_x, test_y, net, loss, config)
Example No. 2
        x_data = []
        for input_value in input_data:
            x_data.append(input_value[0])
            x_data.append(input_value[1])

        train_x.append(x_data)
        train_y.append(output)

train_x = np.array(train_x)
train_y = np.array(train_y)

net = []
inputs = np.random.randn(config['batch_size'], train_x.shape[1])  # input width = features per sample (100, matching the reshape in the evaluation loop below)
net += [layers.FC(inputs, 50, "fc1")]
net += [layers.Sigmoid(net[-1], "sg1")]
net += [layers.FC(net[-1], 10, "fc2")]
net += [layers.Sigmoid(net[-1], "sg2")]
net += [layers.FC(net[-1], 5, "fc3")]

loss = layers.MeanSquareError()

nn.train(train_x, train_y, net, loss, config)

while True:
    pointsFilename = input("Enter file name:")
    with open(pointsFilename) as f:
        points = json.load(f)
        points = np.array(points).reshape(1, 100)
    nn.evaluate(net, np.array(points))
    print('DONE')
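The loop above expects the file to deserialize into 100 numbers, which are reshaped into a single 1x100 input row. A hypothetical way to produce such a file, assuming 50 (x, y) points flattened in the same interleaved order as the training data:

import json
import numpy as np

points = np.random.rand(50, 2)        # 50 made-up (x, y) points, for illustration only
flat = points.reshape(-1).tolist()    # interleaved as x0, y0, x1, y1, ...
with open("points.json", "w") as f:   # hypothetical file name
    json.dump(flat, f)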
Example No. 3
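This snippet begins mid-way through a sweep over weight-decay values (the accuracies dict and SAVE_DIR used below are set up outside the shown lines). A plausible opening, sketched here only for orientation and mirroring Example No. 1, is given below; the sweep values are hypothetical, and the snippet then picks up at the closing bracket of the conv1 regularizer list.

accuracies = {}
for weight_decay in [0.0, 1e-4, 1e-3, 1e-2]:  # hypothetical sweep values
    net = []
    regularizers = []
    inputs = np.random.randn(config['batch_size'], 1, 28, 28)
    net += [layers.Convolution(inputs, 16, 5, "conv1")]
    regularizers += [
        layers.L2Regularizer(net[-1].weights, weight_decay, "conv1_l2reg")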
    ]
    net += [layers.MaxPooling(net[-1], "pool1")]
    net += [layers.ReLU(net[-1], "relu1")]
    net += [layers.Convolution(net[-1], 32, 5, "conv2")]
    regularizers += [
        layers.L2Regularizer(net[-1].weights, weight_decay, "conv2_l2reg")
    ]
    net += [layers.MaxPooling(net[-1], "pool2")]
    net += [layers.ReLU(net[-1], "relu2")]
    # 7x7
    net += [layers.Flatten(net[-1], "flatten3")]
    net += [layers.FC(net[-1], 512, "fc3")]
    regularizers += [
        layers.L2Regularizer(net[-1].weights, weight_decay, "fc3_l2reg")
    ]
    net += [layers.ReLU(net[-1], "relu3")]
    net += [layers.FC(net[-1], 10, "logits")]

    data_loss = layers.SoftmaxCrossEntropyWithLogits()
    loss = layers.RegularizedLoss(data_loss, regularizers)

    nn.train(train_x, train_y, valid_x, valid_y, net, loss, config)
    accuracies[weight_decay] = nn.evaluate("Test", test_x, test_y, net, loss,
                                           config)

    # Pause between runs to let the machine cool down; remove or comment this out if overheating is not a concern for you.
    time.sleep(120)

with open(SAVE_DIR / "results.json", mode="w+") as file:
    json.dump(accuracies, file, sort_keys=False, ensure_ascii=False, indent=2)
Example No. 4
    trainData = loadMNIST.loadMNISTImages('train-images.idx3-ubyte')
    trainLabels = loadMNIST.loadMNISTLabels('train-labels.idx1-ubyte')
    testData = loadMNIST.loadMNISTImages('t10k-images.idx3-ubyte')
    testLabels = loadMNIST.loadMNISTLabels('t10k-labels.idx1-ubyte')
    trainProbabilities = np.zeros((10, trainLabels.size))
    for a in range(trainLabels.size):
        trainProbabilities[trainLabels[a], a] = 1
    testProbabilities = np.zeros((10, testLabels.size))
    for a in range(testLabels.size):
        testProbabilities[testLabels[a], a] = 1
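    # The two loops above fill one-hot target matrices (one column per sample);
    # an equivalent vectorized form, with no Python loop, would be e.g.:
    #   trainProbabilities[trainLabels, np.arange(trainLabels.size)] = 1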

    estimatedTrainProbabilities = nn.ff(model, trainData)
    trainLoss = np.zeros(epochMax+1)
    trainAcc = np.zeros(epochMax+1)
    trainLoss[0], trainAcc[0] = nn.evaluate(model, opt, trainData, trainProbabilities)    
    testLoss = np.zeros(epochMax+1)
    testAcc = np.zeros(epochMax+1)
    testLoss[0], testAcc[0] = nn.evaluate(model, opt, testData, testProbabilities)    
    for epoch in range(1, epochMax+1):
        nn.train(model, opt, trainData, trainProbabilities)
        trainLoss[epoch], trainAcc[epoch] = nn.evaluate(model, opt, trainData, trainProbabilities)    
        testLoss[epoch], testAcc[epoch] = nn.evaluate(model, opt, testData, testProbabilities)    
        print('Epoch:', epoch, ', Learning Rate:', opt.lr,
              ', Loss:', trainLoss[epoch], '/', testLoss[epoch],
              'Acc:', trainAcc[epoch], '/', testAcc[epoch])
        if trainLoss[epoch] > trainLoss[epoch-1]:
            opt.lrDecay()

    time_end = time.time()
    print('Elapsed time:', time_end-time_start, 'seconds.')
    print('Training accuracy:', trainAcc[-1]*100, '%;')
    print('Test accuracy:', testAcc[-1]*100, '%.')
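The decay policy above uses only two things from the optimizer: an lr attribute (printed each epoch) and an lrDecay() method called whenever the training loss increases. A minimal, hypothetical optimizer stub exposing that surface (not the project's actual class) could look like:

class SimpleOptimizer:
    # hypothetical stub; the real opt object used above comes from the project
    def __init__(self, lr=0.1, decay_factor=0.5):
        self.lr = lr
        self.decay_factor = decay_factor

    def lrDecay(self):
        # shrink the learning rate when the training loss stops improving
        self.lr *= self.decay_factor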
Example No. 5
def predict(in_fname,
            lin_n_cv_iters,
            n_cv_iters,
            regularizations,
            n_labs,
            age_index,
            gender_index,
            out_fname,
            nn_out_fname=None,
            verbose=False,
            emb_fnames=None):

    if verbose:
        print "loading data"

    X_train, Y_train, X_validation, Y_validation, X_test, Y_test = features.get_data(
        in_fname)

    emb_data_list = [None]
    emb_fname_list = ['']
    if emb_fnames is not None:
        for emb_fname in emb_fnames:
            emb_data_list.append(emb.get_emb_data(emb_fname))
            emb_fname_list.append(emb_fname)

    if verbose:
        print "training, validating and testing models"

    results = []

    for e, emb_data in enumerate(emb_data_list):
        if verbose:
            print(e)

        if verbose:
            print "-->L2"

        model = models.L2(X_train, Y_train, X_validation, Y_validation, X_test,
                          Y_test, n_labs, emb_data)
        if lin_n_cv_iters == -1:
            params = [[False, True], regularizations]
        else:
            params = [['sample', False, True],
                      ['uniform', regularizations[0], regularizations[-1]]]

        model.crossvalidate(params=params,
                            param_names=['fit_intercept', 'C'],
                            n_cv_iters=lin_n_cv_iters)
        model.test()
        s = model.summarize()
        s['emb_fname'] = emb_fname_list[e]
        results.append(s)

        if verbose:
            print "-->L1"

        model = models.L1(X_train, Y_train, X_validation, Y_validation, X_test,
                          Y_test, n_labs, age_index, gender_index, emb_data)
        if lin_n_cv_iters == -1:
            params = [[False, True], regularizations]
        else:
            params = [['sample', False, True],
                      ['uniform', regularizations[0], regularizations[-1]]]
        model.crossvalidate(params=params,
                            param_names=['fit_intercept', 'C'],
                            n_cv_iters=lin_n_cv_iters)
        model.test()
        s = model.summarize()
        s['emb_fname'] = emb_fname_list[e]
        results.append(s)

        if verbose:
            print "-->RandomForest"

        model = models.RandomForest(X_train, Y_train, X_validation,
                                    Y_validation, X_test, Y_test, emb_data)
        if n_cv_iters == -1:
            params = [[1, 10, 20], [1, 3, 10],
                      ['sqrt_n_features', 'n_features'],
                      [1, 3, 10], [1, 3, 10], [True, False],
                      ['gini', 'entropy']]
        else:
            params = [['randint', 1, 20], ['randint', 1, 10],
                      ['sample', 'sqrt_n_features', 'n_features'],
                      ['randint', 1, 10], ['randint', 1, 10],
                      ['sample', True, False], ['sample', 'gini', 'entropy']]
        param_names = [
            'n_estimators', 'max_depth', 'max_features', 'min_samples_split',
            'min_samples_leaf', 'bootstrap', 'criterion'
        ]
        model.crossvalidate(params=params,
                            param_names=param_names,
                            n_cv_iters=n_cv_iters)
        model.test()
        s = model.summarize()
        s['emb_fname'] = emb_fname_list[e]
        results.append(s)

        if emb_data is not None:
            if verbose:
                print "-->Only embeddings"

            model = models.L(emb_data[0], Y_train, emb_data[1], Y_validation,
                             emb_data[2], Y_test, None)
            if lin_n_cv_iters == -1:
                params = [['l1', 'l2'], [False, True], regularizations]
            else:
                params = [['sample', 'l1', 'l2'], ['sample', False, True],
                          ['uniform', regularizations[0], regularizations[-1]]]

            model.crossvalidate(params=params,
                                param_names=['penalty', 'fit_intercept', 'C'],
                                n_cv_iters=lin_n_cv_iters)
            model.test()
            s = model.summarize()
            s['emb_fname'] = emb_fname_list[e]
            results.append(s)

    with open(out_fname, 'w') as fout:
        fout.write(yaml.dump(results))

    if nn_out_fname is not None:
        best_model = nn.evaluate(nn_out_fname,
                                 n_cv_iters,
                                 20,
                                 X_train,
                                 Y_train,
                                 X_validation,
                                 Y_validation,
                                 X_test,
                                 Y_test,
                                 45,
                                 models=['cnn2'],
                                 random_seed=345,
                                 verbose=verbose)
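The params lists passed to crossvalidate above follow a small convention: with exhaustive search (n_cv_iters == -1) each entry is an explicit list of candidate values, otherwise each entry is tagged with how to draw it ('sample' from a discrete set, 'uniform' over a real range, 'randint' over an integer range). A hypothetical one-draw sampler, shown only to illustrate that encoding (it is not the project's crossvalidate implementation):

import random

def draw_param(spec):
    # spec follows the tagged convention used in the calls above
    tag, *rest = spec
    if tag == 'sample':
        return random.choice(rest)
    if tag == 'uniform':
        return random.uniform(rest[0], rest[1])
    if tag == 'randint':
        return random.randint(rest[0], rest[1])
    raise ValueError('unknown spec: %r' % (spec,))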
Example No. 6
train_x, valid_x, test_x = (x - train_mean for x in (train_x, valid_x, test_x))
train_y, valid_y, test_y = (dense_to_one_hot(y, 10)
                            for y in (train_y, valid_y, test_y))

net = []
inputs = np.random.randn(config['batch_size'], 1, 28, 28)
net += [layers.Convolution(inputs, 16, 5, "conv1")]
net += [layers.MaxPooling(net[-1], "pool1")]
net += [layers.ReLU(net[-1], "relu1")]
net += [layers.Convolution(net[-1], 32, 5, "conv2")]
net += [layers.MaxPooling(net[-1], "pool2")]
net += [layers.ReLU(net[-1], "relu2")]
# out = 7x7
net += [layers.Flatten(net[-1], "flatten3")]
net += [layers.FC(net[-1], 512, "fc3")]
net += [layers.ReLU(net[-1], "relu3")]
net += [layers.FC(net[-1], 10, "logits")]

loss = layers.SoftmaxCrossEntropyWithLogits()

nn.train(train_x, train_y, valid_x, valid_y, net, loss, config)
nn.evaluate("Test", test_x, test_y, net, loss, config)

with SummaryWriter() as writer:
    nn.train(train_x, train_y, valid_x, valid_y, net, loss, config)
    test_acc, loss_avg = nn.evaluate("Test", test_x, test_y, net, loss, config)
    writer.add_hparams({'weight_decay': 0}, {
        'hparam/accuracy': test_acc,
        'hparam/avg_loss': loss_avg
    })
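dense_to_one_hot is used above to turn integer class labels into 10-way one-hot targets; a minimal sketch of such a helper (assumed behavior, not necessarily the project's exact implementation):

import numpy as np

def dense_to_one_hot(labels, num_classes):
    # labels: 1-D array of integer class ids -> (N, num_classes) one-hot matrix
    one_hot = np.zeros((labels.shape[0], num_classes))
    one_hot[np.arange(labels.shape[0]), labels] = 1
    return one_hot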