Code example #1
# Assumes project-local helpers: an `MLP` module (providing CustomDataset,
# MLP, train, test) and a `mean_squared_loss` function, as in the source project.
import numpy as np
import torch
from torch.utils.data import DataLoader


def mlp(X_train,
        Y_train,
        X_test,
        Y_test,
        epochs=100,
        hidden=100,
        lr=0.01,
        batch_size=64):
    """MLP training."""

    input_feature_dim = X_train.shape[1]
    output_feature_dim = 1
    x_train = torch.tensor(X_train)
    y_train = torch.tensor(Y_train)
    x_test = torch.tensor(X_test)
    y_test = torch.tensor(Y_test)
    train_dataset = MLP.CustomDataset(x_train, y_train)
    test_dataset = MLP.CustomDataset(x_test, y_test)
    train_loader = DataLoader(train_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)

    model = MLP.MLP(n_feature=input_feature_dim,
                    n_hidden=hidden,
                    n_output=output_feature_dim,
                    activation='Sigmoid')

    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
    loss_func = torch.nn.MSELoss()

    for epoch in range(1, epochs + 1):
        loss = MLP.train(model, epoch, train_dataset, train_loader, optimizer,
                         loss_func)
        train_loss = MLP.test(model, train_loader, loss_func)
        test_loss = MLP.test(model, test_loader, loss_func)
        if epoch % 10 == 0:
            print(
                'Epoch: {:03d}, Loss: {:.5f}, train_loss: {:.5f}, test_loss: {:.5f}'
                .format(epoch, loss, train_loss, test_loss))

    model.eval()
    output = model(x_test)
    mse = mean_squared_loss(x_test.shape[0], np.array(y_test.detach()),
                            np.array(output[:, 0].detach()))
    train_output = model(x_train)
    train_mse = mean_squared_loss(x_train.shape[0], np.array(y_train.detach()),
                                  np.array(train_output[:, 0].detach()))
    print('Test MSE: {:.5f}'.format(mse))
    return model, train_mse, mse
Code example #2
import copy


def hyperparameter_search(args):
    l = args.layers
    for f in args.feature_extractor:
        for e in args.epochs:
            for lr in args.learning_rate:
                for a in args.alpha:
                    for n in args.num_examples:
                        print('Training model with params:',
                              [f, e, lr, l, a, n])
                        new_args = copy.deepcopy(args)
                        new_args.feature_extractor = f
                        new_args.epochs = e
                        new_args.learning_rate = lr
                        new_args.layers = l
                        new_args.alpha = a
                        new_args.num_examples = n
                        mlp = train(new_args)
                        save(mlp, new_args)
Code example #3
File: test_mlp.py Project: jw-develop/cs394-projects
def test(r,data,target,p_count):
	# X_train, X_test, y_train, y_test = train_test_split(
	# dataset[0], dataset[1], test_size=ts,random_state=r)

	# Train and test on the same data (the split above is commented out).
	X_train = data
	X_test = data
	y_train = target
	y_test = target

	# Train and predict values.
	classifier = mlp.train(p_count, X_train, y_train)
	y_pred = mlp.classify(classifier, X_test)

	# Print the results.
	print(y_pred)
	print(y_test.T)

	# Print percentage success.
	percent = 1 - np.mean(y_pred != y_test.T)
	print("\n{:.2f}\n".format(percent))

	return percent
Code example #4
def test(ts, r, dataset):
    X_train, X_test, y_train, y_test = train_test_split(dataset[0],
                                                        dataset[1],
                                                        test_size=ts,
                                                        random_state=r)

    # Perceptron count
    p_count = 1

    illegal = mlp.illegal_train(p_count, X_train, y_train, r)
    y_pred = mlp.illegal_classify(illegal, X_test)
    percent = 1 - np.mean(y_pred != y_test)
    print("\n{:.2f}".format(percent))

    # Training the classifier.
    classifier = mlp.train(p_count, X_train, y_train)
    y_pred = mlp.classify(classifier, X_test)

    percent = 1 - np.mean(y_pred != y_test)
    print("\n{:.2f}\n".format(percent))

    return percent
Code example #5
File: mlptest.py Project: awmanoj/machinelearning
import mlp
from numpy import array
import sys
inputs = array([[0,0],[0,1],[1,0],[1,1]])
targets = array([[0],[1],[1],[0]])   # XOR
targets2 = array([[0],[1],[1],[1]])  # OR
targets3 = array([[0],[0],[0],[1]])  # AND

if sys.argv[2] == "1":
	mlp.train(inputs, targets, 2, 1, 0.25, int(sys.argv[1]))
elif sys.argv[2] == "2":
	mlp.train(inputs, targets2, 2, 1, 0.25, int(sys.argv[1]))
else:
	mlp.train(inputs, targets3, 2, 1, 0.25, int(sys.argv[1]))
Code example #6
                        batch_size=batch_size,
                        transforms=[
                            transforms.ToTensor(),
                            transforms.Normalize((0.1307, ), (0.3081, ))
                        ])

# model = Net(device)
# model.to(device)

# optimizer = optim.SGD(model.parameters(), lr, momentum)

# print("NON-SEQUENTIAL")

# test(model, dataset.test_data(), device)
# train(epochs, model, dataset.train_data(), optimizer, device)
# test(model, dataset.test_data(), device)

print("SEQUENTIAL")
# del model

model = Net(device)
model.to(device)  # covers GPU placement; a separate .cuda() call is redundant
optimizer = optim.SGD(model.parameters(), lr, momentum)

for task in range(dataset.n_tasks):
    print("-- TASK %d" % task)
    train(epochs, model, dataset.train_data(task), optimizer, device)
    test(model, dataset.test_data(task), device)
test(model, dataset.test_data(), device)
Code example #7
    # n = [[EIGEN_VALUE_1, EIGEN_VALUE_2], [ECCENTRICITY], [NUMBER_GRAIN]]
    print("Trainning linear MLP...")
    # allComb = [list(j) for i in range(1,len(n)+1) for j in itertools.combinations(n, i)]
    allComb = [[
        MEAN_AREA, PERIMETER, R, B, G, EIGEN_VALUE_1, EIGEN_VALUE_2,
        ECCENTRICITY
    ]]
    for feat in allComb:
        # print(n, np.array(ftrain)[:, n].shape)
        # feat = [i for i in m]
        # for i in n: feat += i
        print('Parameters:', [features[i] for i in feat],
              " ##### Number of classes:", [i for i in grain_class])
        modelFile = result_dir + 'weights_' + ''.join([str(i)
                                                       for i in feat]) + '.h5'
        if os.path.isfile(modelFile):
            model = keras.models.load_model(modelFile)
        else:
            model = train(np.array(ftrain)[:, feat],
                          np.array(y_train),
                          modelf=modelFile)
            model.save(modelFile)
            print("Model file saved.")
        score = model.evaluate(np.array(ftest)[:, feat], np.array(y_test))
        print('MLP Test loss:', score[0])
        print('MLP Test accuracy:', score[1])

        fd.write("Featrues: " + str([features[i] for i in feat]) + '\n')
        fd.write('MLP Test loss: %f\n' % (score[0]))
        fd.write('MLP Test accuracy: %f\n\n' % (score[1]))
Code example #8
        print("testing_set size",len(testing_set))

        print("training_set size",len(training_set)*(input_size/1.0e9))
        print("testing_set size",len(testing_set)*(input_size/1.0e9))

        print("making model")
        #training_set = training_set[:5000]
        #testing_set = testing_set[:1000]

        X_train, y_train = utils.make_X_y(training_set, w, h,
                                          offset=[offset_w, offset_h],
                                          size=[w, h])
        X_test, y_test = utils.make_X_y(testing_set, w, h,
                                        offset=[offset_w, offset_h],
                                        size=[w, h])

        model = mlp.make_model(input_size, h1, h2=h2, classes=nb_classes,
                               activation='relu', dropout=0.3, lr=0.01)

        print("training model")
        score, x_mean, x_max = mlp.train(model, X_train, X_test, y_train,
                                         y_test, nb_classes, batch_size,
                                         nb_epoch)

        score = mlp.test(model, X_test, y_test, x_mean, x_max, nb_classes)
        
        print('Cross Validation result at:', i)
        print('Test score:', score[0])
        print('Test accuracy:', score[1])


        #for epoch in range(nb_epoch):
            #print("processing epoch",epoch)
            ##process batches
            #for start_index, end_index in make_batches(len(training_set), batch_size):
                ##print("processing batch",start_index, end_index)

                #X_train,y_train = utils.make_X_y(training_set[start_index:end_index],288,384,offset=[0,0],size=[100,100])
Code example #9
    h1 = 512
    h2 = 128
    h3 = None
    nb_classes = 2
    
    batch_size = 200
    nb_epoch = 100
    
    training_set = []

    for k in range(len(images)):
        training_set += images[k]

    # First, a good/bad classifier: collapse all non-zero labels to 1.
    training_set = [(x, 0 if y == 0 else 1) for x, y in training_set]

    random.shuffle(training_set)
        
    print("loading images")
    print("training_set size",len(training_set))

    X_train, y_train = utils.make_X_y(training_set, 384, 288)

    print("making model")
    model = mlp.make_model(input_size, h1, h2=h2, classes=nb_classes)
    print("training model")
    score, max_image, mean_image = mlp.train(model, X_train, None, y_train,
                                             None, nb_classes, batch_size,
                                             nb_epoch)

    mlp.save_model(model, "model-binary.json", "model-binary.h5")
Code example #10
        'Would you like to train or test? (0 for train, 1 for test) ')
    if option != '0' and option != '1':
        print('Invalid input!')

# Train
if option == '0':
    initial = input('Enter the initial neural network file: ')
    inputNodes, hiddenNodes, outputNodes, weights = mlp.readnn(initial)

    trainingFile = input('Enter the neural network training data file: ')
    trainingData = mlp.readdata(trainingFile)

    learn = float(input('Enter the learning rate: '))
    epochs = int(input('Enter the number of epochs: '))
    print("Training in progress!")
    weights = mlp.train(weights, trainingData, learn, epochs)

    print("Training completed!")
    trained = input('Enter the output file for the trained neural network: ')
    mlp.writetrained(trained, inputNodes, hiddenNodes, outputNodes, weights)
    continueprogram = input(
        'Would you like to test this neural network? (Y/N) ')

    if continueprogram.upper() == 'Y':

        inputNodes, hiddenNodes, outputNodes, weights = mlp.readnn(trained)
        testing = input('Enter the neural network testing data file: ')
        testingData = mlp.readdata(testing)

        confusion = mlp.test(outputNodes, weights, testingData)
Code example #11
from normalization import z_scaling
import mlp
import pandas as pd

df = pd.read_csv('heart_disease_dataset.csv')
columns_array = list(df.columns)
df_array = df.values
numerical_variables = [
    'age', 'resting_blood_pressure', 'cholesterol', 'max_heart_rate_achieved',
    'num_major_vessels', 'st_depression'
]
z_scaling(df_array, columns_array, numerical_variables)

model = mlp.MLP(df_array, 4)  # distinct name so the mlp module is not shadowed
model.train(1000)
model.model_evaluation()
Code example #12
            for i in range(gcolor.shape[0])
        ],
                         axis=0) / (area * 256)
        _, _, eigen_value = pca(ggray)
        eccentricity = eigen_value[0] / eigen_value[1]
        l = [mean_area, r, b, g, eigen_value[0], eigen_value[1], eccentricity]
        ftrain.append(np.array(l))
    ftest = []
    for gi in range(len(xctest)):
        gcolor = xctest[gi]
        ggray = xgtest[gi]
        h, w = ggray.shape
        area = np.sum(
            np.sum([[1.0 for j in range(w) if ggray[i, j]] for i in range(h)]))
        mean_area = area / (h * w)
        r, b, g = np.sum([
            gcolor[i, j] for j in range(gcolor.shape[1])
            for i in range(gcolor.shape[0])
        ],
                         axis=0) / (area * 256)
        _, _, eigen_value = pca(ggray)
        eccentricity = eigen_value[0] / eigen_value[1]
        l = [mean_area, r, b, g, eigen_value[0], eigen_value[1], eccentricity]
        ftest.append(l)

    # MLP
    print "Trainning linear MLP..."
    model = train(np.array(ftrain), np.array(y_train), 'weights.pkl')
    score = model.evaluate(np.array(ftest), np.array(y_test))
    print('cnn Test loss:', score[0])
    print('cnn Test accuracy:', score[1])
Code example #13
File: mlp_run.py Project: foolOnTheHill/ml
def run():
    # Network configuration
    num_outputs = 2
    learning_rate = [0.3, 0.5, 0.7]
    hidden_neurons = [30, 50, 60]
    epochs = [150, 500, 2000]

    # Tic-tac-toe dataset
    dataset = loadData()

    trainingResults = open('training-results.txt', 'w')

    networks = []
    errors = []
    index = 0
    for i in range(len(learning_rate)):
        for j in range(len(epochs)):
            for k in range(len(hidden_neurons)):
                print(index)
                hidden_units = [hidden_neurons[k]]
                (W, b, trainError, validationError) = train(
                    dataset['train'][0], dataset['train'][1],
                    dataset['validation'][0], dataset['validation'][1],
                    learning_rate[i], hidden_units, num_outputs, epochs[j])
                networks.append((learning_rate[i], epochs[j], hidden_units,
                                 num_outputs))
                errors.append((validationError, trainError, index))
                trainingResults.write("%d %f %d %d %f %f\n" %
                                      (index, learning_rate[i], epochs[j],
                                       hidden_neurons[k], validationError,
                                       trainError))
                index += 1

    trainingResults.close()

    bestNetworkResults = open('best-net-results.txt', 'w')

    best_network = networks[min(errors)[2]] # index of the network with the lowest validation error

    # Best parameters
    best_lr = best_network[0]
    best_ep = best_network[1]
    best_hd = best_network[2]
    best_no = best_network[3]

    bestNetworkResults.write('Learning rate: %f\n' % best_lr)
    bestNetworkResults.write('Epochs: %d\n' % best_ep)
    bestNetworkResults.write('Hidden units: %d\n\n' % best_hd[0])

    test_set_size = len(dataset['test'][1])

    test_networks = []
    test_errors = []
    for i in range(30):
        confusionMatrix = [[0, 0], [0, 0]]

        (W, b, trainError, validationError) = train(
            dataset['train'][0], dataset['train'][1],
            dataset['validation'][0], dataset['validation'][1],
            best_lr, best_hd, best_no, best_ep)
        T = fit(dataset['test'][0], W, b, best_hd, best_no, True)
        e = 0
        for k in range(test_set_size):
            if dataset['test'][1][k] != T[k]:
                e += 1
            confusionMatrix[getClass(dataset['test'][1][k])][getClass(T[k])] += 1
        e = float(e) / test_set_size

        bestNetworkResults.write("%d %f %f %f\n" % ((i+1), trainError, validationError, e))

        confFile = open('confusion-matrix-'+str(i+1)+'.txt', 'w')
        for p in range(2):
            s = ''
            for q in range(2):
                s += str(confusionMatrix[p][q]) + ' '
            s += '\n'
            confFile.write(s)
        confFile.close()

        test_networks.append( (W, b, best_hd, best_no) )
        test_errors.append( (e, trainError, validationError, i) )

    bestNetworkResults.close()

    best_test_network = min(test_errors)[3]
Code example #14
                    except Exception as e:
                        logger.printErrorCMD(
                            'error loading {}'.format(wavFile))
                        logger.printErrorCMD(e)
        x_data = np.concatenate(x_data, axis=0)
        y_data = np.concatenate(y_data, axis=0)

        if not prepared:
            network, train_fn, val_fn = mlp.prepare_train(
                model, n_bottleneck, n_dim, learning_rate, logger)
            if currentModelParams is not None:
                lasagne.layers.set_all_param_values(network,
                                                    currentModelParams)
            prepared = True
        train_err_file, train_batches_file, time_file = mlp.train(
            x_data, y_data, train_fn, batch_size)

        train_err += train_err_file
        train_batches += train_batches_file
    train_loss = train_err / train_batches
    exec_time = time.time() - start_time

    logger.printInfo("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, num_epochs, exec_time))
    logger.printInfo("  training loss:\t\t{:.6f}".format(train_loss))

    valid_err = 0
    valid_batches = 0
    valid_acc = 0
    valid_file_index = 0
    for wavFile in validAudioFilesList:
Code example #15
    def learn_from_sample(self,
                          sample=None,
                          hidden_sizes=(),
                          binarize=(),
                          **kwargs):
        """
        Learn the structural equations from data.

        Given observed data for all vertices in the graph, we can learn the
        structural equations, i.e., how each vertex depends on its parents by
        supervised training.

        Arguments:

            sample: Either the observed data as a dictionary or an integer
            indicating the sample size, or none. For the data dictionary, the
            keys are the vertices and the values are torch tensors. For an
            integer > 0, we try to draw a sample of that size from analytically
            attached equations (see `attach_equation`). Default is `None`,
            which is equivalent to `sample=8192`.

            hidden_sizes: Either a list/tuple or a dictionary. If list or
            tuple, it contains the numbers of neurons in each layer.
            `len(hidden_sizes)-1` is the number of layers. First and last
            entries are input and output dimension. If dictionary, we can
            choose a different architecture for each vertex. The keys are (a
            subset) of vertices and the values are lists or tuples as before.

            binarize: A list or tuple of variables that take binary values
            (always 0/1). For those variables, a final sigmoid layer is
            attached to the network.

            **kwargs: Further named variables that are passed on to the `train`
            function in the `mlp` module (where they are passed to
            `torch.optim.Adam`). `**kwargs` can also contain a `dropout`
            argument.
        """
        # Was a sample provided or do we need to generate one
        if sample is None or isinstance(sample, int):
            n_samples = 8192 if sample is None else sample
            print("There was no sample provided to learn from.")
            print("Generate sample with {} examples.".format(n_samples))
            sample = self.sample(n_samples)
            learned_sample = True
        else:
            learned_sample = False

        dropout = kwargs.pop('dropout', 0.0)

        # Build and train network for all non-roots
        for v in self.non_roots():
            parents = self.parents(v)
            print("Training {} -> {}...".format(parents, v), end=' ')
            data = utils.combine_variables(parents, sample)
            if v in binarize:
                final = torch.nn.Sigmoid()
            else:
                final = None
            hidden = self._get_hidden(hidden_sizes, v)
            net = MLP([data.size(-1), *hidden, sample[v].size(-1)],
                      final=final,
                      dropout=dropout)
            self.learned[v] = train(net, data, sample[v], **kwargs)
            print("DONE")

        if learned_sample:
            return sample
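
Since the docstring above spells out each argument, a short usage sketch may help tie them together. This is a minimal sketch under assumptions: the graph class (called `CausalGraphModel` here), its vertices, and the hyperparameter values are hypothetical stand-ins, since the snippet shows only the method itself.

# Hypothetical usage sketch: `CausalGraphModel` and vertices 'a', 'b', 'c'
# are stand-ins; only learn_from_sample() appears in the snippet above.
graph = CausalGraphModel(edges=[('a', 'b'), ('b', 'c')])
# With no data dictionary passed, an internal sample of 8192 examples is
# drawn from the attached equations and one MLP is fit per non-root vertex.
sample = graph.learn_from_sample(
    hidden_sizes=(64, 64),  # one architecture shared by every vertex
    binarize=('c',),        # 'c' takes 0/1 values, so a sigmoid is appended
    dropout=0.2,            # popped from kwargs before they reach the optimizer
    lr=1e-3)                # remaining kwargs are forwarded to torch.optim.Adam
# Because no sample was passed in, the generated sample is returned.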
Code example #16
File: init.py Project: chandiar/project2
def main(state, channel):
    # Load the MNIST dataset.
    print 'Loading MNIST'
    '''
    mnist = fetch_mldata('MNIST original',
        data_home=data_dir)

    # Split the data into train, valid and test sets.
    # TODO: add Scaling, normalization options.
    # reference: https://github.com/rosejn/torch-datasets/blob/master/dataset/mnist.lua
    # scaling: scale values between [0,1] (by default, they are in the range [0, 255])
    # TODO: try a [-1, 1] scaling which according to this post gives better results for
    # the svm: http://peekaboo-vision.blogspot.ca/2010/09/mnist-for-ever.html
    # Test that the test sets is the same as the one found in Yann LeCun's page.
    train_valid_x = mnist.data[:-10000, :] / scale
    train_valid_y = mnist.target[:-10000]
    test_x = mnist.data[-10000:, :] / scale
    test_y = mnist.target[-10000:]

    del mnist

    # Shuffle the train, valid and test sets since they are ordered.
    train_valid_x, train_valid_y = shuffle(train_valid_x, train_valid_y, random_state=random_state)
    test_x, test_y = shuffle(test_x, test_y)
    '''
    dataset = None
    data_path = None
    splits = None

    if state.features is None:
        if state.dataset == 'mnist':
            dataset = os.path.join(data_dir, 'mnist.pkl.gz')
            splits = [train_size, valid_size, test_size]
            print 'Loading the MNIST dataset from %s' % dataset
        elif state.dataset in ['mq+diff+std+top10']:
            data_path = os.path.join(data_dir, 'MQ', 'standardized', 'diff_augmented')
            print 'Loading the augmented standardized MQ dataset from %s' %data_path
        elif state.dataset in ['mq+diff+log+top10']:
            data_path = os.path.join(data_dir, 'MQ', 'log_normalized', 'diff_augmented')
            print 'Loading the augmented log-normalized MQ dataset from %s' %data_path
        elif state.dataset in ['mq+diff+log+std+top10']:
            data_path = os.path.join(data_dir, 'MQ', 'log_normalized+standardized', 'diff_augmented')
            print 'Loading the augmented log-normalized+standardized MQ dataset from %s' %data_path
        elif state.dataset in ['mq+diff+std+log+top10']:
            data_path = os.path.join(data_dir, 'MQ', 'standardized+log_normalized', 'diff_augmented')
            print 'Loading the augmented standardized+log-normalized MQ dataset from %s' %data_path
        else:
            raise NotImplementedError('Dataset %s not supported!' % state.dataset)
        if state.model in ['nnet', 'cnn']:
            state.gpu = True
            print 'GPU should be enabled'
        # TODO: check how to retrieve the gpu status.
        if state.gpu:
            #print 'GPU enabled'
            print 'Loading dataset in shared variables'
        else:
            #print 'GPU disabled'
            print 'Loading dataset in numpy array'
        datasets = load_data(dataset=dataset, data_path=data_path, splits=splits, shared=state.gpu, state=state)

        train_x, train_y = datasets[0]
        valid_x, valid_y = datasets[1]
        test_x, test_y = datasets[2]
    else:
        print 'Using HOG features'
        assert state.dataset == 'mnist'
        data_path = os.path.join(data_dir, 'mnist.pkl.gz')
        f = gzip.open(data_path, 'rb')
        train_set, valid_set, test_set = cPickle.load(f)
        f.close()

        train_x = numpy.load(os.path.join(data_dir, 'train_set_hog_features.npy'))
        valid_x = numpy.load(os.path.join(data_dir, 'valid_set_hog_features.npy'))
        test_x = numpy.load(os.path.join(data_dir, 'test_set_hog_features.npy'))

        train_y = train_set[1]
        valid_y = valid_set[1]
        test_y = test_set[1]

        #train_x = train_x[0:1000,:]
        #train_y = train_y[0:1000]

        #import pdb; pdb.set_trace()

    # Cross-validation.
    '''
    if cv_strategy == 'KFold':
        assert len(valid_x) > 0
        print 'KFold used'
        # Concatenate both the train and validation sets.
        train_valid_x = numpy.concatenate((train_x, valid_x), axis=0)
        train_valid_y = numpy.concatenate((train_y, valid_y), axis=0)
        kf = cross_validation.KFold(len(train_valid_x), n_folds=9)
        for train_index, valid_index in kf:
            train_x, valid_x = train_valid_x[train_index], train_valid_x[valid_index]
            train_y, valid_y = train_valid_y[train_index], train_valid_y[valid_index]
            train(state, channel, train_x, train_y, valid_x, valid_y, test_x, test_y)
    elif cv_strategy is None:
        print 'No cross-validation'
        train(state, channel, train_x, train_y, valid_x, valid_y, test_x, test_y)
    else:
        raise NotImplementedError('Cross-validation type not supported.')
    '''

    print 'Config ', state

    # Start timer for training.
    start = time.time()

    if state.model == 'nnet':
        status = mlp.train(state, channel, train_x, train_y, valid_x, valid_y, test_x, test_y)
    elif state.model == 'cnn':
        status = cnn.train(state, channel, train_x, train_y, valid_x, valid_y, test_x, test_y)
    else:
        status = train(state, channel, train_x, train_y, valid_x, valid_y, test_x, test_y)

    stop = time.time()
    print 'It took %s minutes' % ((stop - start) / 60.0)

    if state.save_state:
        print 'We will save the experiment state'
        dump_tar_bz2(state, 'state.tar.bz2')

    return 0