Example #1
    def add_biaffine(self, inputs):
        ## inputs [seq_len, batch_size, units]
        ## first define six role-specific MLPs (arc, rel, and joint stag/jk)
        arc_roles = ['arc-dep', 'arc-head']
        rel_roles = ['rel-dep', 'rel-head']
        joint_roles = ['jk', 'stag']
        vectors = {}
        for arc_role in arc_roles:
            for i in xrange(self.opts.mlp_num_layers):
                if i == 0:
                    inputs_dim = self.outputs_dim
                    vector_mlp = inputs
                else:
                    inputs_dim = self.opts.arc_mlp_units
                weights = get_mlp_weights('{}_MLP_Layer{}'.format(arc_role, i), inputs_dim, self.opts.arc_mlp_units)
                vector_mlp = self.add_dropout(tf.map_fn(lambda x: mlp(x, weights), vector_mlp), self.mlp_prob)
                ## [seq_len, batch_size, arc_mlp_units]
            vectors[arc_role] = vector_mlp
        weights = get_arc_weights('arc', self.opts.arc_mlp_units)
        arc_output = arc_equation(vectors['arc-head'], vectors['arc-dep'], weights) # [batch_size, seq_len, seq_len] dim 1: deps, dim 2: heads
#        arc_predictions = get_arcs(arc_output, self.test_opts) # [batch_size, seq_len]
        arc_predictions = tf.argmax(arc_output, 2) # [batch_size, seq_len]
        for rel_role in rel_roles:
            for i in xrange(self.opts.mlp_num_layers):
                if i == 0:
                    inputs_dim = self.outputs_dim
                    vector_mlp = inputs
                else:
                    inputs_dim = self.opts.rel_mlp_units
                weights = get_mlp_weights('{}_MLP_Layer{}'.format(rel_role, i), inputs_dim, self.opts.rel_mlp_units)
                vector_mlp = self.add_dropout(tf.map_fn(lambda x: mlp(x, weights), vector_mlp), self.mlp_prob)
                ## [seq_len, batch_size, rel_mlp_units]
            vectors[rel_role] = vector_mlp
        weights = get_rel_weights('rel', self.opts.rel_mlp_units, self.loader.nb_rels)
        rel_output, rel_scores = rel_equation(vectors['rel-head'], vectors['rel-dep'], weights, arc_predictions)  #[batch_size, seq_len, nb_rels]
        ## joint stagging
        for joint_role in joint_roles:
            for i in xrange(self.opts.mlp_num_layers):
                if i == 0:
                    inputs_dim = self.outputs_dim
                    vector_mlp = inputs
                else:
                    inputs_dim = self.opts.joint_mlp_units
                weights = get_mlp_weights('{}_MLP_Layer{}'.format(joint_role, i), inputs_dim, self.opts.joint_mlp_units)
                vector_mlp = self.add_dropout(tf.map_fn(lambda x: mlp(x, weights), vector_mlp), self.mlp_prob)
                ## [seq_len, batch_size, joint_mlp_units]
            vectors[joint_role] = vector_mlp
        weights = get_joint_weights('stag', self.opts.joint_mlp_units, self.loader.nb_stags)
        self.stag_embeddings = tf.transpose(weights['W-joint'], [1,0])
        joint_output = joint_equation(vectors['stag'], weights) # [batch_size, seq_len, nb_stags]
        weights = get_joint_weights('jk', self.opts.joint_mlp_units, self.loader.nb_jk)
        joint_output_jk = joint_equation(vectors['jk'], weights) # [batch_size, seq_len, nb_jk]
        return arc_output, rel_output, rel_scores, joint_output, joint_output_jk
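The arc_equation, mlp, and weight-helper functions above are project-specific and not shown. For orientation, here is a minimal NumPy sketch of one common biaffine arc scorer in the style of Dozat & Manning (2017); the exact weight layout used by get_arc_weights in this project is an assumption.

import numpy as np

def biaffine_arc_scores(heads, deps, W, w_head):
    # heads, deps: [seq_len, batch_size, units]; W: [units, units]; w_head: [units]
    # returns [batch_size, seq_len, seq_len]: entry (i, j) scores token i
    # (dependent) choosing token j as its head
    H = np.transpose(heads, (1, 0, 2))  # [batch, seq_len, units]
    D = np.transpose(deps, (1, 0, 2))
    bilinear = np.einsum('bik,kl,bjl->bij', D, W, H)
    head_bias = np.einsum('l,bjl->bj', w_head, H)[:, None, :]
    return bilinear + head_bias  # argmax over axis 2 gives the predicted heads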
Example #2
def testMLP(dataFileName, nhidden):
    dataText = np.loadtxt(dataFileName, delimiter=',')
    target = np.zeros((np.shape(dataText)[0], 2))
    indices = np.where(dataText[:, -1] == 0)
    target[indices, 0] = 1
    indices = np.where(dataText[:, -1] == 1)
    target[indices, 1] = 1

    #print dataText
    # 5 fold validation
    val_error = []
    TP = []
    TN = []
    FP = []
    FN = []
    order = list(range(np.shape(dataText)[0]))
    np.random.shuffle(order)
    dataText = dataText[order, :]
    dataText = dataText[:, :-1]
    target = target[order, :]
    for i in range(5):
        valid = dataText[i * len(dataText) // 5:(i + 1) * len(dataText) // 5]
        validt = target[i * len(target) // 5:(i + 1) * len(target) // 5]
        train = np.vstack((dataText[:i * len(dataText) // 5],
                           dataText[(i + 1) * len(dataText) // 5:]))
        traint = np.vstack(
            (target[:i * len(target) // 5], target[(i + 1) * len(target) // 5:]))
        net = mlp.mlp(train, traint, nhidden, outtype='softmax')
        val_error.append(net.earlystopping(train, traint, valid, validt, 0.3))
        cm = net.confmat(valid, validt)
        TP.append(cm[0][0])
        FN.append(cm[0][1])
        FP.append(cm[1][0])
        TN.append(cm[1][1])
    return np.mean(val_error), np.sum(TP), np.sum(FP), np.sum(TN), np.sum(FN)
Example #3
def main():
    print("***EXECUTION INITIATED***")
    # train the data
    training_data = get_vectors()['training1and2']
    # create MLP with 12 hidden nodes and 8 output nodes
    MLP = mlp(12, 8, training_data)
    # test the holdout set
    holdout_data = get_vectors()['holdout']
    for example in holdout_data:
        MLP.get_classification(example)

    print()
    # printing the weights and the epochs
    MLP.print_weights()
    MLP.print_epochs()

    #Validation
    print("----------Validation----------")
    validation_set = get_vectors()['training2']
    print("\nConfusion Matrix Rates on Holdout:")
    MLP.print_rates_for_class(3, holdout_data)
    MLP.print_rates_for_class(2, holdout_data)
    print("\nConfusion Matrix Rates on Validation Set:")
    MLP.print_rates_for_class(3, validation_set)
    MLP.print_rates_for_class(2, validation_set)
    accuracy_rate = MLP.get_accuracy(holdout_data)
    error_rate = 1 - accuracy_rate
    print("MLP Accuracy and Error Rates for Holdout:")
    print("Accuracy rate =", accuracy_rate)
    print("Error rate =", error_rate)
    accuracy_rate = MLP.get_accuracy(validation_set)
    error_rate = 1 - accuracy_rate
    print("\nMLP Accuracy and Error Rates for Validation:")
    print("Accuracy rate =", accuracy_rate)
    print("Error rate =", error_rate)
Example #4
File: dnn.py Project: coreyker/pendant
	def __init__(self, layers, mean=None, rng_seed=None):
		'''
		layers is a list specifying the dimension of each network layer:
			layers = [#n_visible, #n_hidden1, #n_hidden2, ...]
		The final layer is a softmax layer, with one node per class label
		E.g.,
			layers = [20,10,2]		
		implements binary classification with inputs of dimension 20, and 10 units in the first hidden layer

		The DNN is trained by first calling pre_train() to perform unsupervised pre-training, 
		and then fine_tune() to perform discriminative (supervised) fine tuning
		'''
		self.layers = layers
		self.n_hid  = len(layers) - 1 		
		self.mlp    = mlp.mlp(layers, rng_seed) # initialize multi-layer perceptron
		self.rbm    = (self.n_hid - 1) * [None]
		
		# initialize RBMs (the "if 1:" below is a toggle between binary and gaussian visible units for the first layer)
		if 1:
			self.rbm[0] = rbm.rbm(layers[0], layers[1], input_type='binary', mean=mean)
		else:
			self.rbm[0] = rbm.rbm(layers[0], layers[1], input_type='gaussian')

		for k in xrange(1, self.n_hid - 1): # skip last layer which uses softmax instead of binary units
			self.rbm[k] = rbm.rbm(layers[k], layers[k+1], input_type='binary')
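Given the docstring above, a minimal usage sketch might look like the following. The class is assumed to be importable as dnn.dnn, and pre_train is assumed to take just the training data; only fine_tune's signature is confirmed (see Example #25).

import numpy as np
import dnn

X = np.random.rand(100, 20)                  # 100 samples, 20 visible units
y = np.eye(2)[np.random.randint(0, 2, 100)]  # one-hot targets for 2 classes

net = dnn.dnn([20, 10, 2])  # 20 inputs, 10 hidden units, 2-class softmax output
net.pre_train(X)            # unsupervised RBM pre-training (assumed signature)
net.fine_tune(X, y)         # supervised fine tuning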
Example #5
def main():

    preprocessFacebook('dataset_Facebook.csv', 'facebook.csv')
    data = np.loadtxt('facebook.csv', delimiter=';')
    data = data - data.mean(axis=0)
    imax = np.concatenate((data.max(axis=0) * np.ones(
        (1, 19)), np.abs(data.min(axis=0) * np.ones((1, 19)))),
                          axis=0).max(axis=0)
    data = data / imax

    train = data[::2, :]
    trainTarget = data[::2, 18].reshape((np.shape(train)[0]), 1)
    valid = data[1::4, :]
    validTarget = data[1::4, 18].reshape((np.shape(valid)[0]), 1)
    test = data[3::4, :]
    testTarget = data[3::4, 18].reshape((np.shape(test)[0]), 1)

    net = mlp.mlp(train, trainTarget, 30, outtype='linear')
    net.earlystopping(train, trainTarget, valid, validTarget, 0.4, 500)

    test = np.concatenate((test, -np.ones((np.shape(test)[0], 1))), axis=1)
    testout = net.mlpfwd(test)

    pl.figure()
    pl.plot(np.arange(np.shape(test)[0]), testout, '.')
    pl.plot(np.arange(np.shape(test)[0]), testTarget, 'x')
    pl.legend(('Predictions', 'Targets'))
    pl.show()
Example #6
def trainData():
    with open('fixeddata.csv') as f:
        numCol = csv.reader(f, delimiter=',')
        first_row = next(numCol)
        numCol = len(first_row)
        print(numCol)
        cancerData = np.loadtxt('fixeddata.csv',
                                delimiter=',',
                                usecols=range(1, 11)).astype(float)
        print(np.shape(cancerData))
        cancerData = cancerData / cancerData.max(axis=0)
        np.random.shuffle(cancerData)

        traindata = cancerData[0:349, 0:9]
        traindatat = cancerData[0:349, 9:10]
        datavalid = cancerData[349:523, 0:9]
        datavalidt = cancerData[349:523, 9:10]
        datatest = cancerData[523:699, 0:9]
        datatestt = cancerData[523:699, 9:10]

        #net = mlp.mlp(traindata,traindatat,40,outtype='linear')
        #net.mlptrain(traindata,traindatat,.005,650)
        net = mlp.mlp(traindata, traindatat, 10, outtype='linear')
        #net.mlptrain(traindata,traindatat,.4,1000)
        net.earlystopping(traindata, traindatat, datavalid, datavalidt, 0.35)
        net.confmat(datatest, datatestt)
Example #7
def kfolds(movements, target, iterations=10, hidden=6, k=5, outtype='logistic'):

	n_networks = []

	for i in range(k):
		test = movements[i::k,0:40]
		test_targets = target[i::k]

		valid = movements[((i+1)%k)::k,0:40]
		valid_targets = target[((i+1)%k)::k]

		train = np.vstack([movements[((i + j) % k)::k, 0:40] for j in range(2, k)])
		train_targets = np.vstack([target[((i + j) % k)::k] for j in range(2, k)])

		# Initialize the network:
		net = mlp.mlp(train, train_targets, hidden, beta, momentum, outtype)

		# Store error and percentage correct:
		net.earlystopping(train, train_targets, valid, valid_targets,learning_rate)
		net.confusion(test, test_targets)

		# Store the neural network:
		n_networks.append(net)

	percentages = []
	errors = []
	for i in range(len(n_networks)):
		percentages.append(n_networks[i].percentage)
		errors.append(n_networks[i].errors)

	print("Average Percentage: {}".format(statistics.mean(percentages)))
	print("Standard deviation: {}".format(statistics.stdev(percentages)))
Example #8
def normal(movements, target, iterations=10, hidden=6, outtype='logistic'):
	# Split data into 3 sets

	# Training updates the weights of the network and thus improves the network
	train = movements[::2,0:40]
	train_targets = target[::2]

	# Validation checks how well the network is performing and when to stop
	valid = movements[1::4,0:40]
	valid_targets = target[1::4]

	# Test data is used to evaluate how good the completely trained network is.
	test = movements[3::4,0:40]
	test_targets = target[3::4]

	# Initialize the network:
	net = mlp.mlp(train, train_targets, hidden, beta, momentum, outtype)

	# Run training:
	net.earlystopping(train, train_targets, valid, valid_targets,learning_rate,iterations)

	# Check how well the network performed:
	print("HN: {} - Iter: {} - LR: {}".format(hidden, iterations, learning_rate))
	print("Momentum: {} - Type: {}" .format(momentum, outtype))

	net.confusion(test,test_targets)
Example #9
File: a.py Project: ecksun/ANN-Lab1
def do(beta, eta, iterations, color = 'k'):
    nhidden = 2
    momentum = 0.9
    print "%s_beta-%f_eta-%f_iterations-%f" % (test, beta, eta, iterations)

    tron = mlp.mlp(classC, targetC, nhidden, beta, momentum, 'logistic')

    tron.mlptrain(classC, targetC, eta, iterations)

    x_range = numpy.arange(-4, 4, 0.1)
    y_range = numpy.arange(-4, 4, 0.1)
    xgrid, ygrid = numpy.meshgrid(x_range, y_range)

    noOfPoints = xgrid.shape[0] * xgrid.shape[1]

    xcoords = xgrid.reshape((noOfPoints, 1))
    ycoords = ygrid.reshape((noOfPoints, 1))
    samples = numpy.concatenate((xcoords, ycoords), axis=1)

    ones = -numpy.ones(xcoords.shape)
    samples = numpy.concatenate((samples, ones), axis=1)

    indicator = tron.mlpfwd(samples)
    indicator = indicator.reshape(xgrid.shape)

    pylab.contour(x_range, y_range, indicator, (0.5,), colors=color)
Example #10
    def unseen_tissue_learn(self, unseen_train_loader, unseen_test_loader):

        unseen_tissue_model = mlp(self.feature_num, self.layer, self.hidden)

        # First need to copy the original meta learning model
        unseen_tissue_model.copy_weights(self.observed_tissue_model)
        unseen_tissue_model.cuda()
        #unseen_tissue_model.train()
        unseen_tissue_model.eval()

        unseen_opt = torch.optim.SGD(unseen_tissue_model.parameters(),
                                     lr=self.inner_lr)
        #unseen_opt = torch.optim.Adam(unseen_tissue_model.parameters(), lr=self.inner_lr, betas=(0.9, 0.99), eps=1e-05)

        # Here test_feature and test_label contains only one tissue info
        #unseen_train_loader, unseen_test_loader = get_unseen_data_loader(test_feature, test_label, K, args.inner_batch_size)
        for i in range(self.num_inner_updates):

            in_, target = next(iter(unseen_train_loader))  # one batch per inner update
            loss, _ = forward_pass(unseen_tissue_model, in_, target)
            unseen_opt.zero_grad()
            loss.backward()
            unseen_opt.step()

        # Test on the rest of cell lines in this tissue (unseen_test_loader)
        mtrain_loss, mtrain_pear_corr, mtrain_spearman_corr, _, _ = evaluate_new(
            unseen_tissue_model, unseen_train_loader, 1)
        mtest_loss, mtest_pear_corr, mtest_spearman_corr, test_prediction, test_true_label = evaluate_new(
            unseen_tissue_model, unseen_test_loader, 0)

        return mtrain_loss, mtrain_pear_corr, mtrain_spearman_corr, mtest_loss, mtest_pear_corr, mtest_spearman_corr, test_prediction, test_true_label
Example #11
def trainData():
    with open('firesarranged.data') as f:
        numCol = len(f.readline().split())
    fireData = np.loadtxt('firesarranged.data',
                          dtype='S',
                          delimiter=',',
                          skiprows=1,
                          usecols=range(13)).astype(float)
    #fireData[:,:13] = fireData[:,:13]-fireData[:,:13].mean(axis=0)
    #print(np.shape(fireData), np.shape(targets))
    #imax = np.concatenate((fireData.max(axis=0)*np.ones((1,13)), np.abs(fireData.min(axis=0))*np.ones((1,13))), axis=0).max(axis=0)
    #targets[:,:1] = targets[:,:1]-targets[:,:1].mean(axis=0)
    #imax2 = np.concatenate((targets.max(axis=0)*np.ones((1)), np.abs(targets.min(axis=0))*np.ones((1))), axis=0).max(axis=0)
    #fireData[:,:13] = fireData[:,:13]/imax[:13]
    #print(np.shape(fireData))
    #print("Before", fireData)
    #print(fireData)
    #targets	[:,:1] = targets[:,:1]/imax2[:1]
    #print(np.shape(fireData), np.shape(targets))
    #pl.plot(fireData,targets,'.')
    #pl.show()
    fireData = np.delete(fireData, np.where(fireData[0:517, 12:13] <= 0), 0)
    fireData[0:517, 12:13] = np.log(fireData[0:517, 12:13] + 1)
    fireData = fireData / fireData.max(axis=0)

    #targets = fireData[0:517,12:13]
    #print(np.shape(fireData[0:517,12:13]))
    #print(np.shape(targets))
    #fireData = fireData[0:517,12:13][fireData[0:517,12:13] > 0]
    #print(np.shape(fireData))
    #print(np.amin(fireData[0:270,12:13]))
    n, bins, patches = plt.hist(fireData[0:270, 12:13],
                                50,
                                facecolor='green',
                                alpha=0.75,
                                rwidth=50)

    np.random.shuffle(fireData)
    plt.show()
    #print(imax)
    #print("Item", fireData.item(233), "Target", targets.item(233))
    #print(np.shape(fireData[:,:12]))
    train = fireData[0:135, 2:12]
    traintarget = fireData[0:135, 12:13]
    test = fireData[135:202, 2:12]
    testtarget = fireData[135:202, 12:13]
    valid = fireData[202:267, 2:12]
    validtarget = fireData[202:267, 12:13]

    #net = mlp.mlp(train,traintarget,105,outtype='linear')
    #net.mlptrain(train,traintarget,0.1,1001)
    net = mlp.mlp(train, traintarget, 15, outtype='linear')
    net.mlptrain(train, traintarget, .3, 450)
    #net = mlp.mlp(traindata,traindatat,10,outtype='linear')
    #net.mlptrain(traindata,traindatat,.4,1000)
    net.earlystopping(train, traintarget, valid, validtarget, 0.3)
Example #12
    def build(self, x):
        ''' Build the coupling layer
        Args:
            x: tensor-like, input tensor
        '''
        name = 'coupling_'+str(self.ctr)
        with tf.variable_scope(name):
            # mask
            self.mask = tf.Variable(self.maskval, dtype=tf.float64, trainable=False)

            # input tensor
            xpp = tf.identity(x)
            self.xd = tf.multiply(xpp, self.mask)

            # defining s(•) and t(•) (MLPs)
            self.s = mlp(0, self.xd, self.size, self.depth, self.width, tf.nn.tanh)
            self.t = mlp(1, self.xd, self.size, self.depth, self.width)

            # feed forward output
            if (self.mode == 'ff'):
                self.xa = tf.multiply(xpp, tf.exp(self.s.y)) + self.t.y
                self.y = self.xd + tf.multiply(self.xa, 1-self.mask)
            # feed backward output
            elif (self.mode == 'fb'):
                self.xa = tf.multiply(tf.exp(-self.s.y), xpp-self.t.y)
                self.y = self.xd + tf.multiply(self.xa, 1-self.mask)
            else:
                raise ValueError('Inexistent mode for coupling layer')

            # jacobian determinant computation for the forward function (closed form)
            if (self.mode == 'ff'):
                self.detjac = tf.exp(tf.reduce_sum(
                    tf.multiply(1-self.mask, self.s.y), axis=1))
                self.detjactest = tf.matrix_determinant(
                    tf.stack([tf.gradients(self.y[:, idx], x)[0] for idx in range(self.size)], axis=1))
            elif (self.mode == 'fb'):
                self.detjac = tf.exp(-tf.reduce_sum(
                        tf.multiply(1-self.mask, self.s.y), axis=1))
                self.detjactest = tf.matrix_determinant(
                    tf.stack([tf.gradients(self.y[:, idx], x)[0] for idx in range(self.size)], axis=1))
            else:
                raise ValueError('Inexistent mode for coupling layer')
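As a sanity check on the equations above, the following NumPy sketch reproduces the same affine coupling transform outside TensorFlow, with the s(.) and t(.) MLPs replaced by simple stand-in maps of the masked input (which, as in the layer above, is what makes it invertible):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 6))  # batch of 4 samples, dimension 6
mask = np.array([1., 1., 1., 0., 0., 0.])

# stand-ins for s(.) and t(.): tiny fixed linear maps that, like the MLPs
# above, depend only on the masked part x * mask
Ws, Wt = rng.normal(size=(6, 6)) * 0.1, rng.normal(size=(6, 6)) * 0.1
s = np.tanh((x * mask) @ Ws)
t = (x * mask) @ Wt

# forward ('ff'): the masked half passes through, the rest is scaled and shifted
y = x * mask + (x * np.exp(s) + t) * (1 - mask)

# backward ('fb') applied to y recovers x, since y * mask == x * mask
s_b = np.tanh((y * mask) @ Ws)
t_b = (y * mask) @ Wt
x_rec = y * mask + (np.exp(-s_b) * (y - t_b)) * (1 - mask)
assert np.allclose(x, x_rec)

# closed-form Jacobian determinant of the forward map
detjac = np.exp(np.sum((1 - mask) * s, axis=1))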
Example #13
 def add_biaffine_global(self, inputs):
     ## inputs [seq_len, batch_size, units] = [n, b, d]
     ## first define four different MLPs
     arc_roles = ['arc-dep', 'arc-head']
     rel_roles = ['rel-dep', 'rel-head']
     vectors = {}
     for arc_role in arc_roles:
         for i in xrange(self.opts.mlp_num_layers):
             if i == 0:
                 inputs_dim = self.outputs_dim
                 vector_mlp = inputs
             else:
                 inputs_dim = self.opts.arc_mlp_units
             weights = get_mlp_weights('{}_MLP_Layer{}'.format(arc_role, i),
                                       inputs_dim, self.opts.arc_mlp_units)
             vector_mlp = self.add_dropout(
                 tf.map_fn(lambda x: mlp(x, weights), vector_mlp),
                 self.mlp_prob)
         vectors[arc_role] = vector_mlp
         ## [seq_len, batch_size, arc_mlp_units]
     arc_weights = get_arc_weights('arc', self.opts.arc_mlp_units)
     for rel_role in rel_roles:
         for i in xrange(self.opts.mlp_num_layers):
             if i == 0:
                 inputs_dim = self.outputs_dim
                 vector_mlp = inputs
             else:
                 inputs_dim = self.opts.rel_mlp_units
             weights = get_mlp_weights('{}_MLP_Layer{}'.format(rel_role, i),
                                       inputs_dim, self.opts.rel_mlp_units)
             vector_mlp = self.add_dropout(
                 tf.map_fn(lambda x: mlp(x, weights), vector_mlp),
                 self.mlp_prob)
         vectors[rel_role] = vector_mlp
         ## [seq_len, batch_size, rel_mlp_units]
     rel_weights = get_rel_weights('rel', self.opts.rel_mlp_units,
                                   self.loader.nb_rels)
     global_weights = get_global_weights('global', self.opts.arc_mlp_units,
                                         self.loader.nb_rels)
     arc_output, rel_output, rel_scores = global_equation(
         vectors, arc_weights, rel_weights, global_weights)
     return arc_output, rel_output, rel_scores
Example #14
    def __init__(self,
                 meta_dataset,
                 fs_dataset,
                 K,
                 meta_lr,
                 inner_lr,
                 layer,
                 hidden,
                 tissue_num,
                 meta_batch_size,
                 inner_batch_size,
                 num_updates,
                 num_inner_updates,
                 tissue_index_list,
                 patience=3,
                 num_trials=10):

        super(self.__class__, self).__init__()

        self.meta_dataset = meta_dataset
        self.fs_dataset = fs_dataset

        self.meta_batch_size = meta_batch_size
        self.inner_batch_size = inner_batch_size
        self.num_updates = num_updates
        self.num_inner_updates = num_inner_updates
        self.num_trials = num_trials
        self.hidden = hidden
        self.patience = patience
        self.feature_num = self.fs_dataset.feature.shape[1]

        self.K = K
        self.meta_lr = meta_lr
        self.inner_lr = inner_lr
        self.layer = layer
        self.tissue_index_list = tissue_index_list
        self.tissue_num = tissue_num

        self.observed_tissue_model = mlp(self.feature_num, layer, hidden)
        self.observed_opt = torch.optim.Adam(
            self.observed_tissue_model.parameters(),
            lr=self.meta_lr,
            betas=(0.9, 0.99),
            eps=1e-05)
        self.inner_net = InnerLoop(self.num_inner_updates, self.inner_lr,
                                   self.feature_num, layer, hidden)

        #torch.cuda.manual_seed(args.seed)
        self.observed_tissue_model.cuda()
        self.inner_net.cuda()
Example #15
def getFitness(string):
    n = int(string.replace('.',''), 2)
#     print "Checking fitness for: ", string
#     print "Converted to decimal: ", n
    net = mlp.mlp(train_in,train_tgt,n,outtype='softmax')
    train = np.concatenate((train_in,-np.ones((np.shape(train_in)[0],1))),axis=1)
    fitness = net.mlpfwd(train)
    fitness = 0.5 * np.sum(fitness**2)
    if (math.isnan(fitness) or fitness == 0):
        fitness = 1
#    fitness = net.confmat(test_in,test_tgt)

    #print "Fitness is: ", fitness
    return fitness
Example #16
def train_and_eval(num_iterations, learning_rate, num_neurons, momentum):
    split = int(len(data) * .9)  # no ten-fold: use 90% for training and the rest for validation
    train = data[:split, :]
    train_tgt = targets[:split]
    validation = data[split:, :]
    val_tgt = targets[split:]
    p = mlp.mlp(train,
                train_tgt,
                num_neurons,
                momentum=momentum,
                outtype='logistic')
    p.mlptrain(train, train_tgt, learning_rate, num_iterations)
    conf_mat = p.confmat(validation, val_tgt)
    acc = get_accuracy(conf_mat)
    return acc
Example #17
    def zero_shot_test(self, unseen_train_loader, unseen_vali_loader,
                       unseen_test_loader):

        unseen_tissue_model = mlp(self.feature_num, self.layer, self.hidden)

        # First need to copy the original meta learning model
        unseen_tissue_model.copy_weights(self.observed_tissue_model)
        unseen_tissue_model.cuda()
        unseen_tissue_model.eval()

        train_performance = evaluate_cv(unseen_tissue_model,
                                        unseen_train_loader)
        vali_performance = evaluate_cv(unseen_tissue_model, unseen_vali_loader)
        test_performance = evaluate_cv(unseen_tissue_model, unseen_test_loader)

        return train_performance, vali_performance, test_performance
Example #18
File: main_nat.py Project: vd114/galatea
 def __init__(self, options, channel):
     """
     options: a dictionary contains all the configurations
     channel: jobman channel
     """
     # Step 0. Load data
     print 'Loading data'
     data = numpy.load(options['data'])
     self.options = options
     self.channel = channel
     # Step 1. Construct Model
     print 'Constructing Model'
     if options['model'] == 'mlp':
         model = mlp(options, channel, data)
     elif options['model'] == 'daa':
         model = daa(options, channel, data)
     self.model = model
     print 'Constructing algo'
     # Step 2. Construct optimization technique
     if options['algo'] == 'natSGD_basic':
         algo = natSGD(options, channel, data, model)
     elif options['algo'] == 'natSGD_jacobi':
         algo = natSGD_jacobi(options, channel, data, model)
     elif options['algo'] == 'natSGD_ls':
         algo = natSGD_linesearch(options, channel, data, model)
     elif options['algo'] == 'natNCG':
         algo = natNCG(options, channel, data, model)
     elif options['algo'] == 'krylov':
         algo = KrylovDescent(options, channel, data, model)
     elif options['algo'] == 'hf':
         raise NotImplementedError
     elif options['algo'] == 'hf_jacobi':
         raise NotImplementedError
     elif options['algo'] == 'sgd':
         algo = SGD(options, channel, data, model)
     self.algo = algo
     self.options['validscore'] = 1e20
     self.train_timing = numpy.zeros((options['loopIters'], 13),
                                     dtype='float32')
     self.valid_timing = numpy.zeros((options['loopIters'], 2),
                                     dtype='float32')
     if self.channel is not None:
         self.channel.save()
     self.start_time = time.time()
     self.batch_start_time = time.time()
Example #19
def mlp_process(data, labels):
    target = np.zeros((len(data), 2), dtype=int)
    # print(labels)

    for i in range(data.shape[0]):
        if (labels[i] == 1):
            target[i, 0] = 1
        elif (labels[i] == 2):
            target[i, 1] = 1
        # elif(labels[i] == 3):
        #     target[i,2] = 1
        # elif(labels[i] == 4):
        #     target[i,3] = 1
        # elif(labels[i] == 5):
        #     target[i,4] = 1
        # elif(labels[i] == 6):
        #     target[i,5] = 1
        # elif(labels[i] == 7):
        #     target[i,6] = 1
        # elif(labels[i] == 8):
        #     target[i,7] = 1

    # print(target)
    #split the data into train, test and validation data for mlp
    #Set up Neural Network
    # x_train, y_train, x_valid, y_valid = train_test_split(data, target, test_size = 0.3, random_state = 0)
    x_train = data[0::2, :]
    x_valid = data[1::4, :]
    x_test = data[3::4, :]

    y_train = target[0::2, :]
    y_valid = target[1::4, :]
    y_test = target[3::4, :]

    hidden_layers = 50
    learning_rate = 0.1
    net = mlp.mlp(x_train, y_train, hidden_layers, outtype='softmax')
    net.earlystopping(x_train, y_train, x_valid, y_valid, learning_rate)
    net.confmat(x_test, y_test)
    return net
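The label-to-one-hot loop above (including the commented-out branches for labels 3 to 8) can be written as one vectorized assignment. A small sketch, assuming labels holds integer class ids starting at 1:

import numpy as np

labels = np.array([1, 2, 1, 2])
target = np.zeros((len(labels), labels.max()), dtype=int)
target[np.arange(len(labels)), labels - 1] = 1  # one one-hot row per sample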
Example #20
def learn_nonlinear_mapping(hid_layers=3):
    train=CBK[0::2,:]/100.
    SFR=vectorize_codebook(STK,FRA,REG)
    target=SFR[0::2,-3:]    
    print train.shape, target.shape
    net = mlp(train,target,hid_layers,outtype='linear')
    valid=CBK[1::4,:]/100.
    valtarg=SFR[1::4,-3:]
    print valid.shape, valtarg.shape
    #net.mlptrain(train,target,0.9,100)
    net.earlystopping(train,target,valid,valtarg,0.9)#eta was 0.25
    test=CBK[3::4,:]/100.
    testtarg=SFR[3::4,-3:]
    print test.shape, testtarg.shape
    exptest = np.concatenate((test,-np.ones((np.shape(test)[0],1))),axis=1)
    testout=net.mlpfwd(exptest)    
    print testtarg[:3,:]
    print testout[:3,:]
Example #21
File: main.py Project: cc13ny/galatea
 def __init__(self, options, channel):
     # Step 0. Load data
     print 'Loading data'
     data = numpy.load(options['data'])
     self.options = options
     self.channel = channel
     # Step 1. Construct Model
     print 'Constructing Model'
     if options['model'] == 'mlp':
         model = mlp(options, channel, data)
     elif options['model'] == 'daa':
         model = daa(options, channel, data)
     self.model = model
     print 'Constructing algo'
     # Step 2. Construct optimization technique
     if options['algo'] == 'natSGD_basic':
         algo =  natSGD(options, channel, data, model)
     elif options['algo'] == 'natSGD_jacobi':
         algo =  natSGD_jacobi(options, channel, data, model)
     elif options['algo'] == 'natSGD_ls':
         algo = natSGD_linesearch(options, channel, data, model)
     elif options['algo'] == 'natNCG':
         algo = natNCG(options, channel, data, model)
     elif options['algo'] == 'krylov':
         algo = KrylovDescent(options, channel, data, model)
     elif options['algo'] == 'hf':
         raise NotImplementedError
     elif options['algo'] == 'hf_jacobi':
         raise NotImplementedError
     elif options['algo'] == 'sgd':
         algo = SGD(options, channel, data, model)
     self.algo = algo
     self.options['validscore'] = 1e20
     self.options['testscore'] = 1e20
     self.train_timing = numpy.zeros((options['loopIters'],
                                 15), dtype='float32')
     self.valid_timing = numpy.zeros((options['loopIters'],), dtype='float32')
     self.test_timing = numpy.zeros((options['loopIters'],), dtype='float32')
     if self.channel is not None:
         self.channel.save()
     self.start_time = time.time()
     self.batch_start_time = time.time()
Example #22
def cross_validation(data, targets, test, test_targets, nhidden=5, k=10):

    data_sp = np.split(data, k)
    size = int(len(data) / k)
    targets_sp = np.split(targets, k)
    errs = np.empty(k)
    accs = np.empty(k)

    for i in range(k):
        t_ind = np.arange(len(data))
        t_ind = np.delete(t_ind, t_ind[i * size:(i + 1) * size])
        print(t_ind)
        valid = data_sp[i]
        valid_targets = targets_sp[i]
        train = data[t_ind]
        train_targets = targets[t_ind]
        print(train.shape)
        print(train_targets.shape)

        net = mlp.mlp(train, train_targets, nhidden)

        val_err, val_acc, tr_err, tr_acc = net.earlystopping(
            train, train_targets, valid, valid_targets, 50)

        plt.plot(val_err)
        plt.figure()
        plt.plot(val_acc)
        plt.figure()
        plt.plot(tr_err)
        plt.figure()
        plt.plot(tr_acc)
        plt.show()

        errs[i], accs[i] = net.test(test, test_targets)

        print("Error of %d fold is: " % (i + 1), errs[i])
        print("Accuracy of %d fold is: " % (i + 1), accs[i])

    print("Mean error is: ", np.mean(errs))
    print("Standard deviation of error is: ", np.std(errs))
    print("Mean accuracy is: ", np.mean(accs))
    print("Standard deviation of accuracy is: ", np.std(accs))
Example #23
def getMatrix(nHidden, momentum):
    info = np.loadtxt('normalizeOutput.txt', delimiter=',')
    np.random.shuffle(info)
    ###START Split between test and valid sample###
    trainInputs = []
    trainTargets = []
    validInputs = []
    validTargets = []
    testInputs = []
    testTargets = []
    for sample in info[:len(info) // 5]:
        new = [0, 0]
        new[int(sample[-1])] = 1
        validInputs.append(sample[:-1])
        validTargets.append(new)

    for sample in info[len(info) // 5:2 * (len(info) // 5)]:
        new = [0, 0]
        new[int(sample[-1])] = 1
        testInputs.append(sample[:-1])
        testTargets.append(new)

    for sample in info[2 * (len(info) // 5):]:
        new = [0, 0]
        new[int(sample[-1])] = 1
        trainInputs.append(sample[:-1])
        trainTargets.append(new)
    ###END Split between test and valid sample###
    trainInputs = np.array(trainInputs)
    trainTargets = np.array(trainTargets)
    validInputs = np.array(validInputs)
    validTargets = np.array(validTargets)
    testInputs = np.array(testInputs)
    testTargets = np.array(testTargets)

    net = mlp.mlp(trainInputs, trainTargets, nHidden, momentum=momentum)
    net.earlystopping(trainInputs, trainTargets, validInputs, validTargets,
                      0.3)
    net.confmat(testInputs, testTargets)
Example #24
def train_eval_ten_fold(num_iterations, learning_rate, num_neurons, momentum):
    total = 0
    for i in range(10):
        start = int(len(data) * i / 10)
        end = int(len(data) * (i + 1) / 10)
        train = np.concatenate((data[:start, :], data[end:, :]))
        train_tgt = np.concatenate((targets[:start, :], targets[end:, :]))
        validation = data[start:end, :]
        val_tgt = targets[start:end, :]
        p = mlp.mlp(train,
                    train_tgt,
                    num_neurons,
                    momentum=momentum,
                    outtype='logistic')
        p.mlptrain(train, train_tgt, learning_rate, num_iterations)
        conf_mat = p.confmat(validation, val_tgt)
        acc = get_accuracy(conf_mat)
        total += acc
    return total / 10
Example #25
File: dnn.py Project: coreyker/pendant
	def fine_tune(self, data, target, depth='deep', batch_size=1, learning_rate=0.1, epochs=10, momentum=0, weight_decay=0):
		'''
		Discriminative fine tuning of MLP weights. In most cases pre_train() should be called first
		'''		
		# fine-tune all layers			
		if depth=='deep': 
			error = self.mlp.train(data, target, batch_size, learning_rate, epochs, momentum, weight_decay)

		# fine-tune deepest layer only
		elif depth=='shallow': 
			perceptron     = mlp.mlp([self.layers[-2], self.layers[-1]])
			error          = perceptron.train( self.calc_rep(data), target, batch_size, learning_rate, epochs, momentum )
			
			# copy weights into last layer
			self.mlp.W[-1] = np.copy(perceptron.W[-1])
			self.mlp.b[-1] = np.copy(perceptron.b[-1])	
		else:
			raise Exception("Unknown depth.  Currently recognized options are 'deep' or 'shallow'")						

		return error
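A short usage sketch for the two modes above (net, X, and y are assumed to come from the constructor in Example #4, after pre-training):

# fine-tune every layer with backpropagation
err_deep = net.fine_tune(X, y, depth='deep', learning_rate=0.1, epochs=10)

# or retrain only the top softmax layer on the fixed pre-trained representation
err_shallow = net.fine_tune(X, y, depth='shallow')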
Example #26
def initialise_data():
    # Preprocessing data
    data = np.loadtxt('spambase/spambase.data',delimiter=',')
    data[:,:57] = data[:,:57]-data[:,:57].mean(axis=0)
    imax = np.concatenate((data.max(axis=0)*np.ones((1,58)),np.abs(data.min(axis=0)*np.ones((1,58)))),axis=0).max(axis=0)
    data[:,:57] = data[:,:57]/imax[:57]
   
    # 1-of-N encoding
    target = np.zeros((np.shape(data)[0],2))
    indices = np.where(data[:,57]==0) 
    target[indices,0] = 1
    indices = np.where(data[:,57]==1)
    target[indices,1] = 1
      
    #Take half of data for testing and compute fitness
    test = data[::2,0:57]
    testt = target[::2]
    
    #initialise MLP model with 1 hidden node and 'softmax' output activation
    net = mlp.mlp(test,testt,1,outtype='softmax') 
    return net,test,testt
Example #27
def evaluateLearning():
    """Main runner method, can be modified to loop through different
    hidden layers. Note I have changed the MLPtrain method to return
    the x and y values of the errors (iteration and error amount)."""

    figure(1)

    #Change this to use the mlp with different layers
    #for x in [1,2,3,5,10,15]:
    #for x in [13,14,15,16,17]:
    for x in [15]:
        print x
        net = mlp.mlp(inputs, inputs, x, outtype='logistic')
        y, x1 = net.mlptrain(inputs, inputs, 0.25, 20000)
        net.confmat(inputs, inputs)
        net.confmat(noiseinputs1, inputs)
        net.confmat(noiseinputs2, inputs)

        plot(x1, y, '.')
    xlabel('Number of Learning Iterations')
    ylabel('Learning Error (Log 10)')
    show()
Example #28
def main(argv):
    args = parseCmd(argv)
    initLogging(args)

    # Set up the data
    # x,t = getData1()
    x, t = getData2()

    # # Split into training, testing, and validation sets
    train = x[0::2, :]
    test = x[1::4, :]
    valid = x[3::4, :]
    traintarget = t[0::2, :]
    testtarget = t[1::4, :]
    validtarget = t[3::4, :]

    # Perform basic training with a small MLP
    net = mlp.mlp(train, traintarget, 20, outtype='linear')
    error = net.earlystopping(train, traintarget, valid, validtarget, 0.25)

    # show result for test values
    biasedTest = np.concatenate((test, -np.ones((np.shape(test)[0], 1))), axis=1)
    outputs = net.mlpfwd(biasedTest)

    print "Error: " + str(error)
    print "Test error: " + str((0.5 * sum((outputs - testtarget) ** 2))[0])

    with open("result.dat", "w") as f:
        for i in range(0, len(biasedTest)):
            f.write(str(biasedTest[i][0]) + " " + str(biasedTest[i][1]) + " " + str(outputs[i][0]) + "\n")

    with open("network_in.dat", "w") as f:
        for i, w1 in enumerate(net.weights1):
            for j, w in enumerate(w1):
                f.write(str(i) + " " + str(j) + " " + str(w) + "\n")
            f.write("\n")
    with open("network_hidden.dat", "w") as f:
        for w2 in net.weights2:
            f.write(str(w2[0]) + "\n")
Example #29
def getFitness(string):
    
#    Gather the first 5 bits to calculate the number of hidden nodes, set one at random to off
    hiddenNodesActivation=[0,0,0,0,0]
    weightAct = []
    for i in xrange(5):
        hiddenNodesActivation[i] = int(string[i])
            
    x = random.randint(0,4) 
    hiddenNodesActivation[x]=0
    array=np.array([16,8,4,2,1])
    hiddenNodesActivation=np.array(hiddenNodesActivation)
    hiddenNodes=np.sum(hiddenNodesActivation*array)
    hiddenNodes = int(hiddenNodes)+1
    
#     create the activation matrix for the weights; depends on the number of hidden nodes, set one to off at random
    for i in range(44*hiddenNodes):
        weightAct.append(int(string[i+5]))
    
    x = random.randint(0,len(weightAct)-1)
    weightAct[x] = 0
    weightAct = np.array(weightAct).reshape((44,hiddenNodes))
   
#    create mlp and turn off weights as set above
    net = mlp.mlp(train_in,train_tgt,hiddenNodes,outtype='softmax')
    weight1 = net.weights1
    net.weights1 = weight1*weightAct

#     Take a few steps through the MLP and then check the percent correct, return that as fitness
#     edit MLP to return % correct
    valid = np.concatenate((train_in,-np.ones((np.shape(train_in)[0],1))),axis=1)
    
    count = 0
    while (count <50):
        net.mlpfwd(valid)
        count +=1
 
    return net.confmat(test_in,test_tgt)
Example #31
File: main.py Project: Finbark/test
def run_mlp():
    '''
    Run an MLP on pre-processed pollen data.
    The percentage correct is stored in an array.
    '''
    x = preprocess.preprocess('Pollens')
    pollen = np.array(x.create_one_file(SIMPLE_GRASS))
    pollen = x.normalise_max(pollen)
    train_set, train_set_target, test_set, test_set_target, validation_set, validation_set_target = x.make_groups(
        pollen, LABEL_SIZE, train_size=350, test_size=150, validation_size=150)

    p = mlp.mlp(train_set,
                train_set_target,
                30,
                momentum=0.9,
                outtype='softmax')
    error = p.earlystopping(train_set,
                            train_set_target,
                            validation_set,
                            validation_set_target,
                            0.1,
                            niterations=200)
    correct = p.confmat(test_set, test_set_target)
Example #32
    def cross_validation(self, X, Y, M, lr, max_iters, batch_size, k,
                         act_func):
        fold_size = int(np.floor(X.shape[0] / k))
        accuracy_ls = []

        folds_X, folds_Y = self.create_mini_batch(X, Y, fold_size)
        folds_X = np.asarray(folds_X)
        folds_Y = np.asarray(folds_Y)

        for i in range(k):
            X_test = folds_X[i]
            Y_test = folds_Y[i]
            X_train = np.concatenate(np.delete(folds_X, i, 0), axis=0)
            Y_train = np.concatenate(np.delete(folds_Y, i, 0), axis=0)

            N, D = X_train.shape
            N, K = Y_train.shape
            W = np.random.randn(M, K) * 0.01
            V = np.random.randn(D, M) * 0.01
            mlp_nn = mlp.mlp(W, V)

            mlp_nn.fit(X_train, Y_train, M, lr, max_iters, batch_size,
                       act_func)
            predictions, accuracy = mlp_nn.predict(X_test, Y_test, act_func)
            accuracy_ls.append(accuracy)

        return accuracy_ls, np.mean(accuracy_ls)
Example #33
    def k_fold_cross_validation(self, inputs, targets, early_stop_test, early_stop_test_target, num_folds, eta):
        n = np.shape(inputs)[0]
        size_list = range(100, n, num_folds)
        average_accuracy_list = []

        for size in size_list:
            indices = range(0, size, int(size / num_folds))
            accuracy_list = []

            mlp_helper = mlp.mlp(7, int(size / num_folds) * (num_folds - 1), 1, 19)
            for i in indices:
                validation_set = inputs[i:i + int(size / num_folds), :]
                validation_targets = targets[i:i + int(size / num_folds), :]

                train_set = np.concatenate((inputs[:i, :], inputs[i + int(size / num_folds):size, :]))
                train_targets = np.concatenate((targets[:i, :], targets[i + int(size / num_folds):size, :]))

                accuracy_list.append(float(
                    self.early_stop(train_set, train_targets, validation_set, validation_targets, eta, mlp_helper,
                                    early_stop_test, early_stop_test_target)))

            average_accuracy_list.append(np.mean(accuracy_list))
            print(average_accuracy_list)

        plt.plot(size_list, average_accuracy_list, 'g^')
        plt.axis([size_list[0], size_list[len(size_list) - 1], 0, 100])
        plt.show()

        size_list = np.transpose(np.array(list([size_list])))
        average_accuracy_list = np.transpose(np.array(list([average_accuracy_list])))

        table = tabulate(np.concatenate((size_list, average_accuracy_list), axis=1),
                         headers=['apps taken', 'average accuracy(%)'], floatfmt='.3f')

        with open('k_fold_X_validation_observations.txt', 'w') as f:
            f.write(table)
Example #34
    valid_targets = target[1::4]

    # Test data is used to evaluate how good the completely trained network is.
    test = movements[3::4,0:40]
    test_targets = target[3::4]

    return train, train_targets, valid, valid_targets, test, test_targets

#end prepare_data

#########
## Run #
#######

input_n_epoch = 10000 # input("Type number of epochs (tested on 10, 100 and 1000): ")
input_n_neuron = 18  # input("Type number of neurons in hidden layer (tested on 6, 8 and 12): ")

train, train_targets, valid, valid_targets, test, test_targets = prepare_data()

X = train
target = train_targets
number_of_neurons_in_input = X.shape[1]
number_of_targets = target.shape[1]

number_of_neurons_in_hidden = int(input_n_neuron) #12 #should test with 6, 8, 12
number_of_epochs = int(input_n_epoch) #1000 #10, 100, 1000
net = mlp(number_of_neurons_in_input, number_of_neurons_in_hidden, number_of_targets, beta=5)
net.early_stopping(X, target, valid, valid_targets, number_of_epochs)
threshold_value = 0.5
net.confusion(test, test_targets, threshold_value)
Example #35
def irismain():
    #1. make iris.data in usable form
    #2. make input set and output set out of it
    #3. make setpool out of the dataset
    #4. make pcn and train it
    #5. test on validation and testing set

    convert_iris()
    irisdata = np.loadtxt(
        "/home/swapnil/forgit/neuro-evolution/05/dataset/iris/newiris.data",
        delimiter=',')

    nin = 4  # for four features of iris
    nout = 3  # for 3 sets of iris flowers
    minerr = 10000000
    lis = []
    order = np.arange(np.shape(irisdata)[0])
    np.random.shuffle(order)
    irisdata = irisdata[order, :]
    irisdata[:, :4] = irisdata[:, :4] - irisdata[:, :4].mean(axis=0)
    imax = np.concatenate((irisdata.max(axis=0) * np.ones(
        (1, 7)), np.abs(irisdata.min(axis=0)) * np.ones((1, 7))),
                          axis=0).max(axis=0)
    irisdata[:, :4] = irisdata[:, :4] / imax[:4]
    errcal = 'confmat'
    eta = 0.29
    niterations = 500
    tlis = []
    for niterations in range(10, 1000, 10):
        minitererr = 10000000
        flag = 0
        for nhidden in range(1, 6):  # range for number of hidden nodes
            minnhiddenerr = 100000000
            for tupoftup in nextpartition(irisdata, nin, nout):
                train, traintarget = tupoftup[0]
                valid, validtarget = tupoftup[
                    1]  # each row of setpool holds inputs and their targets, so we need to separate them
                test, testtarget = tupoftup[2]

                #np.concatenate((train,valid),axis=0)
                #np.concatenate((traintarget,validtarget),axis=0)
                #valid is of no use on perceptron because perceptron can not overfit!! and neither is early-stopping.
                net = mlp.mlp(train, traintarget, nhidden, outtype='logistic')

                net.mlptrain(train, traintarget, eta, niterations // 2)
                validmeanerr = net.earlystopping(train,
                                                 traintarget,
                                                 valid,
                                                 validtarget,
                                                 eta,
                                                 niterations // 10,
                                                 errcaltype=errcal)
                print("no. of nodes", nhidden)
                lis.append(validmeanerr)
                if errcal == 'squaremean':
                    trainmeanerr = net.findmeantrainerr(train, traintarget)
                else:
                    trainmeanerr = net.confmat(train, traintarget)
                print("validation error: %f trainerr error:%f" %
                      (validmeanerr, trainmeanerr))
                #y=np.concatenate((x,-np.ones((np.shape(x)[0],1))),axis=1)
                minnhiddenerr = min(minnhiddenerr, validmeanerr)
                if validmeanerr < minerr:  # strict < keeps the network with the fewest nodes on ties
                    minerr = validmeanerr
                    bestnet = trainedmlp.trainedmlp(net, test, testtarget,
                                                    trainmeanerr, validmeanerr,
                                                    nhidden)
            if minnhiddenerr < 0.07:
                tempnhidden = nhidden
                flag = 1
                break
        if not flag:
            tempnhidden = 10
        tlis.append((niterations, tempnhidden))
    print(lis)
    niterationslis = [i[0] for i in tlis]
    temphiddenlis = [i[1] for i in tlis]
    iterarr = np.array(niterationslis) * np.ones((len(niterationslis), 1))
    nhiddenarr = np.array(temphiddenlis) * np.ones((len(temphiddenlis), 1))
    pl.plot(iterarr, nhiddenarr, '.')
    #pl.plot(x,,'o')
    pl.xlabel('iter')
    pl.ylabel('nhidden')

    pl.show()

    print("\n best network is attained with no. of nodes as ",
          bestnet.numnodes)
    leasterr = bestnet.test()
    print("error on test is %f while on valid  is %f" %
          (leasterr, bestnet.validmeanerr))
Example #36
import numpy as np
from mlp import mlp

and_data = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]])
xor_data = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])

X_train = and_data[:, :2]
y_train = and_data[:, 2:]

eta = 0.25
n_iterations = 1001

p = mlp(X_train, y_train, 2)
p.mlptrain(X_train, y_train, eta, n_iterations)
p.confmat(X_train, y_train)
Example #37
File: sinewave.py Project: senavis/ML
# Split into training, testing, and validation sets
train = x[0::2,:]
test = x[1::4,:]
valid = x[3::4,:]
traintarget = t[0::2,:]
testtarget = t[1::4,:]
validtarget = t[3::4,:]

# Plot the data
plot(x,t,'o')
xlabel('x')
ylabel('t')

# Perform basic training with a small MLP
import mlp
net = mlp.mlp(train,traintarget,3,outtype='linear')
net.mlptrain(train,traintarget,0.25,101)

# Use early stopping
net.earlystopping(train,traintarget,valid,validtarget,0.25)

# Test out different sizes of network
#count = 0
#out = zeros((10,7))
#for nnodes in [1,2,3,5,10,25,50]:
#    for i in range(10):
#        net = mlp.mlp(train,traintarget,nnodes,outtype='linear')
#        out[i,count] = net.earlystopping(train,traintarget,valid,validtarget,0.25)
#    count += 1
#    
#test = concatenate((test,-ones((shape(test)[0],1))),axis=1)
Example #38
# by Stephen Marsland (http://seat.massey.ac.nz/personal/s.r.marsland/MLBook.html)

# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.

# Stephen Marsland, 2008
# Kristian Valentin <*****@*****.**>, 2011

import numpy as np
import mlp

anddata = np.array([[0,0,0],[0,1,0],[1,0,0],[1,1,1]])
xordata = np.array([[0,0,0],[0,1,1],[1,0,1],[1,1,0]])

p = mlp.mlp(anddata[:,0:2],anddata[:,2:3],1)
p.mlptrain(anddata[:,0:2],anddata[:,2:3],0.25,1001)
p.confmat(anddata[:,0:2],anddata[:,2:3])

q = mlp.mlp(xordata[:,0:2],xordata[:,2:3],2,beta=.6,outtype='logistic')
q.mlptrain(xordata[:,0:2],xordata[:,2:3],0.2,5001,verbose=True)
q.confmat(xordata[:,0:2],xordata[:,2:3])

#anddata = np.array([[0,0,1,0],[0,1,1,0],[1,0,1,0],[1,1,0,1]])
#xordata = np.array([[0,0,1,0],[0,1,0,1],[1,0,0,1],[1,1,1,0]])
#
#p = mlp.mlp(anddata[:,0:2],anddata[:,2:4],2,outtype='linear')
#p.mlptrain(anddata[:,0:2],anddata[:,2:4],0.25,1001)
#p.confmat(anddata[:,0:2],anddata[:,2:4])
#
#q = mlp.mlp(xordata[:,0:2],xordata[:,2:4],2,outtype='linear')
Example #39
target = target[order, :]

# Split data into 3 sets

# Training updates the weights of the network and thus improves the network
train = movements[::2, 0:40]
train_targets = target[::2]

# Validation checks how well the network is performing and when to stop
valid = movements[1::4, 0:40]
valid_targets = target[1::4]

# Test data is used to evaluate how good the completely trained network is.
test = movements[3::4, 0:40]
test_targets = target[3::4]

# Try networks with different number of hidden nodes:
hidden = 12

# Initialize the network:
net = mlp.mlp(train, train_targets, hidden)

# Run training:
net.earlystopping(train, train_targets, valid, valid_targets)
# NOTE: You can also call train method from here,
#       and make train use earlystopping method.
#       This is a matter of preference.

# Check how well the network performed:
net.confusion(test, test_targets)
Example #40
mlp_train_score = []
mlp_valid_score = []
test_scores = []
for train_indices, valid_indices, test_indices in kf:

    train = iris[train_indices]
    train_tgt = targets[train_indices]

    valid = iris[valid_indices]
    valid_tgt = targets[valid_indices]

    test = iris[test_indices]
    test_tgt = targets[test_indices]

    """MLP"""
    mlpnet = mlp.mlp(train, train_tgt, 20, beta=1, momentum=0.9, outtype='logistic')
    mlpnet.mlptrain(train, train_tgt, 0.01, 1000, valid, valid_tgt)

    #Get training and validation scores on this Fold
    mlp_train_score.append(mlpnet.train_scores_list)
    mlp_valid_score.append(mlpnet.valid_scores_list)

    test_scores.append(mlpnet.score(test, test_tgt))


print test_scores
print np.sum(test_scores)

mlp_train_score = np.array(mlp_train_score)
mlp_valid_score = np.array(mlp_valid_score)
Example #41
target = zeros((shape(data)[0],3))
indices = where(data[:,p]==0)
target[indices,0] = 1
indices = where(data[:,p]==1)
target[indices,1] = 1
indices = where(data[:,p]==2)
target[indices,2] = 1


# Randomly order the data
order = list(range(shape(data)[0]))
random.shuffle(order)
data = data[order,:]
target = target[order,:]

# Split into training, validation, and test sets
train = data[::2,0:p]
traint = target[::2]
valid = data[1::4,0:p]
validt = target[1::4]
test = data[3::4,0:p]
testt = target[3::4]

#print train.max(axis=0), train.min(axis=0)

# Train the network
import mlp
net = mlp.mlp(train,traint,5,outtype='softmax')
net.earlystopping(train,traint,valid,validt,0.1)
net.confmat(test,testt)
Example #42
train = movements[::2,0:40]
traint = target[::2]

valid = movements[1::4,0:40]
validt = target[1::4]

test = movements[3::4,0:40]
testt = target[3::4]

# Train the network
k_fold = False
fold_number = 4

if not k_fold:
    net = mlp.mlp(train,traint)
    net.earlystopping(train, traint, valid, validt)
    net.confusion(test,testt)
else:
    movements = movements[::,0:40]
    movements = movements.tolist()
    target = target.tolist()
    folds = []
    tfolds = []
    fold_size = len(movements) // fold_number
    for i in range(0, fold_number):
        fold = []
        tfold = []
        for j in range(0, fold_size):
            select = random.randint(0, len(movements) - 1)
            fold.append(movements[select])
Example #43
f = gzip.open('../Data/mnist.pkl.gz','rb')
tset, vset, teset = cPickle.load(f)
f.close()

nread = 200
# Just use the first few images
train_in = tset[0][:nread,:]

# This is a little bit of work -- 1 of N encoding
# Make sure you understand how it does it
train_tgt = np.zeros((nread,10))
for i in range(nread):
    train_tgt[i,tset[1][i]] = 1

test_in = teset[0][:nread,:]
test_tgt = np.zeros((nread,10))
for i in range(nread):
    test_tgt[i,teset[1][i]] = 1

# We will need the validation set
valid_in = vset[0][:nread,:]
valid_tgt = np.zeros((nread,10))
for i in range(nread):
    valid_tgt[i,vset[1][i]] = 1

for i in [1,2,5,10,20]:
    print "----- "+str(i)
    net = mlp.mlp(train_in,train_tgt,i,outtype='softmax')
    net.earlystopping(train_in,train_tgt,valid_in,valid_tgt,0.1)
    net.confmat(test_in,test_tgt)
Example #44
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.

# Stephen Marsland, 2008

from numpy import *

import mlp


anddata = array([[0,0,0],[0,1,0],[1,0,0],[1,1,1]])
xordata = array([[0,0,0],[0,1,1],[1,0,1],[1,1,0]])

p = mlp.mlp(anddata[:,0:2],anddata[:,2:3],2)
p.mlptrain(anddata[:,0:2],anddata[:,2:3],0.25,1001)
p.confmat(anddata[:,0:2],anddata[:,2:3])

q = mlp.mlp(xordata[:,0:2],xordata[:,2:3],2,outtype='logistic')
q.mlptrain(xordata[:,0:2],xordata[:,2:3],0.25,5001)
q.confmat(xordata[:,0:2],xordata[:,2:3])

#anddata = array([[0,0,1,0],[0,1,1,0],[1,0,1,0],[1,1,0,1]])
#xordata = array([[0,0,1,0],[0,1,0,1],[1,0,0,1],[1,1,1,0]])
#
#p = mlp.mlp(anddata[:,0:2],anddata[:,2:4],2,outtype='linear')
#p.mlptrain(anddata[:,0:2],anddata[:,2:4],0.25,1001)
#p.confmat(anddata[:,0:2],anddata[:,2:4])
#
#q = mlp.mlp(xordata[:,0:2],xordata[:,2:4],2,outtype='linear')
Example #45

import numpy as np
import mlp

pima = np.loadtxt(r'C:\Users\subha\PycharmProjects\MLP_Pima/pima-indians-diabetes.data',delimiter=',')

# Plot the first and second values for the two classes
indices0 = np.where(pima[:,8]==0)
indices1 = np.where(pima[:,8]==1)


pima[:,:8] = pima[:,:8]-pima[:,:8].mean(axis=0)
imax = np.concatenate((pima.max(axis=0)*np.ones((1,9)),np.abs(pima.min(axis=0)*np.ones((1,9)))),axis=0).max(axis=0)
pima[:,:8] = pima[:,:8]/imax[:8]
print"\n"


p = mlp.mlp(pima[:,:8],pima[:,8:9],5,outtype='logistic')
p.mlptrain(pima[:,:8],pima[:,8:9],0.25,500)
print"\n"
p.confmat(pima[:,:8],pima[:,8:9])


print "output after preprocessing the data"
train = pima[::2,:8]
traint = pima[::2,8:9]
valid = pima[1::8,0:8]
validt = pima[1::8,8:9]
test = pima[1::2,:8]
testt = pima[1::2,8:9]


Example #46
File: iris.py Project: hushell/kod
# Split into training, validation, and test sets
target = zeros((shape(iris)[0], 3))
indices = where(iris[:, 4] == 0)
target[indices, 0] = 1
indices = where(iris[:, 4] == 1)
target[indices, 1] = 1
indices = where(iris[:, 4] == 2)
target[indices, 2] = 1

# Randomly order the data
order = list(range(shape(iris)[0]))
random.shuffle(order)
iris = iris[order, :]
target = target[order, :]

train = iris[::2, 0:4]
traint = target[::2]
valid = iris[1::4, 0:4]
validt = target[1::4]
test = iris[3::4, 0:4]
testt = target[3::4]

# print train.max(axis=0), train.min(axis=0)

# Train the network
import mlp

net = mlp.mlp(train, traint, 5, outtype="softmax")
net.earlystopping(train, traint, valid, validt, 0.1)
net.confmat(test, testt)
Example #47
target = target[order,:]

# Split data into 3 sets

# Training updates the weights of the network and thus improves the network
train = movements[::2,0:40]
train_targets = target[::2]

# Validation checks how well the network is performing and when to stop
valid = movements[1::4,0:40]
valid_targets = target[1::4]

# Test data is used to evaluate how good the completely trained network is.
test = movements[3::4,0:40]
test_targets = target[3::4]

# Try networks with different number of hidden nodes:
hidden = 12

# Initialize the network:
net = mlp.mlp(train, train_targets, hidden)

# Run training:
net.earlystopping(train, train_targets, valid, valid_targets)
# NOTE: You can also call train method from here,
#       and make train use earlystopping method.
#       This is a matter of preference.

# Check how well the network performed:
net.confusion(test,test_targets)
Example #48
for i in range(len(set_size)):
    if i > 0:
        set_size[i] = set_size[i] + set_size[i - 1]

train = data[:set_size[0],0:col-output_size]
traint = target[:set_size[0]]
valid = data[set_size[0]:set_size[1],0:col-output_size]
validt = target[set_size[0]:set_size[1]]
test = data[set_size[1]:set_size[2],0:col-output_size]
testt = target[set_size[1]:set_size[2]]

# Train the network
# MLP
if run[0]:
    import mlp as mlp_module
    mlp = mlp_module.mlp(train,traint,nhidden,outtype='softmax')
    mlp.earlystopping(train,traint,valid,validt,eta)
    mlp_correct_pct = mlp.confmat(test,testt)
    if save_results:
        mlp.get_data(resultPath, filename, SEED)
        print 'MLP weights saved'

#RBF
if run[1]:
    import rbf as rbf_module
    rbf = rbf_module.rbf(train,traint,nRBF,1,1)
    rbf.rbftrain(train,traint,0.25,2000)
    rbf_correct_pct = rbf.confmat(test,testt)
    if save_results:
        rbf.get_data(resultPath + filename)
        print 'RBF weights saved'
Example #49
# Split into training, validation, and test sets
target = np.zeros((np.shape(iris)[0], 3))
indices = np.where(iris[:, 4] == 0)
target[indices, 0] = 1
indices = np.where(iris[:, 4] == 1)
target[indices, 1] = 1
indices = np.where(iris[:, 4] == 2)
target[indices, 2] = 1

# Randomly order the data
order = list(range(np.shape(iris)[0]))
np.random.shuffle(order)
iris = iris[order, :]
target = target[order, :]

train = iris[::2, 0:4]
traint = target[::2]
valid = iris[1::4, 0:4]
validt = target[1::4]
test = iris[3::4, 0:4]
testt = target[3::4]

# print train.max(axis=0), train.min(axis=0)

# Train the network
import mlp

net = mlp.mlp(train, traint, 5, outtype='logistic')
net.earlystopping(train, traint, valid, validt, 0.1)
net.confmat(test, testt)
Example #50
# Read the dataset in (code from sheet)
f = gzip.open('mnist.pkl.gz', 'rb')
tset, vset, teset = pickle.load(f, encoding='iso-8859-1')
f.close()

nread = 200
# Just use the first few images
train_in = tset[0][:nread, :]

# This is a little bit of work -- 1 of N encoding
# Make sure you understand how it does it
train_tgt = np.zeros((nread, 10))
for i in range(nread):
    train_tgt[i, tset[1][i]] = 1

test_in = teset[0][:nread, :]
test_tgt = np.zeros((nread, 10))
for i in range(nread):
    test_tgt[i, teset[1][i]] = 1

# We will need the validation set
valid_in = vset[0][:nread, :]
valid_tgt = np.zeros((nread, 10))
for i in range(nread):
    valid_tgt[i, vset[1][i]] = 1

for i in [1, 2, 5, 10, 20]:
    print("----- " + str(i))
    net = mlp.mlp(train_in, train_tgt, i, outtype='softmax')
    net.earlystopping(train_in, train_tgt, valid_in, valid_tgt, 0.1)
    net.confmat(test_in, test_tgt)
Example #51
    
    print_NN_params() #remind us what architecture was tested
    start = time.time()

    if train_func == 'B':
        '''BackPropagation makes use of mlp.py and mlp2.py, codes for 1 and 2 layer Networks respectively,
           written by Stephen Marsland (check modules for ref) '''
        
        import mlp, mlp2 #, mlp_threaded - Not implemented yet
        signal.signal(signal.SIGINT, signal_handler)  #register the signal handler

        #Build the network 
        if nlayers > 1:
            net = mlp2.mlp(train,traint, nhidden, nhiddeno, outtype='softmax')
        elif nlayers == 1:
            net = mlp.mlp(train,traint, nhidden, outtype='softmax')
        
        print 'training mlp...'
        net.mlptrain(train,traint, 0.1, 700*percent_dataset_usage) #train some iterations before calling earlystopping!!
        net.earlystopping(train,traint,valid,validt,0.1)  #train until validation error start increasing
        net.confmat(test,testt)   #autopsy report 
    
    elif train_func == 'R':  
        ''' Resilient Propagation makes use of pyBrain framework (must be installed).
            Significantly faster than Bprop!! '''
        #Signal handler (Ctrl-C) not implemented yet for Rprop... meaning if stopped its lost
        
        # Train the network
        from pybrain.datasets            import ClassificationDataSet
        from pybrain.utilities           import percentError
        from pybrain.tools.shortcuts     import buildNetwork
Example #52
# FUSE CLASSES INTO ONE DATASET
dataset = numpy.concatenate((class1, class2), axis=0)
# SHUFFLE THE DATASET
order = list(range(numpy.shape(dataset)[0]))
random.shuffle(order)
dataset = dataset[order, :]

# CREATE TRAINDATA AND TARGETDATA
train = dataset[0:nrOfPoints, 0:2]
traint = numpy.zeros((len(train), 1))
indices = numpy.where(dataset[0:nrOfPoints, 2] == 1)
traint[indices, 0] = 1

# TRAINING!!!
ANN = mlp.mlp(train, traint, nhidden, beta, momentum)
ANN.mlptrain(train, traint, eta, iterations)
ANN.confmat(train, traint)

# PLOTTING?
xRange = numpy.arange(-4, 4, 0.1)  ###
yRange = numpy.arange(-4, 4, 0.1)  #
xgrid, ygrid = numpy.meshgrid(xRange, yRange)  #
#
noOfPoints = xgrid.shape[0] * xgrid.shape[1]  #
xcoords = xgrid.reshape((noOfPoints, 1))  ### CAN BE DONE WITH 2 FORLOOPS
ycoords = ygrid.reshape((noOfPoints, 1))  #
samples = numpy.concatenate((xcoords, ycoords), axis=1)  #
#
ones = -numpy.ones(xcoords.shape)  #
samples = numpy.concatenate((samples, ones), axis=1)  ###