Example #1
 def train(self, x_train, x_label, epoch, batch_size, eta, isClassification= True, smoothing= False):
         #train input and first hidden layer with unsupervised learning
         network = MLP.MLP([self.architecture[0], self.architecture[1], self.architecture[0]])
         epoc, result = network.train(x_train, x_train, epoch, batch_size, eta)
         #store the trained weights and biases of the first layer
         self.weights.append(network.getWeights()[0])
         self.biases.append(network.getBiases()[0])
         #train each hidden layer using unsupervised learning
         for x in range(self.number_of_layer - 3):
                 network = MLP.MLP([self.architecture[x+1], self.architecture[x+2], self.architecture[x+1]])
                 hidden_nodes = np.asarray([network.feed_forward(sample, self.biases, self.weights) for sample in x_train])
                 epoc, result = network.train(hidden_nodes, hidden_nodes, epoch, batch_size, eta)
                 #store trained weights and biases of each hidden layer
                 self.weights.append(network.getWeights()[0])
                 self.biases.append(network.getBiases()[0])                        
         #train the predicted output layer by taking the last hidden layer and
         #using supervised learning (the input and output layers differ)
         hidden_nodes = np.asarray([network.feed_forward(sample, self.biases, self.weights) for sample in x_train])
         network1 = MLP.MLP([self.architecture[-2], self.architecture[-1]])
         epoc1, result1 = network1.train(hidden_nodes, x_label, epoch, batch_size, eta)
         #store trained weights and biases of predicted layer
         self.weights.append(network1.getWeights()[0])
         self.biases.append(network1.getBiases()[0])
         
         #smooth (fine-tune) the whole trained network with backpropagation if the
         #smoothing parameter is set to True, to see if it generates better results
         if smoothing:
                 network = MLP.MLP(self.architecture)
                 network.setBiases(self.biases)
                 network.setWeights(self.weights)
                 #use a minimal number of epochs to limit the vanishing
                 #gradient problem
                 network.train(x_train, x_label, 50, batch_size, eta)
                 self.biases = network.getBiases()
                 self.weights = network.getWeights()
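
A minimal usage sketch for the method above, paired with the matching test() shown in Example #15. The StackedAutoEncoder class name and constructor are assumptions; only the train()/test() signatures come from the snippets.

#hypothetical wrapper; only the train()/test() calls mirror the code above
sae = StackedAutoEncoder(architecture=[64, 32, 16, 10])
sae.train(x_train, x_label, epoch=300, batch_size=50, eta=2, smoothing=True)
predicted = sae.test(x_test, isClassification=True)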
Example #2
def main():
    print('Train new weights or load existing weights?')
    print('1: Train new weights')
    print('2: Load existing weights')
    x = input()
    if x == '1':
        print('Training neural network...')
        model = MLP.train()
        print('Neural network training complete!')
        model.save('MLP.h5')
    else:
        print('Loading neural network...')
        model = load_model('MLP.h5')
        print('Neural network loading complete!')

    file_name = input('Enter file name: ')
    print('Processing scanned invoice...')
    x, y = crop.finder(file_name)
    crop.crop_invoice(x, y, file_name)
    preprocess.prep_input()

    print('Updating excel file...')
    serial = MLP.get_serial(model)
    date = MLP.get_date(model)
    excel.add_date(date)
    values, unit_check = MLP.get_data(model)
    excel.check_values(values, unit_check)
    excel.add_values(values)
    print('Excel file updated!')
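
A sketch of the imports this main() appears to rely on; MLP, crop, preprocess and excel are project-local modules, and load_model looks like the Keras helper, though that is an assumption.

from keras.models import load_model  #assumed source of load_model()
import MLP
import crop
import preprocess
import excel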
Example #3
    def train(self, event):
        self.perceptron = MLP.MultiLayerPerceptron(1044, 32, 1)
        self.training_set = MLP.get_set_from_file(name='training_set.txt')
        self.testing_set = MLP.get_set_from_file(name='testing_set.txt')

        learn_error_list = []
        testing_error_list = []
        for i in range(15):
            self.perceptron.network_training(self.training_set)
            print('Epoch: {0}\tTraining error: {1}\tGeneralization error: {2}'.
                  format(
                      i,
                      self.perceptron.calculate_total_network_error(
                          self.training_set),
                      self.perceptron.network_testing(self.testing_set)))

            with open('perceptron.pickle', 'wb') as f:
                pickle.dump(self.perceptron, f)

            learn_error = self.perceptron.calculate_total_network_error(
                self.training_set)
            testing_error = self.perceptron.network_testing(self.testing_set)

            learn_error_list.append(learn_error)
            testing_error_list.append(testing_error)

            if 0.5 * (learn_error - testing_error)**2 < 0.0001 and (
                    learn_error + testing_error) / 2 < 0.01:
                break

            #self.training_set.reverse()
            #self.testing_set.reverse()
Example #4
def define_nn(n_input_dims,
              n_hidden_dims,
              n_hidden_layer,
              n_output_dims,
              W_init=None,
              b_init=None):
    layer_sizes = [n_input_dims]
    activations = []
    for i in range(0, n_hidden_layer):
        layer_sizes.append(n_hidden_dims)
        activations.append(T.nnet.sigmoid)

    layer_sizes.append(n_output_dims)
    # activations.append(T.nnet.relu)
    activations.append(None)

    # Set initial parameter values
    act_count = 0
    if W_init is None:
        W_init = []
        b_init = []
        for n_input, n_output in zip(layer_sizes[:-1], layer_sizes[1:]):
            if activations[act_count] == T.tanh:
                W_init.append(
                    np.random.RandomState(RDMSEED).uniform(
                        low=-np.sqrt(6. / (n_output + n_input)),
                        high=np.sqrt(6. / (n_output + n_input)),
                        size=(n_output, n_input)))
            elif activations[act_count] == T.nnet.sigmoid:
                W_init.append(
                    np.random.RandomState(RDMSEED).uniform(
                        low=-4 * np.sqrt(6. / (n_output + n_input)),
                        high=4 * np.sqrt(6. / (n_output + n_input)),
                        size=(n_output, n_input)))
            elif activations[act_count] == T.nnet.relu:
                W_init.append(
                    np.random.RandomState(RDMSEED).normal(
                        0., np.sqrt(2. / n_input), (n_output, n_input)))
            else:
                W_init.append(
                    np.random.RandomState(RDMSEED).normal(
                        0., 0.01, (n_output, n_input)))
            act_count += 1
            b_init.append(np.zeros(n_output))

        #mlp = MLP.MLP_wDO(W_init, b_init, activations)
        mlp = MLP.MLP(W_init, b_init, activations)
    else:
        mlp = MLP.MLP(W_init, b_init, activations)

    return mlp
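
A hypothetical call to define_nn above; the dimensions are arbitrary, and the function assumes a module-level RDMSEED constant in the original file.

mlp = define_nn(n_input_dims=10, n_hidden_dims=64, n_hidden_layer=2, n_output_dims=1)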
Example #5
    def __init__(self,
                 n_models,
                 n_input,
                 n_hidden,
                 n_output,
                 eta=0.1,
                 lambd=0.0,
                 alfa=0.0,
                 range_W_h_start=-0.7,
                 range_W_h_end=0.7,
                 range_W_o_start=-0.7,
                 range_W_o_end=0.7,
                 use_fan_in=False,
                 activation_hidden="sigmoid",
                 activation_output="sigmoid"):

        self._n_models = n_models

        self._bag_errors = []
        self._bag_valid_errors = []
        self._bag_accuracies = []
        self._bag_valid_accuracies = []

        self._models = []

        for i in range(n_models):
            self._models.append(
                MLP(n_input, n_hidden, n_output, eta, lambd, alfa,
                    range_W_h_start, range_W_h_end, range_W_o_start,
                    range_W_o_end, use_fan_in, activation_hidden,
                    activation_output))
Example #6
    def runSVM(self):

        if self.isModelReady is False:
            QMessageBox.about(
                self, 'Message',
                'The model is not made yet!\n Please make the model!')
            return

        # print("--------TRAINING--------")
        if self.fileName != "":
            self.splitSize = int(self.split_lineEdit.text())
            self.fileName = self.csv_lineEdit.text()
            self.epochs = int(self.epochs_lineEdit.text())

            if self.splitSize <= 40:
                # print("Test percentage: ",self.splitSize)
                self.results = MLP.run(file_name=self.fileName,
                                       model=self.m.temp_model,
                                       testing_percentage=self.splitSize)
            else:
                pass  # print("cannot train on such small dataset")
        else:
            pass  # print("incorrect file name!")
        # print("--------SUCCESSFUL--------")

        QMessageBox.about(self, "Results:", self.results)
Example #7
 def __init__(self, *args, **kwargs):
     super(retrainPacketModel, self).__init__(*args, **kwargs)
     print(args)
     print(kwargs)
     self.lock = kwargs['args'][1]
     self.db = kwargs['args'][0]
     self.mlp = mlp.MLP([100,100], 147)
Example #8
 def run_tests(self):
     count = 0
     for vocabSize in self.vocabSizeVector:
         for maxSequenceLength in self.maxSequenceLengthVector:
             for vectorizationType in self.vectorizationTypeVector:
                 for epochs in self.epochsVector:
                     for minNumArticlesPerDewey in self.minNumArticlesPerDeweyVector:
                         new_configFile, run_name = self.create_config_file(
                             vocabSize, maxSequenceLength,
                             vectorizationType, epochs,
                             minNumArticlesPerDewey)
                         tid = time.time()
                         count += 1
                         run_length = len(self.vocabSizeVector) * len(
                             self.maxSequenceLengthVector) * len(
                                 self.vectorizationTypeVector) * len(
                                     self.epochsVector) * len(
                                         self.minNumArticlesPerDeweyVector)
                         print("Running test {} of {} : ".format(
                             count, run_length))
                         mlp_model = MLP.mlp(new_configFile)
                         mlp_model.fit()
                         mlp_model.predict(self.testSetPath)
                         mlp_model.get_predictions(mlp_model.predictions,
                                                   mlp_model.correct_deweys)
                         mlp_model.evaluate_prediction()
                         new_logPath = os.path.join(self.logFolder,
                                                    run_name + ".log")
                         mlp_model.printResultToLog(new_logPath)
                         print("It took {} \n".format(time.time() - tid))
Example #9
 def run_MLP(self,key, dataset, train, test, isClassification, input_neuron, output_neuron, num_hidden_layers):
         #takes network architecture arguments and runs the MLP class accordingly
         x_train, train_label = ld.LoadDataset().get_neural_net_input_shape(dataset, train, isClassification)
         x_test, test_label = ld.LoadDataset().get_neural_net_input_shape(dataset, test, isClassification)
         #call class MLP based on the hidden layers provided.
         if num_hidden_layers == 0:
                 net = MLP.MLP([input_neuron, output_neuron])
         elif num_hidden_layers == 1:
                 net = MLP.MLP([input_neuron, ld.LoadDataset().get1sthiddenlayernode(key), output_neuron])
         else:
                 node_list = ld.LoadDataset().get2ndhiddenlayernode(key)
                 net = MLP.MLP([input_neuron, node_list[0], node_list[1], output_neuron])
         epoc, result = net.train(x_train, train_label, 300, 50, 2, isClassification)
         plot_graph(key+' with hidden nodes '+ str(num_hidden_layers), epoc, result)
         predicted = net.test(x_test, isClassification)
         return predicted, test.iloc[:, -1]
Example #10
    def run(self):
        print(
            "Thread {0}: starting {1} TRAINING with {2} dimensions and {3} hidden layers at {4}"
            .format(self.thread_ID, self.name, self.num_dim,
                    self.num_hidden_layers, time.ctime(time.time())))
        mlp = MLP.MLP(self.num_inputs, self.num_hidden_layers,
                      self.num_nodes_per_layer, self.num_outputs,
                      self.training_data)
        mlp.train()
        temp = []
        print(len(mlp.overall_error))
        with open('MLP{0} Learning Curve.csv'.format(self.thread_ID),
                  'w',
                  newline='') as csvfile:
            for i in range(len(mlp.overall_error)):
                temp.append(mlp.overall_error[i][0])
            writer = csv.writer(csvfile, delimiter=',')
            writer.writerow(temp)
            print(temp)

        print(
            "Thread {0}: starting {1} TESTING with {2} dimensions and {3} hidden layers at {4}"
            .format(self.thread_ID, self.name, self.num_dim,
                    self.num_hidden_layers, time.ctime(time.time())))

        result = mlp.hypothesis_of(self.testing_data)
        print(
            "Thread {0}: {1} result {5} with {2} dimensions and {3} hidden layers at {4}"
            .format(self.thread_ID, self.name, self.num_dim,
                    self.num_hidden_layers, time.ctime(time.time()), result))
        with open('MLP{0} Test.csv'.format(self.thread_ID), 'w',
                  newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            writer.writerow(result)
Example #11
def main(arguments: Namespace) -> None:
    batch_size = 200
    learning_rate = 2e-3
    num_classes = 2
    num_nodes = [500, 500, 500]

    # load the features of the dataset
    features = datasets.load_breast_cancer().data

    # standardize the features
    features = StandardScaler().fit_transform(features)

    # get the number of features
    num_features = features.shape[1]

    # load the labels for the features
    labels = datasets.load_breast_cancer().target

    train_features, test_features, train_labels, test_labels = train_test_split(
        features, labels, test_size=0.20, stratify=labels)

    model = MLP.MLP(alpha=learning_rate,
                    batch_size=batch_size,
                    node_size=num_nodes,
                    num_classes=num_classes,
                    num_features=num_features)

    model.train(num_epochs=arguments.num_epochs,
                log_path=arguments.log_path,
                train_data=[train_features, train_labels],
                train_size=train_features.shape[0],
                test_data=[test_features, test_labels],
                test_size=test_features.shape[0],
                result_path=arguments.result_path)
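
For comparison, a minimal sketch of the same breast-cancer setup using scikit-learn's built-in MLPClassifier rather than the custom MLP module used above; hyperparameters mirror the constants at the top of main().

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler

data = load_breast_cancer()
features = StandardScaler().fit_transform(data.data)
train_x, test_x, train_y, test_y = train_test_split(
    features, data.target, test_size=0.20, stratify=data.target)

#three hidden layers of 500 units, as in num_nodes above
clf = MLPClassifier(hidden_layer_sizes=(500, 500, 500), learning_rate_init=2e-3,
                    batch_size=200, max_iter=200)
clf.fit(train_x, train_y)
print(clf.score(test_x, test_y))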
Example #12
 def __init__(self,stateDim:int,nHidden:int,networkUnits:int,networkActivation,useSkips=False,learningRate:float=1e-3,nHistory:int=1,lossType="L2"):
     stateIn=tf.placeholder(dtype=tf.float32,shape=[None,stateDim])
     valueIn=tf.placeholder(dtype=tf.float32,shape=[None,1])             #training targets for value network
     critic,criticInit=MLP.mlp(stateIn,nHidden,networkUnits,1,networkActivation,firstLinearLayerUnits=0,useSkips=useSkips)  #need a handle for the DenseNet instance for network switching
     diff=valueIn-critic
     if lossType=="L2":
         loss=tf.reduce_mean(tf.square(diff))    
     elif lossType=="L1":
         loss=tf.reduce_mean(tf.abs(diff))       #L1 loss, can be more stable
     elif lossType=="SoftL1":
         loss=tf.reduce_mean(softAbs(diff))       #L1 loss with zero gradient at optimum
     else:
         raise Exception("Loss type not recognized!")
     def optimize(loss):
         optimizer=tf.train.AdamOptimizer(learning_rate=learningRate)
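         #useGradientClipping, maxGradientNorm (used below) and softAbs (used above) are assumed to be module-level definitions in the original file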
         if not useGradientClipping:
             return optimizer.minimize(loss)
         gradients, variables = zip(*optimizer.compute_gradients(loss))
         gradients, _ = tf.clip_by_global_norm(gradients, maxGradientNorm)
         return optimizer.apply_gradients(zip(gradients, variables))
     optimizeCritic=optimize(loss)
     #remember some of the tensors for later
     self.loss=loss
     self.nHistory=nHistory
     self.history=deque()
     self.criticInit=criticInit
     self.stateIn=stateIn
     self.valueIn=valueIn
     self.initialized=False
     self.stateDim=stateDim
     self.critic=critic
     self.optimize=optimizeCritic
Example #13
    def train(self):
        # self.trainBuntton["state"] = 'disabled'
        #sizes, epochs, eta, update_queue, validation_threshold, error_threshold
        sizes = [int(i) for i in self.sizes.get().split(',')]
        eta = float(self.eta.get())
        epochs = int(self.epochs.get())
        et = float(self.error_threshold.get())
        vt = int(self.validation_threshold.get())
        mu = float(self.mu.get())
        bs = int(self.batch_size.get())
        activationFunc = self.activationFunc.get()
        widrow = self.widrow.get()

        if not self.plotter.is_alive():
            self.plotter.start()

        if self.mlp is not None:
            if self.mlp.exitcode != 0:
                print 'Another learner is working.'
                return
            self.mlp.terminate()

        self.mlp = MLP.Network(sizes, epochs, eta, mu, self.updatesQueue, vt,
                               et, widrow, bs, activationFunc)
        self.mlp.start()
Example #14
def test(args):
    print('Welcome to the world of neural networks!')

    activation = 'sigmoid'

    if (args.dataset == 'MNIST'):
        inSize = 28 * 28
    else:
        inSize = 200 * 200

    NoImages = len(os.listdir(args.test))

    inputs = np.zeros((NoImages, inSize))
    itrNo = 0
    for imgName in os.listdir(args.test):

        imgName = args.test + '/' + imgName

        if (args.dataset == 'Cat-Dog'):
            img = util.rgb2gray(mpimg.imread(imgName))
        else:
            img = mpimg.imread(imgName)

        inputs[itrNo, :] = img.reshape(inSize)
        itrNo += 1

    network = mlp.MLNN()
    fileName = '../Model/' + args.dataset + '.npy'
    numLayers = network.loadWeights(fileName)
    network.forwardProp(inputs)
    predict = network.predict(network.nnLayers[numLayers - 1].output)
    predicted = np.argmax(predict, axis=1)
    print(predicted)
Example #15
 def test(self, x_test, isClassification= True):
         #test the model with test set and return predicted output for classification or regression.
         network = MLP.MLP(self.architecture)
         #set weights and biases based on SAE weights and biases
         network.setBiases(self.biases)
         network.setWeights(self.weights)
         return network.test(x_test, isClassification)
Example #16
def test_sll():
    model = torch.nn.Sequential(
        MLP.layer.CLinearLayer(in_features=dim,out_features=dim,dtype=torch.cdouble,bias=True),
#        MLP.activation.SplitActivation(torch.nn.Tanh()),
        MLP.activation.PhaseAmplitude(p=1,q=1),
    ).to(device)

    loss_fct = MLP.loss.LpLoss(p=1)

    # Note GPU training requires SGD as Adam uses addcmul_cuda
    optimizer = torch.optim.Adam(model.parameters(),lr=lr,weight_decay=wd)

    loss_train, loss_valid = MLP.train( train_dataLoader=train_dataLoader,
                                        valid_dataLoader=valid_dataLoader,
                                        model=model,
                                        optimizer=optimizer,
                                        loss_function=loss_fct,
                                        num_epochs=num_epochs)

    print(f"Average Inference Loss = {inference(model,loss_fct):.4e}")
    abscissa = torch.arange(num_epochs)
    plt.plot(abscissa,loss_train,'.-',c='b',label="Loss Train")
    plt.plot(abscissa,loss_valid,'.-',c='g',label="Loss Valid")
    plt.title(model.extra_repr())
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.grid()
    plt.legend()
    plt.show()
    plt.clf()
Example #17
def main(arguments):

    # load the features of the dataset
    features = datasets.load_breast_cancer().data

    # standardize the features
    features = StandardScaler().fit_transform(features)

    # get the number of features
    num_features = features.shape[1]

    # load the labels for the features
    labels = datasets.load_breast_cancer().target

    train_features, test_features, train_labels, test_labels = train_test_split(
        features, labels, test_size=0.20, stratify=labels)

    model = MLP.MLP(
        alpha=LEARNING_RATE,
        batch_size=BATCH_SIZE,
        node_size=NUM_NODES,
        num_classes=NUM_CLASSES,
        num_features=num_features,
    )

    model.train(
        num_epochs=arguments.num_epochs,
        log_path=arguments.log_path,
        train_data=[train_features, train_labels],
        train_size=train_features.shape[0],
        test_data=[test_features, test_labels],
        test_size=test_features.shape[0],
        result_path=arguments.result_path,
    )
Example #18
    def __init__(self, len):
        r"""

        :param len: the length of input vector.
        """
        super(SubGraphLayer, self).__init__()
        self.g_enc = MLP.MLP(len, len)
Example #19
def main():
    imp.ImportData().importData()
    dataset = lod.LoadDataset().loading()

    train = dataset[0:30609]
    valid = dataset[30610:40812]
    test = dataset[40812:51016]

    tent = 1
    nash = None
    epochsArray = [200 for i in range(0, 10)]
    for epoch in epochsArray:

        network = mlp.MLP(epochs=epoch,
                          train_data=train,
                          valid_data=valid,
                          test_data=test,
                          learning_rate=0.01,
                          momentum=0.5,
                          rmse_min=0.04,
                          stop_params="epochs",
                          tent=tent,
                          bias=1)
        network.train()
        nash_test = network.test()

        if tent == 1:
            nash = nash_test

        print("Best NASH: " + str(nash) + " - NASH attempt " + str(tent) +
              " : " + str(nash_test))
        if nash < nash_test:
            nash = nash_test
        tent += 1
Example #20
    def __init__(self, data, xdim, nhidden=5, mlp_alpha=2):
        GPLVM.__init__(self, data, xdim)

        self.MLP = MLP.MLP((self.Ydim, nhidden, self.Xdim), alpha=mlp_alpha)
        self.MLP.train(
            self.GP.Y,
            self.GP.X)  #create an MLP initialised to the PCA solution...
        self.GP.X = self.MLP.forward(self.GP.Y)
Example #21
def tune(n):
    model = MLP((4,), training_epochs = 5000, beta=betas[n], debug = False)

    m = Model(model, transfs, gen_x, gen_y, RMSE)
    window = [1, 4, 12]
    ret = m.expanding_window(X_train, y_train, TRAIN_OFFSET, window, 'dynamic')
    print(betas[n])
    return betas[n], ret[1][3].iloc[-1, 0], ret[1][0], ret[4][0], ret[12][0]
Example #22
    def mutation(self):
        """Goes through the individuals in the new population and randomly changes
        certain weights by some random value"""

        new_pop = []
        #print(self.population)
        for individual in self.population:

            for i in range(len(individual)):
                if random.random() < self.mutation_rate:
                    individual[i] += random.uniform(-self.mutation_amount,
                                                    self.mutation_amount)
                    #print(individual)
            x = MLP(self.dataclass, self.class_type)
            x.set_weights(individual)
            new_pop.append(x)
        self.population = new_pop
Example #23
    def setup_classifier_for_trainning(self, sents, trees, labeled):
        embedding_size = self.embedding_size
        num_basic_tokens = self.num_tokens  #num_word_tokens  = 18; num_pos_tokens= 18; num_label_tokens = 12;
        hidden_size = self.hidden_size
        init_range = 0.010
        Eb_entries = 0
        Eb_entries = len(self.known_poss) + len(self.known_words)
        if labeled:
            Eb_entries += len(self.known_labels)
        #Eb=np.zeros([Eb_entries,embedding_size],float)
        Eb = np.random.rand(
            Eb_entries, embedding_size
        )  #init embeddings randomly for words, POS tags and labels
        Eb = (Eb * 2 - 1) * init_range
        #W1_ncol=embedding_size*num_basic_tokens
        W1_ncol = self.config.input_length
        W1_init_range = np.sqrt(6.0 / (W1_ncol + hidden_size))
        #W1=np.zeros([hidden_size,W1_ncol],float)
        W1 = np.random.rand(hidden_size, W1_ncol)
        W1 = (W1 * 2 - 1) * W1_init_range
        #b1=np.zeros(hidden_size,float)
        b1 = np.random.rand(hidden_size)
        b1 = (b1 * 2 - 1) * W1_init_range
        if labeled:
            n_actions = len(self.known_labels) * 2 + 1
        else:
            n_actions = 3
        W2_init_range = np.sqrt(6.0 / (n_actions + hidden_size))
        #W2=np.zeros([n_actions,hidden_size],float)
        W2 = np.random.rand(n_actions, hidden_size)
        W2 = (W2 * 2 - 1) * W2_init_range

        #match embedding dictionary
        in_embed = 0
        Eb_nrows = Eb_entries
        Eb_ncols = embedding_size
        for i in range(Eb_nrows):
            index = -1
            if (i < len(self.known_words)):
                #print self.known_words[i]
                word = self.known_words[i]
                if word in self.embed_ids:
                    index = self.embed_ids[word]
                elif word.lower() in self.embed_ids:
                    index = self.embed_ids[word.lower()]
            if index >= 0:
                in_embed += 1
                Eb[i] = self.embeddings[index]
        print "---Setup Classifier---"
        print "found embeddings:", in_embed, "/", len(self.known_words)
        dataset = self.gen_train_samples(sents, trees)
        print "creating classifier (", self.config.input_length, ",", self.hidden_size, ",", n_actions, ")"
        #classifier=NNClassifier(dataset,Eb,W1,b1,W2,self.pre_computed_ids)
        (features, labels) = self.preprocess_dataset(dataset)
        #self.classifier=MLP.MLP([self.embedding_size*self.num_tokens,self.hidden_size,n_actions],Eb,W1,b1,W2,self.pre_computed_ids,features,labels)
        self.classifier = MLP.MLP(
            [self.config.input_length, self.hidden_size, n_actions], Eb, W1,
            b1, W2, self.pre_computed_ids, features, labels)
Example #24
 def test(self):
     multi = MLP()
     multi.read_excel('ObjectRecognition2.xls')
     multi.read_xml("MLP.xml")
     acc, conf = multi.test()
     print(acc)
     print(conf)
     self.do_message()
Example #25
    def __init__(self, X, Y, Zdim, lda, nhidden=5, mlp_alpha=2):
        GPLVM_z_lnhsic.__init__(self, X, Y, Zdim)

        self.MLP = MLP.MLP((self.Xdim + self.Ydim, nhidden, self.Zdim),
                           alpha=mlp_alpha)
        self.MLP.train(
            self.GP_z.XY,
            self.GP_z.Z)  #create an MLP initialised to the PCA solution
        self.GP_z.Z = self.MLP.forward(self.GP_z.XY)
        self.lda_2_mlp = lda
Example #26
 def __init__(self, len, timeStampNumber):
     r"""
     Construct a VectorNet with a trajectory-prediction decoder.
     :param len: same as VectorNet.
     :param timeStampNumber: the number of future time stamps to predict.
     """
     super(VectorNetWithPredicting, self).__init__()
     self.vectorNet = VectorNet(len=len)
     self.trajDecoder = MLP.MLP(inputSize=self.vectorNet.pLen,
                                outputSize=timeStampNumber * 2,
                                noReLU=False)
Example #27
 def __init__(self,NetLayers,ImageSize):
     self.MyNet = net.MLP(NetLayers)
     self.TrainingData = []
     self.TrainingResult = []
     self.Size = ImageSize
     self.Iteration = 50
     self.BatchTask = 10
     self.Epselom = 0.5
     if((ImageSize[0]*ImageSize[1]) != NetLayers[0]):
         print '>> Image Size and Input layer should Match'
         exit()
Example #28
    def _depthOfScene(self, hiddenLayers, activationFuncs, segments):
        """
        Calculate the depth of the scene using our method. The segments to
        calculate depths for must be chosen together with the matching pickle file.

        Args:
            hiddenLayers ([int]):  hidden layer sizes for the MLP calculating the depths
            activationFuncs:       activation functions for the MLP (tf.nn functions)
            segments ([int]):      the segments to be used in the depth calculations
        """

        usedSegments = segments

        optimizerFuncs = [
            tf.train.AdamOptimizer(0.1),
            tf.train.AdamOptimizer(0.01),
            tf.train.AdamOptimizer(0.001),
            tf.train.AdamOptimizer(0.0001)
        ]
        lossFunc = tf.losses.absolute_difference

        #Create the mlp data
        mlpDataProsessor = MLPDataProsessor.MLPDataProsessor(
            self.imgPathL,
            self.imgPathR,
            usedSegments=usedSegments,
            picklePath=self.picklePath)
        mlpDataProsessor.normalizeMLPData()
        inputMLP, targetMLP, inputTestMLP, targetTestMLP = mlpDataProsessor.getMLPData(
        )
        mlp = MLP.MLP(len(inputMLP[0]), 1, hiddenLayers, optimizerFuncs,
                      activationFuncs, lossFunc)

        #Train the mlp and format the data
        mlp.train(inputMLP,
                  targetMLP,
                  batchSize=100,
                  earlyStopEpochs=3,
                  iterations=75,
                  maxEpochs=2000,
                  verbose=True,
                  kFoldValidationSize=0.20,
                  epochsBetweenPrint=10)
        array = mlpDataProsessor.convertSegmentCoordinatesToArray(usedSegments)
        result = mlp.forward(array)
        resultDenormalized = mlpDataProsessor.denormalizeTarget(result)
        resultImage = mlpDataProsessor.convertResultArrayToImage(
            resultDenormalized, usedSegments)

        resultImage = self._interpolateEdges(resultImage)
        #make sky 10 times max depth
        resultImage[resultImage == 0] = resultImage.max() * 10
        return resultImage
Example #29
    def mutation(self):
        """Goes through the individuals in the new population and randomly changes
        certain weights by some random value"""

        #Creates a new population to hold the neural networks
        new_pop = []

        #Iterates over the list of weights in the population
        for individual in self.population:
            #Iterates through each weight
            for i in range(len(individual)):
                #Around 10 percent of the time, mutate a gene by a small amount
                if random.random() < self.mutation_rate:
                    individual[i] += random.uniform(-self.mutation_amount,
                                                    self.mutation_amount)
            #Create a neural network from the weights
            x = MLP(self.dataclass, self.class_type)
            x.set_weights(individual)
            new_pop.append(x)
        self.population = new_pop
Example #30
    def selection(self):
        """Uses differences in fitness between candidate vectors to determine whether or not to replace"""

        selected_pop = []
        for i in range(self.pop_size):
            target_vector = self.population[i]

            diff1, diff2, candidate1 = random.choice(
                self.population), random.choice(
                    self.population), random.choice(self.population)
            while diff1 == diff2 or diff1 == candidate1 or diff2 == candidate1 or candidate1 == target_vector or diff1 == target_vector or diff2 == target_vector:
                # make sure diff1, diff2, candidate1, and the target vector are all distinct
                diff1, diff2, candidate1 = random.choice(
                    self.population), random.choice(
                        self.population), random.choice(self.population)

            diff1_weights, diff2_weights, candidate1_weights = diff1.get_weights(
            ), diff2.get_weights(), candidate1.get_weights()
            diff_weights = []
            for i in range(len(diff1_weights)):
                diff_weights.append(diff1_weights[i] - diff2_weights[i])

            trial_weights = []
            for i in range(len(candidate1_weights)):
                trial = candidate1_weights[i] + (self.beta * diff_weights[i])
                trial_weights.append(trial)

            for i in range(len(trial_weights)):
                if random.random(
                ) < self.crossover_rate:  # then we'll cross over
                    trial_weights[i] = target_vector.get_weights()[i]

            trial_homie = MLP(self.dataclass, self.class_type)
            trial_homie.set_weights(trial_weights)
            if trial_homie.test(self.df) < target_vector.fitness:
                selected_pop.append(trial_homie)

            else:
                selected_pop.append(target_vector)

        self.population = selected_pop
Example #31
def predict(chartID = 'chart_ID', chart_type = 'line', chart_height = 350):
    if request.method == 'POST':
        listtime = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
        liststock = svm.svm_predict(15, 0, str(request.form['stockid']))
        liststock1 = MLP.mlp_predict(15, 0, str(request.form['stockid']))
        chart = {"renderTo": chartID, "type": chart_type, "height": chart_height,}
        series = [{"name": 'SVM', "data": liststock}, {"name": 'MLP', "data": liststock1}]
        # return '<h3>please log in firstly.</h3>'
        title = {"text": 'Price in future 15 days'}
        xAxis = {"categories": listtime}
        yAxis = {"title": {"text": 'yAxis Label'}}
        return render_template('predict.html', chartID=chartID, chart=chart, series=series, title=title, xAxis=xAxis,
                               yAxis=yAxis)
    return render_template('predict.html')
Example #32
def run_with_mlp(image_filename="../Wheat_Images/004.jpg", ser_filename=None):
    '''
	Estimates the number of grains in a given image using a
	Multilayer Perceptron neural network.

	Args:
		image_filename: The path to the image from which a grain count
			is to be obtained.

        ser_filename: path to serialized list of sub-images already extracted
		from the image from which a grain count is to be obtained.

	Returns:
		count: An estimate of the number of grains in the provided image.
    '''
    global img_data

    # Chop image up into sub-images and serialise, or just load serialised data if
    # it already exists.
    if ser_filename is None and image_filename == "../Wheat_Images/004.jpg":
        ser_filename = "../Wheat_Images/xxx_004.data"
    if Helper.unserialize(ser_filename) is None:
        img = img_as_ubyte(io.imread(image_filename))
        roi_img = spectral_roi.extract_roi(img, [1])
        Helper.block_proc(roi_img, (20,20), blockfunc)
        #Helper.serialize(ser_filename, img_data)
    else:
        img_data = Helper.unserialize(ser_filename)

    # classify
    #MLP.build_model('glcm', iters=30, glcm_isMultidirectional=True)
    r = MLP.classify(img_data, featureRepresentation='glcm', shouldSaveResult=True)

    # Count number of '1s' in the result and return
    count = r.tolist().count(1)
    print("COUNT: {}".format(count))
    return count
Example #33
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]

n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

###############
# build model #
###############
print '... building the model'
index = T.lscalar()	# index to minibatch
x = T.matrix('x')	# data of rasterized images
y = T.ivector('y')	# labels are 1D vector of int
rng = numpy.random.RandomState(1234)
classifier = MLP(rng=rng, input=x, n_in=28*28, n_hidden=n_hidden, n_out=10) # MLP with Logistic regression classifier
# cost to minimize during training
cost = classifier.negative_log_likelihood(y) \
	+ L1_reg * classifier.L1 \
	+ L2_reg * classifier.L2_sqr

test_model = theano.function(inputs=[index],
		outputs=classifier.errors(y),
		givens={x: test_set_x[index * batch_size: (index + 1) * batch_size],
			y: test_set_y[index * batch_size: (index + 1) * batch_size]})

valid_model = theano.function(inputs=[index],
		outputs=classifier.errors(y),
		givens={x: valid_set_x[index * batch_size: (index + 1) * batch_size],
			y: valid_set_y[index * batch_size: (index + 1) * batch_size]})
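
The snippet stops before the training function is built; a sketch of the matching train_model in the same style, assuming learning_rate is defined and the MLP class exposes a params list, as in the standard Theano MLP tutorial.

gparams = [T.grad(cost, param) for param in classifier.params]
updates = [(param, param - learning_rate * gparam)
           for param, gparam in zip(classifier.params, gparams)]

train_model = theano.function(inputs=[index],
        outputs=cost,
        updates=updates,
        givens={x: train_set_x[index * batch_size: (index + 1) * batch_size],
                y: train_set_y[index * batch_size: (index + 1) * batch_size]})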
Example #34
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert dense integer labels to one-hot vectors.

    :param labels_dense:
    :param num_classes:
    :return:
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


if __name__ == "__main__":
    # fetch data from sklearn
    mnist = fetch_mldata('MNIST original')
    train_data = mnist.data[0:60000, :] / 255. # normalize
    train_label = mnist.target[0:60000].astype(dtype=int)
    train_label_one_hot = dense_to_one_hot(train_label).astype(dtype=float)
    test_data = mnist.data[60000:70000, :] / 255. # normalize
    test_label = mnist.target[60000:70000].astype(dtype=int)
    test_label_one_hot = dense_to_one_hot(test_label).astype(dtype=float)

    # construct network
    mlp = MLP(layer_sizes=[train_data.shape[1], 256, 10],
              layer_types=['sigmoid', 'softmax'],
              uniform_init=False)
    mlp.train(train_data, train_label_one_hot)

    print 'Test error: %f' % mlp.test(test_data, test_label_one_hot)


Example #35
# -*- coding: utf-8 -*-
import sys
from sklearn.datasets import fetch_mldata
from MLP import *


if __name__ == "__main__":
    # fetch data from sklearn
    mnist = fetch_mldata('MNIST original')
    train_data = mnist.data[0:60000, :] / 255. # normalize
    train_label = mnist.target[0:60000].astype(int)
    test_data = mnist.data[60000:70000, :] / 255. # normalize
    test_label = mnist.target[60000:70000].astype(int)

    # construct network
    mlp = MLP(layer_sizes=[train_data.shape[1], 256, 10],
              layer_types=['sigmoid', 'softmax'],
              uniform_init=False)
    mlp.train(train_data, train_label)
    print 'Test error: %f' % mlp.test(test_data, test_label)
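
fetch_mldata has been removed from recent scikit-learn releases; a sketch of the same data-loading step with fetch_openml (the as_frame argument depends on the scikit-learn version), leaving the MLP construction unchanged.

from sklearn.datasets import fetch_openml

mnist = fetch_openml('mnist_784', version=1, as_frame=False)
data = mnist.data / 255.  # normalize
labels = mnist.target.astype(int)
train_data, train_label = data[:60000], labels[:60000]
test_data, test_label = data[60000:], labels[60000:]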
Example #36
pca = PCA(n_components = 700)

X_train = pca.fit_transform(X_train)
print X_train.shape
X_test = pca.transform(X_test)
print "finish pca"
inodes = 700 #X_train.shape[1]
hnodes = 400
onodes = 10
n_iter = 1500
eta = 0.001
minibatches = 200
lamda2 = 0.1
lamda1 = 0.00

mlp = MLP(inodes = inodes, hnodes = hnodes , onodes = onodes, eta  = eta ,n_iter=n_iter,minibatches=minibatches,lamda2=lamda2,lamda1=lamda1,learning_curve=True)

print "hnodes %s n_iter %s eta %s minibatch %s lamda2 %s lamda1 %s" %(hnodes,n_iter,eta,minibatches,lamda2,lamda1)

mlp.fit(X_train.T,y_train.T)

#error_graph(n_iter,mlp)

y_predict = mlp.predict(X_test.T)
y_original = y_predict
y_predict = np.where(y_predict>=0.5,1,0)

count = 0
miscount = 0
for i in range(y_predict.shape[1]):
    if np.array_equal(y_predict[:,i],y_test.T[:,i]):
        count += 1
    else:
        miscount += 1
Example #37
          # we don't want too recent data...might be sparse
          continue
        if utime > end - 2*period:
          # this is the point we'll try to predict 
          dataY[-1] += n
        else:
          dataX[-1][int((utime-start)/period)] +=  n
    uncertaintyX.append(np.mean(dataX[-1]))
    dataX[-1] = dataX[-1] - uncertaintyX[-1]
    dataY[-1] = dataY[-1] - uncertaintyX[-1]
    uncertaintyX[-1] = np.sqrt(uncertaintyX[-1])
    if dataY[-1] > uncertaintyX[-1]:
      dataY[-1] = 2
    elif dataY[-1] < uncertaintyX[-1]:
      dataY[-1] = 0
    else:
      dataY[-1] = 1
  dataY = np.array(dataY).astype(int)
  dataX = np.array(dataX)
  uncertaintyX = np.array(uncertaintyX)

x = T.matrix('x')
rng = np.random.RandomState()
classifier = MLP(x,rng,dataX.shape[1],40,1)
trainer = classifier.getTrainer(1,1)
nData = dataX.shape[0]
for i in range(nData/3*2/50):
  r =  trainer(trainX[i*50:(i+1)*50],trainY[i*50:(i+1)*50][:,0],.1)
  print r
print classifier.errors(trainX[nData/3*2:],trainY[nData/3*2:][:,0])
# print classifier.evaluate(trainX[999990:]),trainY[999990:].T