Code Example #1
 def __init__(self, **kwargs):
     super(TestApp, self).__init__(**kwargs)
     self.size = Window.size
     print('Window Resolution:', self.size)
     self.lay = layers(x=self.size[0], y=self.size[1])
     self.lay.refresh()
     self.img = AsyncImage(source='layers.png')
     self.add_widget(self.img)
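
A note on this example: layers(...).refresh() presumably regenerates layers.png on disk, and Kivy caches loaded textures, so after rewriting the file the widget usually needs an explicit reload. A minimal sketch (redraw is a hypothetical helper on the same class):

    def redraw(self):
        # Regenerate layers.png, then force Kivy to re-read it from disk;
        # without reload() the cached texture keeps showing the old pixels.
        self.lay.refresh()
        self.img.reload()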
Code Example #2
File: pylayers.py Project: sansajn/pylayers
	def __init__(self, args):
		QtGui.QMainWindow.__init__(self)		
		self.docks = []
		self.docks_state = True
	
		self.layers = layers.layers(self, args)
		self.setCentralWidget(self.layers)
		
		self.resize(800, 600)
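
The snippet only shows the main-window constructor; a hedged sketch of how such a window is typically launched under PyQt4 (the class name MainWindow is hypothetical, since the snippet does not show it):

    import sys
    from PyQt4 import QtGui

    app = QtGui.QApplication(sys.argv)
    win = MainWindow(sys.argv[1:])  # hypothetical class name from pylayers.py
    win.show()
    sys.exit(app.exec_())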
Code Example #3
File: optimizer.py Project: Ryuhos/NN
 def Gradient_sigmoid(self, epochs, lr):
     for i in range(epochs):
         self.result = layers(self.x, self.y, self.w)
         pp = self.result.layer_sigmoid()
         chain_rule = self.y * self.result.diff[-1]
         for j in range(len(self.w) - 1):
             self.w[-(j + 1)] = self.w[-(
                 j + 1)] - lr * self.result.H[-(j + 2)].T.dot(chain_rule)
             chain_rule = chain_rule.dot(
                 self.w[-(j + 1)].T) * self.result.diff[-(j + 2)]
         self.w[0] = self.w[0] - lr * self.x.T.dot(chain_rule)
         current_loss = LossFunction(self.y, pp)
         self.loss.append(current_loss.cross_entropy())
         if i % 10 == 0:
             print(self.w, self.result.pred_y)
     return [self.w, self.result.pred_y]
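
The layers class used above is not shown; as a self-contained illustration of the same manual chain-rule update, here is a minimal NumPy sketch for a two-layer sigmoid network trained with squared error (all shapes and names are hypothetical, not taken from Ryuhos/NN):

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    rng = np.random.default_rng(0)
    x = rng.normal(size=(100, 3))                          # inputs
    y = (x.sum(axis=1, keepdims=True) > 0).astype(float)   # targets
    w = [rng.normal(size=(3, 4)), rng.normal(size=(4, 1))]
    lr = 0.1

    for epoch in range(100):
        # Forward pass, caching activations (H) and sigmoid
        # derivatives (diff), as the snippet's layers object does.
        H, diff = [x], []
        for wj in w:
            a = sigmoid(H[-1].dot(wj))
            H.append(a)
            diff.append(a * (1.0 - a))
        # Backward pass: propagate the error with the chain rule,
        # updating the last layer first, like Gradient_sigmoid does.
        chain_rule = (H[-1] - y) * diff[-1]
        for j in range(len(w) - 1, 0, -1):
            grad = H[j].T.dot(chain_rule)
            chain_rule = chain_rule.dot(w[j].T) * diff[j - 1]
            w[j] -= lr * grad
        w[0] -= lr * H[0].T.dot(chain_rule)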
Code Example #4
import numpy as np

fn = "permx.tmpl"
fn2 = "permx.txt"
DISTR = "LOGNORMAL"
permUtsira = np.log(2000)
permvarUtsira = 0.4
permMinMaxUtsira = [1100, 5000]
permShale = np.log(0.001)
permvarShale = 0.4
permMinMaxShale = [0.00075, 0.0015]
dimx = 64
dimy = 118
dimz = 263

keys = ["PERMX"] #,"PORO"]
layerShale, layerUtsira, mapShale, mapUtsira = layers()
numLayers = len(layerShale)
f = open(fn, "w")
f2 = open(fn2, "w") 
for key in keys:
  for layer in range(numLayers):
    f.write("EQUALS \n")
    f.write("'" + key + "'")
    f.write(" <UTSIRA" + key + str(layer+1) + ">")
    f.write(" 1 " + str(dimx) + " 1 " + str(dimy) + " ") 
    f.write(str(layerShale[layer]+1) + " " + str(layerUtsira[layer])) 
    f.write("/\n/\n\n")
    
    f2.write("UTSIRA" + key + str(layer+1) + " " + DISTR + " " + str(permUtsira) + " " + str(permvarUtsira) + "\n")

for key in keys:
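
The snippet is cut off at the start of its second loop in the source. For concreteness: if layers() returned, say, layerShale = [50] and layerUtsira = [118] (hypothetical values), the first iteration above would write one record to permx.tmpl:

    EQUALS
    'PERMX' <UTSIRAPERMX1> 1 64 1 118 51 118/
    /

and the matching distribution line to permx.txt (np.log(2000) ≈ 7.6009):

    UTSIRAPERMX1 LOGNORMAL 7.600902459542082 0.4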
Code Example #5
 def updateDisplay(self, *args):
     # Recreate the layer image only when the window size actually changed.
     if tuple(self.size) != tuple(Window.size):
         self.size = Window.size
         self.lay = layers(x=self.size[0], y=self.size[1])
         print('Window Resolution:', self.size)
     self.lay.refresh()
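
The *args signature suggests updateDisplay is driven by Kivy's event system. A minimal sketch of two common ways to wire it up (assuming it lives on the same widget as in Example #1; the class name and polling interval are hypothetical):

    from kivy.clock import Clock
    from kivy.core.window import Window
    from kivy.uix.widget import Widget

    class Root(Widget):  # hypothetical host widget
        def __init__(self, **kwargs):
            super(Root, self).__init__(**kwargs)
            self.size = Window.size
            # Either poll once per second ...
            Clock.schedule_interval(self.updateDisplay, 1.0)
            # ... or redraw only on actual resize events.
            Window.bind(on_resize=self.updateDisplay)

        def updateDisplay(self, *args):
            if tuple(self.size) != tuple(Window.size):
                self.size = Window.size
                print('Window Resolution:', self.size)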
Code Example #6
    def train(self):
        '''
        Integrates all of the previous modules to train a model. The deep
        learning engineer can choose:
          1- the layer dimensions
          2- the activation function of each layer
          3- the loss type
          4- the number of iterations
        The function plots a live graph of the training cost and finally
        prints the accuracy obtained on the test set, using the parameters
        learned on the training set.
        :return: the trained parameters
        '''

        temp_layers = layers.layers(self.dimension_layers)
        # Initialize parameters dictionary.
        parameters = temp_layers.layers_init()

        # Loop (gradient descent)
        # Clear the cost log left over from any previous run.
        cost_file = open("costs.txt", 'a+')
        cost_file.truncate(0)
        cost_file.close()
        for i in range(0, self.no_of_iterations):
            predictions, packet_of_packets = forward_model.forward_model(
            ).forward_model(self.input, parameters, self.activation_functions)

            # Cost function
            if self.regularization_parameter == 0:
                cost = Losses.multiclass_loss(self.Y, predictions).cost()
            else:
                cost = Losses.regularization.compute_cost_with_regularization(
                    predictions, self.Y, parameters,
                    self.regularization_parameter, "multiclass")

            # Backward propagation.
            grads = backward_model.model_backward_general(
                predictions, self.Y, packet_of_packets, "multiclass",
                self.regularization_parameter,
                self.activation_functions).model_backward()
            # Update parameters.
            parameters = self.update_parameters(parameters, grads,
                                                self.learning_rate)
            # Log the cost and refresh the live plot.
            cost_file = open("costs.txt", 'a+')
            cost_file.write(f"{i},{cost} \n")
            cost_file.close()
            plt.ion()
            plt.ylabel('cost')
            plt.xlabel('iterations')
            plt.title("Learning rate =" + str(self.learning_rate))
            plt.show()
            plt.draw()
            plt.pause(1)
            # Print the loss every 10 iterations.
            if self.print_cost and i % 10 == 0:
                print("Cost after iteration {}: {}".format(i, cost))
        return parameters
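
update_parameters is called but not shown; a plain gradient-descent version consistent with parameter dictionaries keyed "W1", "b1", ..., and gradients keyed "dW1", "db1", ... (an assumption about this project's naming, not its actual code) might look like this:

    def update_parameters(parameters, grads, learning_rate):
        # Vanilla gradient descent: theta := theta - lr * d(theta).
        # Assumes parameters holds "W1", "b1", ..., "WL", "bL" and
        # grads holds matching "dW1", "db1", ... entries (hypothetical).
        L = len(parameters) // 2
        for l in range(1, L + 1):
            parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)]
            parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)]
        return parameters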
Code Example #7
    def train(self):
        '''
        Integrates all of the previous modules to train a model. The deep
        learning engineer can choose:
          1- the activation function of each layer
          2- the loss type
          3- the number of iterations
        The function plots a live graph of the training cost and finally
        prints the accuracy obtained on the test set, using the parameters
        learned on the training set.
        :return: the trained parameters
        '''
        adam_flag = 1
        # Network shape: input features, one hidden layer of 128 units, 10 output classes.
        layers_dimensions = [self.input[0][0].shape[0], 128, 10]
        temp_layers = layers.layers(layers_dimensions)
        # Initialize parameters dictionary.
        parameters = temp_layers.layers_init()
        temp = parameters
        if (self.momentum_or_no):
            velocity = momentum(parameters).velocity_preparation()
        if (self.adam_or_not):
            exponentially_weighted_parameter, RMS_parameter = ADAM(
                parameters).adam_preparation()

        # Loop (gradient descent)
        # Clear the cost log left over from any previous run.
        cost_file = open("costs.txt", 'a+')
        cost_file.truncate(0)
        cost_file.close()
        for i in range(0, self.no_of_iterations):
            # Iterate over the mini-batches; each self.input[j] is an (X, labels) pair.
            for j in range(len(self.input)):
                train_Y = data_set.labels_to_onehot(self.input[j][1])
                train_X = self.input[j][0]
                no_of_training_examples = self.input[j][1].shape[1]

                predictions, packet_of_packets = forward_model.forward_model(
                ).forward_model(train_X, parameters, self.activation_functions)


                # Cost function
                if self.regularization_parameter == 0:
                    cost = Losses.multiclass_loss(train_Y, predictions).cost()
                else:
                    cost = Losses.regularization(
                    ).compute_cost_with_regularization(
                        predictions, train_Y, parameters,
                        self.regularization_parameter, "multiclass")

                # Backward propagation.
                grads = backward_model.model_backward_general(
                    predictions, train_Y, packet_of_packets, "multiclass",
                    self.regularization_parameter,
                    self.activation_functions).model_backward()
                # Update parameters.
                if (self.momentum_or_no):
                    parameters, velocity = momentum(
                        parameters).update_with_momentum(
                            velocity, self.learning_rate, self.Beta, grads)

                elif (self.adam_or_not):
                    parameters, exponentially_weighted_parameter, RMS_parameter = ADAM(
                        parameters).update_with_adam(
                            exponentially_weighted_parameter, RMS_parameter,
                            self.learning_rate, parameters, grads, i)

                else:
                    parameters = self.update_parameters(
                        parameters, grads, self.learning_rate)
            # Log the cost and refresh the live plot.
            cost_file = open("costs.txt", 'a+')
            cost_file.write(f"{i},{cost} \n")
            cost_file.close()
            plt.ion()
            plt.show()
            plt.draw()
            plt.pause(1)
            print(f"cost after epoch{i}: {cost}")

        return parameters
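
The momentum and ADAM helpers used above are not shown either; as a hedged sketch, these are textbook versions of the two update rules they presumably implement (the "W1"/"dW1" key naming is assumed, and t is the 1-based step count; note the loop above passes the index i, which starts at 0, so a standard bias correction would need t = i + 1):

    import numpy as np

    def momentum_update(parameters, grads, velocity, lr, beta):
        # Momentum: v := beta*v + (1-beta)*grad, then theta := theta - lr*v.
        for key in parameters:
            velocity[key] = beta * velocity[key] + (1 - beta) * grads["d" + key]
            parameters[key] -= lr * velocity[key]
        return parameters, velocity

    def adam_update(parameters, grads, m, v, lr, t,
                    beta1=0.9, beta2=0.999, eps=1e-8):
        # Adam: bias-corrected first (m) and second (v) moment estimates;
        # t must be >= 1 or the bias correction divides by zero.
        for key in parameters:
            g = grads["d" + key]
            m[key] = beta1 * m[key] + (1 - beta1) * g
            v[key] = beta2 * v[key] + (1 - beta2) * g ** 2
            m_hat = m[key] / (1 - beta1 ** t)
            v_hat = v[key] / (1 - beta2 ** t)
            parameters[key] -= lr * m_hat / (np.sqrt(v_hat) + eps)
        return parameters, m, v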