Example #1
    def model(X_train, Y_train, layers_dims, learning_rate, num_iter, lambd,
              print_cost):

        with tf.device('/device:GPU:0'):

            tf.reset_default_graph()  # To be able to rerun the model without overwriting tf variables
            (n_x, m) = X_train.shape  # Number of features and number of training examples
            n_y = Y_train.shape[0]  # Number of classes
            n_hidden_layers = len(layers_dims)  # Number of hidden layers
            costs = []  # Keep track of the cost

            ### Create Placeholders ###
            X, Y = create_placeholders(n_x, n_y)

            ### Initialize Parameters ###
            parameters = init_params(layers_dims)

            ### Forward propagation - Build the forward propagation in the tensorflow graph ###
            ZL = forward_propagation(X, parameters)

            ### Cost - Add cost function to tensorflow graph ###
            cost_function = compute_cost(ZL, Y, parameters, n_hidden_layers,
                                         lambd, m)

            ### Backpropagation - Define the tensorflow optimizer ###
            optimizer = tf.train.AdamOptimizer(
                learning_rate=learning_rate).minimize(cost_function)
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost_function)

            ### Initialize all the variables ###
            init = tf.global_variables_initializer()

            ### Start the session to compute the tensorflow graph ###
            with tf.Session() as sess:

                # Run the initialization
                sess.run(init)

                # Training loop #
                for i in range(num_iter):

                    # Run the session to execute the optimizer and the cost
                    _, cost_value = sess.run([optimizer, cost_function],
                                             feed_dict={
                                                 X: X_train,
                                                 Y: Y_train
                                             })

                    # Record the cost every 1000 iterations
                    #if print_cost and i % 1000 == 0:
                    #    print("Cost after iteration %i: %f" % (i, cost_value))
                    if print_cost and i % 1000 == 0:
                        costs.append(cost_value)
                # Save the parameters in a variable
                parameters = sess.run(parameters)

        return parameters, costs
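
Example #1 calls several helper functions (create_placeholders, init_params, forward_propagation, compute_cost) whose definitions are not shown. The sketch below is one plausible way to write them for a fully connected network with L2 regularisation, using the same TensorFlow 1.x API as the example; the shapes, initialisers and loss are assumptions, not the original implementations.

import tensorflow as tf

def create_placeholders(n_x, n_y):
    # Feature and label placeholders with an unspecified batch dimension (TF 1.x)
    X = tf.placeholder(tf.float32, shape=[n_x, None], name="X")
    Y = tf.placeholder(tf.float32, shape=[n_y, None], name="Y")
    return X, Y

def init_params(layers_dims):
    # One weight matrix and bias vector per layer transition
    parameters = {}
    for l in range(1, len(layers_dims)):
        parameters["W" + str(l)] = tf.get_variable(
            "W" + str(l), [layers_dims[l], layers_dims[l - 1]],
            initializer=tf.random_normal_initializer(stddev=0.01))
        parameters["b" + str(l)] = tf.get_variable(
            "b" + str(l), [layers_dims[l], 1],
            initializer=tf.zeros_initializer())
    return parameters

def forward_propagation(X, parameters):
    # LINEAR -> RELU blocks with a final LINEAR layer returning the logits ZL
    L = len(parameters) // 2
    A = X
    for l in range(1, L):
        A = tf.nn.relu(tf.matmul(parameters["W" + str(l)], A) + parameters["b" + str(l)])
    return tf.matmul(parameters["W" + str(L)], A) + parameters["b" + str(L)]

def compute_cost(ZL, Y, parameters, n_hidden_layers, lambd, m):
    # Softmax cross-entropy plus an L2 penalty of (lambd / m) on every weight matrix
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=tf.transpose(ZL), labels=tf.transpose(Y)))
    l2 = tf.add_n([tf.nn.l2_loss(parameters["W" + str(l)])
                   for l in range(1, n_hidden_layers)])
    return cross_entropy + (lambd / m) * l2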
Example #2
def main(table, X, Y, hidden_layer_size, lamda, params_1_file, params_2_file):

    input_layer_size = len(X[0])
    num_labels = 1
    m = len(X)

    # =========== Initializing Parameters ================

    initial_params_1 = init_params(input_layer_size, hidden_layer_size)
    initial_params_2 = init_params(hidden_layer_size, num_labels)

    #Unrolling into a single vector of parameters
    nn_params = np.concatenate(
        ((initial_params_1.T).ravel(), (initial_params_2.T).ravel()),
        axis=None)

    # ========= Training NN ===================

    result = minimize(nn_cost_function,
                      nn_params,
                      jac=True,
                      args=(input_layer_size, hidden_layer_size, num_labels, X,
                            Y, lamda))

    #Reshaping nn_params back into the parameters params_1 and params_2
    params_1 = (result.x)[0:hidden_layer_size * (input_layer_size + 1)]
    params_1 = (np.reshape(params_1,
                           (input_layer_size + 1, hidden_layer_size))).T
    params_2 = (result.x)[hidden_layer_size * (input_layer_size + 1):]
    params_2 = (np.reshape(params_2, (hidden_layer_size + 1, -1))).T

    # Write the learned parameters to their output files, one row per line
    with open(params_1_file, 'w') as f:
        for val in params_1:
            print(*val, file=f)

    with open(params_2_file, 'w') as f:
        for val in params_2:
            print(*val, file=f)
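
Example #2 assumes numpy is imported as np and that minimize comes from scipy.optimize. Because the minimize call passes jac=True, nn_cost_function must return both the cost and its gradient, unrolled into a single flat vector laid out exactly like nn_params. The sketch below shows one way that function (and the init_params helper) could look for a single-hidden-layer sigmoid network with L2 regularisation; the bodies are illustrative assumptions, not the example's actual implementations.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def init_params(l_in, l_out):
    # Small random weights with an extra column for the bias term
    eps = np.sqrt(6.0 / (l_in + l_out))
    return np.random.uniform(-eps, eps, (l_out, l_in + 1))

def nn_cost_function(nn_params, input_layer_size, hidden_layer_size,
                     num_labels, X, Y, lamda):
    X = np.asarray(X, dtype=float)
    m = len(X)

    # Reshape the flat parameter vector back into the two weight matrices
    split = hidden_layer_size * (input_layer_size + 1)
    params_1 = np.reshape(nn_params[:split],
                          (input_layer_size + 1, hidden_layer_size)).T
    params_2 = np.reshape(nn_params[split:],
                          (hidden_layer_size + 1, num_labels)).T

    # Forward pass with bias columns prepended
    a1 = np.hstack([np.ones((m, 1)), X])
    a2 = np.hstack([np.ones((m, 1)), sigmoid(a1 @ params_1.T)])
    h = sigmoid(a2 @ params_2.T)

    # Regularised cross-entropy cost (bias columns are not penalised)
    Y = np.reshape(np.asarray(Y, dtype=float), h.shape)
    cost = (-Y * np.log(h) - (1.0 - Y) * np.log(1.0 - h)).sum() / m
    cost += lamda / (2.0 * m) * ((params_1[:, 1:] ** 2).sum()
                                 + (params_2[:, 1:] ** 2).sum())

    # Backpropagate the error through both layers
    d3 = h - Y
    d2 = (d3 @ params_2)[:, 1:] * a2[:, 1:] * (1.0 - a2[:, 1:])
    grad_1 = d2.T @ a1 / m
    grad_2 = d3.T @ a2 / m
    grad_1[:, 1:] += lamda / m * params_1[:, 1:]
    grad_2[:, 1:] += lamda / m * params_2[:, 1:]

    # minimize(..., jac=True) expects (cost, flat gradient) in the nn_params layout
    grad = np.concatenate((grad_1.T.ravel(), grad_2.T.ravel()), axis=None)
    return cost, grad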
Example #3
def model(X_train, # Training set features
          Y_train, # Training set labels
          layers_dims, # Dimensions of the network layers
          learning_rate, # Learning rate for the Adam optimizer
          num_iter, # Number of gradient descent iterations
          lambd, # Some variable used for regularising the L2Norm cost function
          print_cost # Boolean for returning the cost function value after training
          ):
 
 with tf.device('/device:GPU:0'):
   tf.reset_default_graph() # to be able to rerun the model without overwriting tf variables
   (n_x, m) = X_train.shape # Number of features and number of training examples
   n_y = Y_train.shape[0] # Number of classes
   n_hidden_layers = len(layers_dims) # Number of hidden layers
   costs = [] # Keep track of the cost
   
   ### Create Placeholders ###
   X, Y = create_placeholders(n_x,n_y)
   
   ### Initialize Parameters ###
   parameters = init_params(layers_dims)
           
   ### Forward propagation - Build the forward propagation in the tensorflow graph ###
   ZL = forward_propagation(X,parameters)
   
   ### Cost - Add cost function to tensorflow graph ###
   cost_function = compute_cost(ZL,Y,parameters,n_hidden_layers,lambd,m)
   
   ### Backpropagation - Define the tensorflow optimizer ###
   optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost_function)
   #optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost_function)
   
   ### Initialize all the variables ###
   init = tf.global_variables_initializer()
   
   ### Start the session to compute the tensorflow graph ###