Example #1
import numpy as np

import functions


def L_layer_model(X,
                  Y,
                  layers_dims,
                  learning_rate=0.5,
                  num_iterations=10000,
                  print_cost=False):  # lr was 0.009

    np.random.seed(1)
    costs = []

    parameters = functions.initialize_parameters_deep(layers_dims)

    for i in range(num_iterations):

        # Forward propagation through all L layers.
        AL, caches = functions.L_model_forward(X, parameters)

        # Compute the cost on the final activations.
        cost = functions.compute_cost(AL, Y)

        # Backward propagation to get the gradients for every layer.
        grads = functions.L_model_backward(AL, Y, caches)

        # Update the parameters using the computed gradients.
        parameters = functions.update_parameters(parameters, grads,
                                                 learning_rate)

        # Record and optionally report the cost every 100 iterations.
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)

    #plt.ylabel('cost')
    #plt.xlabel('iterations (per tens)')
    #plt.title("Learning rate =" + str(learning_rate))
    #plt.show()

    return parameters
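
# --- Usage sketch (added for illustration; not part of the original example) ---
# Assumes X has shape (n_features, m_examples) and Y has shape (1, m_examples),
# which is what functions.L_model_forward / compute_cost typically expect.
# The data and layer sizes below are hypothetical.
if __name__ == "__main__":
    train_x = np.random.randn(20, 500)                 # hypothetical inputs
    train_y = (np.random.rand(1, 500) > 0.5) * 1.0     # hypothetical binary labels
    layers_dims = [20, 7, 5, 1]                        # input size + three layers
    trained_parameters = L_layer_model(train_x, train_y, layers_dims,
                                       learning_rate=0.0075,
                                       num_iterations=1000, print_cost=True)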
Example #2
import random

import numpy as np


def L_layer_model(
        X, y, layers_dims, learning_rate=0.01, num_iterations=3000,
        print_cost=True, hidden_layers_activation_fn="relu"):
    # fn is the project's helper module (its import is omitted in this snippet).
    # Seed NumPy's RNG with a random integer so each run starts differently.
    random.seed(version=2)
    np.random.seed(random.randint(0, 1000))

    parameters = fn.initialize_parameters(layers_dims)
    for i in range(num_iterations):

        # Forward pass through the hidden layers and the output layer.
        AL, caches = fn.L_model_forward(
            X, parameters, hidden_layers_activation_fn)

        # Compute the cost on the output activations.
        cost = fn.compute_cost(AL, y)

        # Backward pass to compute the gradients.
        grads = fn.L_model_backward(AL, y, caches, hidden_layers_activation_fn)

        # Gradient-descent parameter update.
        parameters = fn.update_parameters(parameters, grads, learning_rate)

        # Report the cost every 100 iterations.
        if (i + 1) % 100 == 0 and print_cost:
            print(f"The cost after {i + 1} iterations is: {cost:.4f}")
    return parameters
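
# --- Sketch (added): a plausible stand-in for fn.update_parameters, assuming the
# usual {"W1": ..., "b1": ..., ..., "WL": ..., "bL": ...} layout and a plain
# gradient-descent step. The actual helper in the author's fn module may differ.
def update_parameters_sketch(parameters, grads, learning_rate):
    """Vanilla gradient-descent update over all layers (illustrative only)."""
    L = len(parameters) // 2  # parameters holds one W and one b per layer
    for l in range(1, L + 1):
        parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)]
    return parameters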
Example #3
plt.ylabel('Profit in $10,000s')
plt.show()

input('Program paused. Press enter to continue.\n')
plt.close()

# ########## Part 3: Gradient descent ##########
print('Running Gradient Descent ...\n')

X = np.hstack((np.ones((m, 1)), X))  # add a column of ones for the intercept term
theta = np.zeros((2, 1))             # initialize the fitting parameters

iterations = 1500
alpha = 0.01

print(compute_cost(X, y, theta))  # initial cost with theta = [0, 0]

theta = gradient_descent(X, y, theta, alpha, iterations)
print('Theta found by gradient descent:')
print(theta[0, 0], theta[1, 0], '\n')

plt.scatter(X[:, 1], y, color='red', marker='x', label='Training data')
plt.xlim([4, 24])
plt.ylim([-5, 25])
plt.xlabel('Population of City in 10,000s')
plt.ylabel('Profit in $10,000s')
plt.plot(X[:, 1], np.dot(X, theta), label='Linear regression')
plt.legend(loc='lower right', scatterpoints=1)
plt.show()

predict1 = np.dot(np.array([[1, 3.5]]), theta)[0, 0]
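
# --- Added note: population and profit are both in units of 10,000, so a
# prediction is multiplied by 10,000 to express it in dollars. The second
# prediction (population 70,000) is illustrative.
print('For population = 35,000, we predict a profit of $%.2f' % (predict1 * 10000))
predict2 = np.dot(np.array([[1, 7.0]]), theta)[0, 0]
print('For population = 70,000, we predict a profit of $%.2f' % (predict2 * 10000))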
Example #4
import numpy as np

import functions

test_x = predictionData.inputData[:, 4000:4600]
test_y = predictionData.outputData[:, 4000:4600]

print(np.shape(test_y))

initial_val = test_x[:, 1]


def MovingAverage(inputArray):
    """Return the plain (unweighted) average of the input window."""
    # A linearly weighted variant was left commented out:
    #weights = np.array([1.0/21, 2.0/21, 3.0/21, 4.0/21, 5.0/21, 6.0/21])
    #inputArray = np.multiply(inputArray, weights)
    return np.average(inputArray)


predict_y = []
inputArray = initial_val
print(inputArray)
for i in range(test_x.shape[1]):
    print(inputArray)
    # Predict each output as the unweighted average of the input column.
    y = MovingAverage(test_x[:, i:i + 1])
    #inputArray = np.append(inputArray[1:], y)
    predict_y.append(y)

cost = functions.compute_cost(predict_y, test_y)
averageError = functions.averageError(predict_y, test_y)

print(cost)
print(averageError)
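
# --- Sketch (added): the weights commented out in MovingAverage suggest a
# linearly weighted average over a six-value window. A runnable version of that
# variant (weights 1/21 ... 6/21, which sum to 1) might look like this:
def weighted_moving_average(window):
    weights = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) / 21.0
    window = np.asarray(window).ravel()
    return float(np.dot(weights, window))  # assumes the window holds 6 values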
Example #5
#         break

# sys.exit()

###
lamda = 0.005   # learning rate
iteration = 2500
###
W1, b1, W2, b2, W3, b3 = functions.initialize_parameters(h1=128, h2=64)

for i in range(iteration):
    # Forward propagation: two ReLU hidden layers followed by a softmax output.
    A1, cache_1 = functions.linear_propagate(train_x, W1, b1, 'relu')
    A2, cache_2 = functions.linear_propagate(A1, W2, b2, 'relu')
    Y_hat, cache_3 = functions.linear_propagate(A2, W3, b3, 'softmax')

    # Track the cost and training accuracy for this iteration.
    cost = functions.compute_cost(Y_hat, train_y)
    pb = functions.accuracy(Y_hat, train_y)

    print(cost)



    # For a softmax output trained with cross-entropy, the output-layer gradient
    # simplifies to Y_hat - train_y (the commented line is the sigmoid-style form).
    # dA3 = -1 * (np.divide(train_y, Y_hat) - np.divide(1 - train_y, 1 - Y_hat))
    dA3 = Y_hat - train_y

    # Backward propagation through the three layers.
    dA2, dW3, db3 = functions.linear_backpropagate(dA3, cache_3, 'softmax')
    dA1, dW2, db2 = functions.linear_backpropagate(dA2, cache_2, 'relu')
    dA0, dW1, db1 = functions.linear_backpropagate(dA1, cache_1, 'relu')

    # Update the parameters with a gradient-descent step (learning rate lamda).
    W1 = W1 - (lamda * dW1)