Example #1
    def gradient_descent(self, X, Y, param, activation_type=None):

        if self.print_cost:
            cu.heading("Gradient Descent")

        if activation_type is None:
            activation_type = self.get_activation_type()

        list_cost = []
        for i in range(self.iter_count):

            AL, cache = self.model_forward_propagation(
                X, param, activation_type=activation_type)

            cache = self.model_back_propagation(Y, param, cache,
                                                activation_type)

            param = self.update_param(param, cache, self.learning_rate)

            # Track the cost every 100 iterations (and on the last one) so the
            # value returned below is always defined, even when print_cost is off.
            if i % 100 == 0 or i == self.iter_count - 1:
                cost = self.cost_function(Y, AL, param)
                list_cost.append(cost)
                if self.print_cost:
                    print("Cost after iteration [%4i] = %2.4f" % (i, cost))

        return AL, cost, param
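`update_param` is defined elsewhere in the class; a minimal sketch of the step it likely performs, assuming the `cache` returned by back-propagation carries gradients keyed as "dW<l>" / "db<l>" (an assumption, not the source's actual layout):

def update_param(self, param, cache, learning_rate):
    # One vanilla gradient-descent step per layer:
    # theta := theta - learning_rate * d(theta)
    L = len(param) // 2  # param holds one (W, b) pair per layer
    for l in range(1, L + 1):
        param["W" + str(l)] -= learning_rate * cache["dW" + str(l)]
        param["b" + str(l)] -= learning_rate * cache["db" + str(l)]
    return param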
Example #2
def main():
    init_plot()

    X_train, Y_train, X_dev, Y_dev = organize_image_data()

    # Normalize the loaded image data
    X_train, X_dev = NeuralNetwork.normalize_image_data([X_train, X_dev])

    # Hyperparameter: neural network layer sizes
    n_hidden = (20, 7, 5)

    # Neural network
    nn = NeuralNetwork(n_hidden, print_cost=True)
    AL, cost = nn.fit(X_train, Y_train)

    # Train accuracy
    cu.heading("Train Neural Network")
    Ycap_train = nn.predict(X_train)
    accuracy = NeuralNetwork.get_accuracy(Y_train, Ycap_train)
    print("Train accuracy = %3.4f" % accuracy)

    # Dev accuracy
    cu.heading("Dev Run Using Trained Parameters")
    Ycap_dev = nn.predict(X_dev)
    accuracy = NeuralNetwork.get_accuracy(Y_dev, Ycap_dev)
    print("Dev accuracy = %3.4f" % accuracy)
Example #3
def organize_image_data():
    cu.heading("Organize Data")

    # Load data from file
    train_x_orig, train_y_orig, test_x_orig, test_y_orig, classes = data_source.load_cat_data()
    print("Shape: train_x_orig = ", train_x_orig.shape, " test_x_orig = ", test_x_orig.shape)
    print("Shape: train_y_orig = ", train_y_orig.shape, " test_y_orig = ", test_y_orig.shape)

    # Assert that the image size (width, height) is the same for train and dev
    m_train, width, height, rgb = train_x_orig.shape
    m_dev = test_x_orig.shape[0]
    n_x = width * height * 3
    n_y = train_y_orig.shape[0]
    assert test_x_orig.shape == (m_dev, width, height, 3)
    assert test_y_orig.shape == (n_y, m_dev)

    # Reshape to get training data - each column is one flattened image
    X_train = train_x_orig.reshape(train_x_orig.shape[0], -1).T
    Y_train = train_y_orig
    print("Shape: X_train = ", X_train.shape, " Y_train = ", Y_train.shape)
    assert X_train.shape == (n_x, m_train)
    assert Y_train.shape == (n_y, m_train)

    # Reshape to get dev data - each column is one flattened image
    X_dev = test_x_orig.reshape(test_x_orig.shape[0], -1).T
    Y_dev = test_y_orig
    print("Shape: X_dev = ", X_dev.shape, " Y_dev = ", Y_dev.shape)
    assert X_dev.shape == (n_x, m_dev)
    assert Y_dev.shape == (n_y, m_dev)

    return X_train, Y_train, X_dev, Y_dev
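The `reshape(m, -1).T` idiom above flattens each (width, height, 3) image into a single column. A tiny standalone check of that behavior, using hypothetical shapes rather than the cat dataset's:

import numpy as np

images = np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3)  # 2 images, 4x4 RGB
X = images.reshape(images.shape[0], -1).T              # (48, 2): one column per image
assert X.shape == (4 * 4 * 3, 2)
assert (X[:, 0] == images[0].ravel()).all()            # column 0 is image 0, flattened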
Example #4
def main():
    A = np.random.randint(1, 6, size=(4, 3))
    B = np.random.randint(1, 6, size=(3, 5))

    util.heading("Matrix")
    print("A:\n", A)
    print("B:\n", B)
    C = matmul(A, B)
    print("C:\n", C)
Example #5
def init_weight_bias(self):
    cu.heading("Init Weight & Bias")
    L = len(self.n) - 1
    param = {}
    for l in range(1, L + 1):
        # Naive alternative: np.random.randn(n[l], n[l-1]) * 0.01
        # Scaling by 1/sqrt(n[l-1]) (Xavier-style initialization) keeps the
        # variance of the pre-activations roughly constant across layers.
        param["W" + str(l)] = np.random.randn(
            self.n[l], self.n[l - 1]) / np.sqrt(self.n[l - 1])
        param["b" + str(l)] = np.zeros((self.n[l], 1))
        print("W" + str(l), param["W" + str(l)].shape, "b" + str(l),
              param["b" + str(l)].shape)
    return param
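A quick standalone check of the scaling argument, assuming unit-variance inputs (illustrative only, not part of the source):

import numpy as np

np.random.seed(0)
n_in, n_out, m = 500, 100, 1000
X = np.random.randn(n_in, m)                      # unit-variance inputs
W = np.random.randn(n_out, n_in) / np.sqrt(n_in)  # Xavier-style scaling
Z = W @ X
print(np.std(Z))  # close to 1.0, independent of n_in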
Example #6
def main():

    sleep_time = [10, 100, 150, 40, 50, 80, 80, 100, 10, 10, 10]
    mesg = ['X', 'C', 'CL', 'XL', 'L', 'XXC', 'XXC', 'C', 'X', 'X', 'X']

    util.heading("Thread Pool Executor - submit")
    with futures.ThreadPoolExecutor(max_workers=3,
                                    thread_name_prefix="t") as executor:
        list_future = seq(range(
            len(sleep_time))).map(lambda i: executor.submit(
                do_something, zz=sleep_time[i])).to_list()
        futures.wait(list_future)

    util.heading("Thread Pool Executor - map")
    with futures.ThreadPoolExecutor(max_workers=3,
                                    thread_name_prefix="t") as executor:
        executor.map(do_something, sleep_time, mesg)
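`do_something` is defined elsewhere; a sketch consistent with both call sites above (keyword `zz` via submit, positional `(zz, mesg)` via map). The signature and the sleep unit (milliseconds) are assumptions:

import time
import threading

def do_something(zz, mesg=None):
    # Sleep zz milliseconds (assumed unit) and report which worker thread ran.
    time.sleep(zz / 1000.0)
    print(threading.current_thread().name, "slept", zz, "ms", mesg or "")
    return zz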
Example #7
def main():
    util.heading("Order")
    c = C()

    util.heading("MRO -- Method resolution order")
    print(C.__mro__)

    util.heading("Diamond Inheritance")
    c.fun()

    util.heading("Resolving Diamond Inheritance")
    c.have_fun()
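The class hierarchy is not shown; a minimal diamond consistent with the headings, where `fun` illustrates the ambiguity (the MRO silently picks A's version) and `have_fun` resolves it with cooperative `super()` calls. This is an assumed reconstruction, not the source's definitions:

class Base:
    def fun(self):
        print("Base.fun")

    def have_fun(self):
        print("Base.have_fun")

class A(Base):
    def fun(self):
        print("A.fun")

    def have_fun(self):
        print("A.have_fun")
        super().have_fun()

class B(Base):
    def fun(self):
        print("B.fun")

    def have_fun(self):
        print("B.have_fun")
        super().have_fun()

class C(A, B):
    def have_fun(self):
        print("C.have_fun")
        super().have_fun()  # follows the MRO: C -> A -> B -> Base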
Example #8
def main():
    a = A()

    # Invoke instance method, style-1
    a.method_instance()

    # Invoke instance method, style-2
    A.method_instance(a)

    # Static method
    # -------------
    util.heading("Static Method")
    A.method_static(3)
    print("")

    # Class method
    # -------------
    util.heading("Class Method")
    obj = A.method_class(5)
    print("Object returned by class method Type:{0} Str:{1}".format(type(obj), str(obj)))
Example #9
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from functional import seq
import maga.ml.lib.tensor.tensor_util as tu
import pyduke.common.core_util as util

# -----------------------------------------------------------------------------
# Single Layer Neural Network
# -----------------------------------------------------------------------------

util.heading("Single NN layer")

# Number of features
n = 10

# First layer
L1 = 3

# X has 'n' feature rows; each column is one instance.
X = tf.placeholder(tf.float32, shape=(n, None))

W = tf.Variable(initial_value=tf.random_normal(shape=(L1, n)))
# Bias must be a (L1, 1) column so it broadcasts across the m instance
# columns of Z; a flat (L1,) vector would broadcast across rows instead.
b = tf.Variable(initial_value=tf.ones(shape=(L1, 1)))
Z = tf.matmul(W, X) + b
A = tf.sigmoid(Z)
result = tu.eval(A, feed_dict={X: np.random.rand(n, 3)})

# Note that the activation values lie between 0 and 1 since we use the sigmoid function.
print("A = sigmoid (W.X + b)", "\n", result)
Example #10
# Stratified shuffle split into train and test
X_train, y_train, X_test, y_test = du.get_stratified_shuffle_split(X, y, test_size=141)

# Shapes
m, n = X_train.shape

# _________________________________________________________________________________________________
#
#                                         Random Forest Classifier
# _________________________________________________________________________________________________


# -------------------------------------------------------------------------------------------------
# Base Model
# -------------------------------------------------------------------------------------------------
cu.heading('[RandomForest][BaseModel]')

# Fit
from sklearn.ensemble import RandomForestClassifier
model_forest = RandomForestClassifier(random_state=du.SEED)
model_forest.fit(X_train, y_train)

# Predict Train
y_train_pred = model_forest.predict(X_train)
tuple_accuracy = du.get_scores(y_train, y_train_pred, title='[RandomForest][BaseModel][Train]')

# Train Accuracy = 0.9480
print ("Train Accuracy = {:2.4f}".format(tuple_accuracy[0]))

# Predict Test
y_test_pred = model_forest.predict(X_test)
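The snippet breaks off after predicting on the test split; a continuation mirroring the train block above would score those predictions (a hedged guess, not from the source):

tuple_accuracy = du.get_scores(y_test, y_test_pred, title='[RandomForest][BaseModel][Test]')
print("Test Accuracy = {:2.4f}".format(tuple_accuracy[0]))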