Пример #1
0
def main():
    """Load the SIGNS dataset, preprocess it, train the model, and time the run.

    Returns:
        The parameters learned by `model`.
    """
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

    # Flatten each image into one column and scale pixels into [0, 1].
    X_train = X_train_orig.reshape(X_train_orig.shape[0], -1).T / 255.
    X_test = X_test_orig.reshape(X_test_orig.shape[0], -1).T / 255.

    # One-hot encode the 6 class labels.
    Y_train = convert_to_one_hot(Y_train_orig, 6)
    Y_test = convert_to_one_hot(Y_test_orig, 6)

    # Time the training run.
    started = time.time()
    learned_parameters = model(X_train, Y_train, X_test, Y_test, num_epochs=600)
    print("--- %s seconds ---" % (time.time() - started))

    return learned_parameters
Пример #2
0
def __init__():
    """Load the SIGNS dataset, preprocess it, and run the training pipeline."""
    # Loading the dataset from the h5 files.
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = utils.load_dataset(
        'dataset/signs/test_signs.h5', 'dataset/signs/train_signs.h5')

    # Flatten each image into one column, then scale pixels into [0, 1].
    X_train = X_train_orig.reshape(X_train_orig.shape[0], -1).T / 255.
    X_test = X_test_orig.reshape(X_test_orig.shape[0], -1).T / 255.

    # One-hot encode the 6 class labels.
    Y_train = utils.convert_to_one_hot(Y_train_orig, 6)
    Y_test = utils.convert_to_one_hot(Y_test_orig, 6)

    # Report the preprocessed shapes.
    for label, value in (
        ("number of training examples = ", X_train.shape[1]),
        ("number of test examples = ", X_test.shape[1]),
        ("X_train shape: ", X_train.shape),
        ("Y_train shape: ", Y_train.shape),
        ("X_test shape: ", X_test.shape),
        ("Y_test shape: ", Y_test.shape),
    ):
        print(label + str(value))

    X, Y = utils.create_placeholders(12288, 6)
    print("X = " + str(X))
    print("Y = " + str(Y))

    parameters = model(X_train, Y_train, X_test, Y_test)
    print('PARAMETERS', parameters)
def main():
    """Sanity-check `linear_function`, then train the SIGNS model.

    Returns:
        The parameters learned by `model`.
    """
    print("result = " + str(linear_function()))

    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

    # Flatten each image into one column and scale pixels into [0, 1].
    X_train = X_train_orig.reshape(X_train_orig.shape[0], -1).T / 255.
    X_test = X_test_orig.reshape(X_test_orig.shape[0], -1).T / 255.

    # One-hot encode the 6 class labels.
    Y_train = convert_to_one_hot(Y_train_orig, 6)
    Y_test = convert_to_one_hot(Y_test_orig, 6)

    return model(X_train, Y_train, X_test, Y_test, num_epochs=1000)
Пример #4
0
def get_dataset():
    """Load the SIGNS dataset and return preprocessed train/test splits.

    Returns:
        (X_train, X_test, Y_train, Y_test): the X arrays hold one flattened,
        [0, 1]-scaled image per column; the Y arrays are 6-class one-hot
        matrices.
    """
    # Note: the unused `index` preview variable from the original was removed.
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
    # Flatten: one image per column.
    X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
    X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
    # Simple normalization into [0, 1].
    X_train = X_train_flatten / 255.
    X_test = X_test_flatten / 255.
    # One-hot encoding of the 6 labels.
    Y_train = convert_to_one_hot(Y_train_orig, 6)
    Y_test = convert_to_one_hot(Y_test_orig, 6)

    return X_train, X_test, Y_train, Y_test
Пример #5
0
def main():
    """Train the DNN on pre-segmented trip data loaded from .npy files.

    The upstream DB read / filtering / segmentation steps are done offline;
    only their cached outputs are loaded here.
    """
    Y_orig = np.load('DNN_labels_data.npy')
    X_orig = np.load('DNN_segmented_data.npy')

    # BUG FIX: start_time was never assigned, so the final print raised
    # NameError. Start the clock before the expensive work.
    start_time = time.time()

    X_flatten = flattening_data(X_orig)
    X_train, X_test, Y_train, Y_test = split_train_test(X_flatten, Y_orig)
    # Labels are one of 4 classes.
    Y_train = convert_to_one_hot(Y_train, 4)
    Y_test = convert_to_one_hot(Y_test, 4)
    parameters = model(X_train, Y_train, X_test, Y_test, seg_size)

    print("--- %s seconds ---" % (time.time() - start_time))
Пример #6
0
def preprocess_data(X_train_orig, Y_train_orig, X_test_orig, Y_test_orig):
    """Flatten/normalize the image splits and one-hot encode their labels.

    Returns:
        (X_train, Y_train, X_test, Y_test): X arrays hold one flattened,
        [0, 1]-scaled image per column; Y arrays are 6-class one-hot matrices.
    """
    # One flattened image per column, pixels scaled into [0, 1].
    X_train = X_train_orig.reshape(X_train_orig.shape[0], -1).T / 255.
    X_test = X_test_orig.reshape(X_test_orig.shape[0], -1).T / 255.
    # 6-way one-hot label matrices.
    Y_train = tf_utils.convert_to_one_hot(Y_train_orig, 6)
    Y_test = tf_utils.convert_to_one_hot(Y_test_orig, 6)

    # Report the preprocessed shapes.
    for label, value in (
        ("number of training examples = ", X_train.shape[1]),
        ("number of test examples = ", X_test.shape[1]),
        ("X_train shape: ", X_train.shape),
        ("Y_train shape: ", Y_train.shape),
        ("X_test shape: ", X_test.shape),
        ("Y_test shape: ", Y_test.shape),
    ):
        print(label + str(value))

    return X_train, Y_train, X_test, Y_test
Пример #7
0
def main():
    """Load and preprocess the SIGNS dataset, then train the model."""
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

    # Flatten each image into one column and scale pixels into [0, 1].
    X_train = X_train_orig.reshape(X_train_orig.shape[0], -1).T / 255.
    X_test = X_test_orig.reshape(X_test_orig.shape[0], -1).T / 255.
    # One-hot encode the 6 class labels.
    Y_train = convert_to_one_hot(Y_train_orig, 6)
    Y_test = convert_to_one_hot(Y_test_orig, 6)

    # Report the preprocessed shapes before training.
    for label, value in (
        ("number of training examples = ", X_train.shape[1]),
        ("number of test examples = ", X_test.shape[1]),
        ("X_train shape: ", X_train.shape),
        ("Y_train shape: ", Y_train.shape),
        ("X_test shape: ", X_test.shape),
        ("Y_test shape: ", Y_test.shape),
    ):
        print(label + str(value))
    parameters = model(X_train, Y_train, X_test, Y_test)
Пример #8
0
def preprocess_data(data_set):
    """Flatten the features and one-hot encode (10 classes) the labels of one split.

    Args:
        data_set: an (x, y) pair of arrays.

    Returns:
        (x, y): x flattened to one row per sample; y a one-hot matrix with one
        row per sample.
    """
    features, labels = data_set
    print("Original data X shape: ", features.shape, " data Y shape: ", labels.shape)

    # Flatten every sample into a row vector.
    features = features.reshape(features.shape[0], -1)
    # One-hot encode, then transpose so rows correspond to samples.
    labels = convert_to_one_hot(labels, 10).transpose()
    print("----- Reshape Original Trains Dataset Shape ------")
    print("Reshape data X as: ", features.shape, " data Y shape: ", labels.shape)

    return (features, labels)
Пример #9
0
if __name__ == '__main__':
    # Load the SIGNS dataset.
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = tf_utils.load_dataset(
    )

    # Flatten the images: one sample per column.
    X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
    X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
    # Normalize pixel values into [0, 1].
    X_train = X_train_flatten / 255
    X_test = X_test_flatten / 255

    # Convert labels to 6-class one-hot matrices.
    Y_train = tf_utils.convert_to_one_hot(Y_train_orig, 6)
    Y_test = tf_utils.convert_to_one_hot(Y_test_orig, 6)

    # BUG FIX: time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the documented replacement for wall-clock timing.
    start_time = time.perf_counter()
    # Train the model.
    parameters = model(X_train, Y_train, X_test, Y_test)
    end_time = time.perf_counter()
    # Report elapsed time.
    print("CPU的执行时间 = " + str(end_time - start_time) + " 秒")

    my_image1 = "3.jpg"

    fileName1 = r'E:\深度学习\第二课第三周编程作业\\' + my_image1
Пример #10
0
# Visual spot-check of a random training image (left disabled).
#index = np.random.randint(0, X_train_orig.shape[1])
#plt.imshow(X_train_orig[index])
#plt.show()
#print ("y = " + str(np.squeeze(Y_train_orig[:, index])))


# As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.

# Flatten the training and test images: one image per column.
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors into [0, 1].
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices (6 classes).
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)

# Report the preprocessed shapes.
print ("number of training examples = " + str(X_train.shape[1]))
print ("number of test examples = " + str(X_test.shape[1]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))

def create_placeholders(n_x, n_y):
    """Create TF v1 placeholders for inputs (n_x features) and labels (n_y classes).

    NOTE(review): the definition appears truncated in this excerpt — no
    return statement is visible; confirm against the full file.
    """
    print(n_x,n_y)
    # Column-per-sample layout: shape (features, batch) with dynamic batch size.
    X = tf.placeholder(tf.float32, [n_x, None])
    Y = tf.placeholder(tf.float32, [n_y, None])
Пример #11
0
    dataset['Fare'] = dataset['Fare'].astype(int)

# FareBand was only needed to derive the Fare bins; drop it before training.
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]

# Preparing Datasets for Logistics Regression
X_train_orig = train_df.drop(['Survived'], axis=1)
X_test_orig = test_df.drop(['PassengerId'], axis=1).copy()

# Transpose so each column is one passenger (sample).
X_train = np.array(X_train_orig[:][:]).T
X_test = np.array(X_test_orig[:][:]).T

# Binary classification: survived / did not survive.
numClasses = 2
Y_train = np.array(train_df['Survived'][:])
Y_train = Y_train.reshape(1, Y_train.shape[0])
Y_train = convert_to_one_hot(Y_train, numClasses)

# Printing the Shapes
print("Dataset Loaded.")
print("number of training examples = " + str(X_train.shape[1]))
print("number of test examples = " + str(X_test.shape[1]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))

#***********************************
#----STEP 4: INITIALIZING FUNCTIONS
#***********************************
'''The placeholder creater for X and Y as tensorflow placeholders'''


Пример #12
0
# %% Inspect the raw dataset shapes and the class list.
print(f'X_train.shape = {X_train.shape}')
print(f'Y_train.shape = {Y_train.shape}')
print(f'X_test.shape = {X_test.shape}')
print(f'Y_test.shape = {Y_test.shape}')
print(f'classes.shape = {classes.shape}')
print(f'classes = {classes}')

# %% Preview sample 11 with its label.
print(f'Y = {Y_train[0,11]}')
plt.imshow(X_train[11])

# %% Flatten to one column per image, scale into [0, 1], one-hot the C labels.
C = 6
X_train = X_train.reshape((X_train.shape[0], -1)).T / 255
Y_train = tf_utils.convert_to_one_hot(Y_train, C)
X_test = X_test.reshape((X_test.shape[0], -1)).T / 255
Y_test = tf_utils.convert_to_one_hot(Y_test, C)

# %% Confirm the preprocessed shapes.
print(f'X_train.shape = {X_train.shape}')
print(f'Y_train.shape = {Y_train.shape}')
print(f'X_test.shape = {X_test.shape}')
print(f'Y_test.shape = {Y_test.shape}')

# %%
def model_with_tf(X, Y, alpha=0.001, loops=10000):

    n_x = X.shape[0]
    n_y = Y.shape[0]
Пример #13
0
def main():
    """Driver for the TF v1 course exercises.

    Runs basic graph/session demos, TESTs 1-11 over helper functions defined
    elsewhere in this module, and an end-to-end train/predict pass on the
    SIGNS dataset. Left byte-identical; comments only.
    """
    np.random.seed(1)

    y_hat = tf.constant(36, name='y_hat')  # Define y_hat constant. Set to 36.
    y = tf.constant(39, name='y')  # Define y. Set to 39

    loss = tf.Variable((y - y_hat) ** 2, name='loss')  # Create a variable for the loss

    init = tf.global_variables_initializer()  # When init is run later (session.run(init)),
    # the loss variable will be initialized and ready to be computed
    with tf.Session() as session:  # Create a session and print the output
        session.run(init)  # Initializes the variables
        print(session.run(loss))  # Prints the loss

    a = tf.constant(2)
    b = tf.constant(10)
    c = tf.multiply(a, b)
    print(c)  # prints the symbolic tensor, not its value — evaluated below

    sess = tf.Session()
    print(sess.run(c))

    # Change the value of x in the feed_dict
    x = tf.placeholder(tf.int64, name='x')
    print(sess.run(2 * x, feed_dict={x: 3}))
    sess.close()


    """ TEST 1 """
    print("result = " + str(linear_function()))

    """ TEST 2 """
    print("sigmoid(0) = " + str(sigmoid(0)))
    print("sigmoid(12) = " + str(sigmoid(12)))

    """ TEST 3 """
    logits = sigmoid(np.array([0.2, 0.4, 0.7, 0.9]))
    cost = cost_func(logits, np.array([0, 0, 1, 1]))
    print("cost = " + str(cost))

    """ TEST 4 """
    labels = np.array([1, 2, 3, 0, 2, 1])
    one_hot = one_hot_matrix(labels, C=4)
    print("one_hot = " + str(one_hot))

    """ TEST 5 """
    print("ones = " + str(ones([3])))


    # Loading the dataset
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

    # Change the index below and run the cell to visualize some examples in the dataset.
    # Example of a picture
    index = 99
    #plt.imshow(X_train_orig[index])
    print("y = " + str(np.squeeze(Y_train_orig[:, index])))

    # Flatten the training and test images (one image per column)
    X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
    X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
    # Normalize image vectors into [0, 1]
    X_train = X_train_flatten / 255.
    X_test = X_test_flatten / 255.
    # Convert training and test labels to one hot matrices (6 classes)
    Y_train = convert_to_one_hot(Y_train_orig, 6)
    Y_test = convert_to_one_hot(Y_test_orig, 6)

    print("number of training examples = " + str(X_train.shape[1]))
    print("number of test examples = " + str(X_test.shape[1]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))
    plt.show()

    """ TEST 6 """
    X, Y = create_placeholders(12288, 6)
    print("X = " + str(X))
    print("Y = " + str(Y))

    """ TEST 7 """
    tf.reset_default_graph()
    with tf.Session() as sess:
        parameters = initialize_parameters()
        print("W1 = " + str(parameters["W1"]))
        print("b1 = " + str(parameters["b1"]))
        print("W2 = " + str(parameters["W2"]))
        print("b2 = " + str(parameters["b2"]))


    """ TEST 8 """
    tf.reset_default_graph()

    with tf.Session() as sess:
        X, Y = create_placeholders(12288, 6)
        parameters = initialize_parameters()
        Z3 = forward_propagation(X, parameters)
        print("Z3 = " + str(Z3))

    """ TEST 9 """
    tf.reset_default_graph()

    with tf.Session() as sess:
        X, Y = create_placeholders(12288, 6)
        parameters = initialize_parameters()
        Z3 = forward_propagation(X, parameters)
        cost = compute_cost(Z3, Y)
        print("cost = " + str(cost))

    """ TEST 10 """
    parameters = model(X_train, Y_train, X_test, Y_test)


    """ TEST 11 """
    ## START CODE HERE ## (PUT YOUR IMAGE NAME)
    my_image = "thumbs_up.jpg"
    ## END CODE HERE ##

    # We preprocess your image to fit your algorithm.
    fname = "images/" + my_image
    # NOTE(review): scipy.ndimage.imread and scipy.misc.imresize were removed
    # in SciPy >= 1.2 — this path only works on an old SciPy/Pillow stack;
    # confirm the pinned versions before relying on it.
    image = np.array(ndimage.imread(fname, flatten=False))
    my_image = scipy.misc.imresize(image, size=(64, 64)).reshape((1, 64 * 64 * 3)).T
    my_image_prediction = predict(my_image, parameters)

    plt.imshow(image)
    print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
    plt.show()
Пример #14
0
def _show_sample(images, labels, index, echo_label=False):
    """Display images[index] titled with its class label.

    Args:
        images: array of images indexable by sample.
        labels: label array indexed as labels[:, index].
        index: which sample to show.
        echo_label: when True, also print the label before plotting.
    """
    _, ax = plt.subplots()
    ax.imshow(np.uint8(images[index]))
    if echo_label:
        print("y = " + str(np.squeeze(labels[:, index])))
    plt.title("Class =" + str(np.squeeze(labels[:, index])))
    plt.show()


def main():
    """End-to-end plate/non-plate pipeline.

    Optionally (re)builds the resized-image h5 dataset, loads it, previews
    one sample of each class, scales/one-hot encodes the data, and trains
    the CNN. The duplicated preview code was factored into `_show_sample`.
    """
    generateResizedImages = False
    reshuffleImages = False

    if generateResizedImages:
        imresize.generate_resized_images(256)

    if reshuffleImages:
        sourceFolders = [
            'E:\\Projects\\Hach2019\\Data\\ImageDataSetS256\\Plate',
            'E:\\Projects\\Hach2019\\Data\\ImageDataSetS256\\NonePlate'
        ]
        h5Target = 'E:\\Projects\\Hach2019\\Data\\ImageDataSetS256\\plate256.h5'
        # 80% train / 0% cross-validation / 20% test split.
        dataset = utils.partition_dataset(sourceFolders, 0.8, 0.0, 0.2)
        utils.save_dataset_h5(target=h5Target, dataset=dataset)

        print("dataset shapes:")
        for key in ("train_set_x", "train_set_y", "cv_set_x", "cv_set_y",
                    "test_set_x", "test_set_y", "classes"):
            print(key + ": " + str(dataset[key].shape))

    h5Source = 'E:\\Projects\\Hach2019\\Data\\ImageDataSetS256\\plate256.h5'
    tr_set_x, tr_set_y, cv_set_x, cv_set_y, ts_set_x, ts_set_y, classes = utils.load_dataset_h5(
        source=h5Source)

    print("loaded data shapes:")
    print("tr_set_x: " + str(tr_set_x.shape))
    print("tr_set_y: " + str(tr_set_y.shape))
    print("ts_set_x: " + str(ts_set_x.shape))
    print("ts_set_y: " + str(ts_set_y.shape))
    print("cv_set_x: " + str(cv_set_x.shape))
    print("cv_set_y: " + str(cv_set_y.shape))
    print("classes: " + str(classes.shape))

    # Preview one sample of each class.
    _show_sample(tr_set_x, tr_set_y, 6, echo_label=True)
    _show_sample(tr_set_x, tr_set_y, 120)

    # Scale pixels into [0, 1]; one-hot encode the binary labels (rows = samples).
    X_train = tr_set_x / 255.
    Y_train = utils.convert_to_one_hot(np.uint(tr_set_y), 2).T
    X_test = ts_set_x / 255.
    Y_test = utils.convert_to_one_hot(np.uint(ts_set_y), 2).T
    print("number of training examples = " + str(X_train.shape[0]))
    print("number of test examples = " + str(X_test.shape[0]))
    print("X_train shape: " + str(X_train.shape))
    print("Y_train shape: " + str(Y_train.shape))
    print("X_test shape: " + str(X_test.shape))
    print("Y_test shape: " + str(Y_test.shape))

    parameters = cnn.model(X_train,
                           Y_train,
                           X_test,
                           Y_test,
                           learning_rate=0.01,
                           num_epochs=50,
                           minibatch_size=64,
                           print_cost=True)
Пример #15
0
        plt.plot(np.squeeze(val_acc))
        plt.ylabel('accuracy')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
         
        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")
       
        return parameters, val_acc, features

# Load the hyperspectral-image (HSI) GCN splits from MATLAB .mat files.
Train_X = scio.loadmat('HSI_GCN/Train_X.mat')
TrLabel = scio.loadmat('HSI_GCN/TrLabel.mat')

Test_X = scio.loadmat('HSI_GCN/Test_X.mat')
TeLabel = scio.loadmat('HSI_GCN/TeLabel.mat')

# Unwrap the single variable stored inside each .mat container dict.
Train_X = Train_X['Train_X']
TrLabel = TrLabel['TrLabel']
Test_X = Test_X['Test_X']
TeLabel = TeLabel['TeLabel']

# Labels appear 1-based in the .mat files (hence the -1); one-hot over 16 classes.
TrLabel = convert_to_one_hot(TrLabel-1, 16)
TeLabel = convert_to_one_hot(TeLabel-1, 16)

# Transpose so rows correspond to samples.
TrLabel = TrLabel.T
TeLabel = TeLabel.T

# Train, then persist the learned features for MATLAB post-processing.
parameters, val_acc, features = train_mynetwork(Train_X, TrLabel, Test_X, TeLabel)
sio.savemat('features.mat', {'features': features})
Пример #16
0
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        return parameters, val_acc, features


# Load the hyperspectral-image (HSI) CNN splits from MATLAB .mat files.
X_train = scio.loadmat('HSI_CNN/X_train.mat')
Y_train = scio.loadmat('HSI_CNN/Y_train.mat')

X_test = scio.loadmat('HSI_CNN/X_test.mat')
Y_test = scio.loadmat('HSI_CNN/Y_test.mat')

# Unwrap the single variable stored inside each .mat container dict.
X_train = X_train['X_train']
Y_train = Y_train['Y_train']
X_test = X_test['X_test']
Y_test = Y_test['Y_test']

# Labels appear 1-based in the .mat files (hence the -1); one-hot over 16 classes.
Y_train = convert_to_one_hot(Y_train - 1, 16)
Y_test = convert_to_one_hot(Y_test - 1, 16)

# Transpose so rows correspond to samples.
Y_train = Y_train.T
Y_test = Y_test.T

# Train, then persist the learned features for MATLAB post-processing.
parameters, val_acc, features = train_mynetwork(X_train, Y_train, X_test,
                                                Y_test)
sio.savemat('features.mat', {'features': features})
Пример #17
0
# Load the HSI + LiDAR fusion splits from MATLAB .mat files.
LiDAR_TrSet = scio.loadmat('HSI_LiDAR_FC/LiDAR_TrSet.mat')
HSI_TeSet = scio.loadmat('HSI_LiDAR_FC/HSI_TeSet.mat')
LiDAR_TeSet = scio.loadmat('HSI_LiDAR_FC/LiDAR_TeSet.mat')

TrLabel = scio.loadmat('HSI_LiDAR_FC/TrLabel.mat')
TeLabel = scio.loadmat('HSI_LiDAR_FC/TeLabel.mat')

# Unwrap the variables stored inside the .mat container dicts.
# NOTE(review): the loadmat call for HSI_TrSet is not visible in this excerpt;
# it is presumably loaded just above — confirm against the full file.
HSI_TrSet = HSI_TrSet['HSI_TrSet']
LiDAR_TrSet = LiDAR_TrSet['LiDAR_TrSet']
HSI_TeSet = HSI_TeSet['HSI_TeSet']
LiDAR_TeSet = LiDAR_TeSet['LiDAR_TeSet']

TrLabel = TrLabel['TrLabel']
TeLabel = TeLabel['TeLabel']

# Labels appear 1-based (hence the -1); one-hot over 15 classes.
Y_train = convert_to_one_hot(TrLabel - 1, 15)
Y_test = convert_to_one_hot(TeLabel - 1, 15)

# Transpose so rows correspond to samples.
Y_train = Y_train.T
Y_test = Y_test.T

# Select the training mode for the fusion network.
MODEL = 'MML'  # 'MML': Multimodal learning (MML)
# 'CML-LiDAR':  Crossmodal learning (CML-LiDAR)
# 'CML-HSI':  Crossmodal learning (CML-HSI)

if MODEL.strip() == 'MML':
    parameters, val_acc, feature = train_mynetwork(HSI_TrSet, LiDAR_TrSet,
                                                   HSI_TeSet, LiDAR_TeSet,
                                                   Y_train, Y_test, 'MML')
if MODEL.strip() == 'CML-LiDAR':
import time
# Load the SIGNS dataset: (1080, 64, 64, 3) train / (120, 64, 64, 3) test.
X_train_orig,Y_train_orig,X_test_orig,Y_test_orig,classes = tf_utils.load_dataset()
# Disabled sample preview:
# index=11
# plt.imshow(X_train_orig[index])
# plt.show()
# print(np.squeeze(Y_train_orig))  # squeeze drops the size-1 dimensions from the shape

X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0],-1).T  # each column is one sample
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0],-1).T

# Normalize pixel values into [0, 1].
X_train = X_train_flatten/255
X_test = X_test_flatten/255

# Convert labels to 6-class one-hot matrices.
Y_train = tf_utils.convert_to_one_hot(Y_train_orig,6)
Y_test = tf_utils.convert_to_one_hot(Y_test_orig,6)

# Disabled shape report (messages in Chinese: train/test sample counts and shapes):
# print('训练集样本数 = '+str(X_train.shape[1]))
# print('测试集样本数 = '+str(X_test.shape[1]))
# print('X_train.shape:'+str(X_train.shape))
# print('Y_train.shape:'+str(Y_train.shape))
# print('X_test.shape:'+str(X_test.shape))
# print('Y_test.shape:'+str(Y_test.shape))

#创建placeholders
def creat_placeholders(n_x,n_y):
    '''
    为TensorFlow创建占位符
    :param n_x: 一个实数,图片向量的大小(64*64*3 = 1228)
    :param n_y: 一个实数,分类数(从0到5,所以n_Y=6)
Пример #19
0
output:
    ones = [ 1.  1.  1.]
"""

# Disabled sample preview.
index = 0
#plt.imshow(trainX[index])
#print ("y = " + str(np.squeeze(trainy[index])))

print(testX.shape[0])
# Flatten the images: one sample per column.
X_train_flatten = trainX.reshape(trainX.shape[0], -1).T
X_test_flatten = testX.reshape(testX.shape[0], -1).T
#Normalize pixel values into [0, 1]
X_train = X_train_flatten / 255.
X_test = X_test_flatten / 255.
#Convert training and test labels to one hot matrices (10 classes)
Y_train = convert_to_one_hot(trainy, 10)
Y_test = convert_to_one_hot(testy, 10)

# Report the preprocessed shapes.
print("number of training examples = " + str(X_train.shape[1]))
print("number of test examples = " + str(X_test.shape[1]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))


def create_placeholders(n_x, n_y):
    #tf.compat.v1.enable_eager_execution()
    X = tf.compat.v1.placeholder(tf.float32, shape=(n_x, None), name="X")
    Y = tf.compat.v1.placeholder(tf.float32, shape=(n_y, None), name="Y")
Пример #20
0
def train():
    """Train a fully-connected MNIST-style classifier (TF v1 graph mode).

    Pipeline: load data, one-hot the labels, flatten the images, build a
    regularized forward pass plus an exponential-moving-average (EMA) shadow
    copy, train with exponentially decayed SGD, and report accuracies.

    Side effects: prints validation accuracy every 1000 steps, prints a final
    accuracy, and writes the graph to ../log2 for TensorBoard.
    """

    BATCH_SIZE = 100

    # Exponentially decayed learning-rate schedule.
    LEARNING_RATE_BASE = 0.8
    LEARNING_RATE_DECAY = 0.99

    REGULARIZATION_RATE = 0.0001
    TRAINING_STEPS = 5000
    MOVING_AVERAGE_DECAY = 0.99

    (x_train, y_train_orig), (x_test, y_test_orig) = load_data()

    print("Original Train data X shape: ", x_train.shape,
          "Training data Y shape: ", y_train_orig.shape)
    print("Original Test data X shape: ", x_test.shape, "Test data Y shape: ",
          y_test_orig.shape)
    # Labels become (10, m) one-hot matrices.
    y_train = convert_to_one_hot(y_train_orig, 10)
    y_test = convert_to_one_hot(y_test_orig, 10)

    print("----- Reshape Original Trains Dataset Shape ------")
    # Flatten each image to a row; transpose labels so rows are samples.
    x_reshape_train = tf.reshape(x_train, [x_train.shape[0], -1])
    y_reshape_train = tf.transpose(y_train)

    print("Reshape Train data X as: ", x_reshape_train.shape,
          "Training data Y shape: ", y_reshape_train.shape)
    x_test_reshape = tf.reshape(x_test, [x_test.shape[0], -1])
    y_test_reshape = tf.transpose(y_test)
    print("Reshape Test data X as: ", x_test_reshape.shape,
          "Test data Y shape: ", y_test_reshape.shape)

    input_x_flatten_size = x_reshape_train.shape[1]
    input_x_size = tf.convert_to_tensor(input_x_flatten_size, dtype=tf.int32)
    input_x_number_examples = tf.convert_to_tensor(x_reshape_train.shape[0],
                                                   dtype=tf.int32)

    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32,
                           shape=(None, input_x_flatten_size),
                           name='x-input')
        y_ = tf.placeholder(tf.float32,
                            shape=(None, OUTPUT_NODE),
                            name='y-input')

    # Forward propagation result (weights/biases are created inside inference()).
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = inference(x, regularizer, None)

    # Step counter for the training run (not trained itself).
    global_step = tf.Variable(0, trainable=False)
    with tf.name_scope('moving_average'):
        variabl_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variabl_averages_op = variabl_averages.apply(tf.trainable_variables())

    # Forward propagation using the EMA (sliding-average) weights.
    average_y = inference(x, regularizer, variabl_averages, True)

    with tf.name_scope('loss_function'):
        # Cross-entropy loss against the dense class index.
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.argmax(y_, 1))
        cross_entropy_mean = tf.reduce_mean(cross_entropy)

        # NOTE(review): collection name 'lossess' (sic) must match the name
        # inference() uses to register regularization losses — do not "fix"
        # the spelling unilaterally.
        loss = cross_entropy_mean + tf.add_n(tf.get_collection('lossess'))

    with tf.name_scope('train_step'):
        # Learning-rate decay over the examples-per-batch schedule.
        learning_rate = tf.train.exponential_decay(
            LEARNING_RATE_BASE, global_step,
            input_x_number_examples / BATCH_SIZE, LEARNING_RATE_DECAY)

        # global_step: Optional Variable to increment by one after the
        # variables have been updated.
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            loss, global_step=global_step)

        # Group the SGD step and the EMA update into a single training op.
        with tf.control_dependencies([train_step, variabl_averages_op]):
            train_op = tf.no_op(name='train')

    # Accuracy is evaluated through the moving-average forward pass.
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # Validation feed (built from the test split).
        validates_x = x_test_reshape.eval()
        validates_y = y_test_reshape.eval()
        validates_feed = {x: validates_x, y_: validates_y}

        # NOTE(review): test_feed is built from the *training* split, so the
        # final figure is training-set accuracy; confirm this is intended.
        test_feed = {x: x_reshape_train.eval(), y_: y_reshape_train.eval()}
        seed = 3
        mini_batches = random_mini_batches(tf.transpose(x_reshape_train),
                                           tf.transpose(y_reshape_train),
                                           BATCH_SIZE, seed)

        for i in range(TRAINING_STEPS):

            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validates_feed)
                print(
                    "After %d training step(s), validation accuracy using average model is %g"
                    % (i, validate_acc))

            # Reshuffle a fresh epoch of mini-batches once the list cycles.
            k = i % len(mini_batches)
            if k == 0:
                seed = seed + 1
                mini_batches = random_mini_batches(
                    tf.transpose(x_reshape_train),
                    tf.transpose(y_reshape_train), BATCH_SIZE, seed)

            mini_x_batches, mini_y_batches = mini_batches[k]
            sess.run(train_op,
                     feed_dict={
                         x: mini_x_batches,
                         y_: mini_y_batches
                     })

        test_acc = sess.run(accuracy, feed_dict=test_feed)
        # BUG FIX: this report previously printed validate_acc (the stale value
        # from the last %1000 checkpoint) instead of the test_acc computed on
        # the line above; it also printed the loop index rather than the total
        # number of completed steps.
        print(
            "After %d training step(s), testing accuracy using average model is %g"
            % (TRAINING_STEPS, test_acc))

    # Dump the graph for TensorBoard inspection.
    writer = tf.summary.FileWriter("../log2", tf.get_default_graph())
    writer.close()
Пример #21
0
    W3 = tf.convert_to_tensor(parameters["W3"])
    b3 = tf.convert_to_tensor(parameters["b3"])

    params = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3}

    x = tf.placeholder("float", [12288, 1])

    z3 = forward_propagation_for_predict(x, params)
    p = tf.argmax(z3)

    sess = tf.Session()
    prediction = sess.run(p, feed_dict={x: X})

    return prediction


# Load the data.
train_X, train_Y, test_X, test_Y = init_utils.load_dataset(is_plot=True)
train_Y = tf_utils.convert_to_one_hot(train_Y, 2)
test_Y = tf_utils.convert_to_one_hot(test_Y, 2)
layers_dims = [train_X.shape[0], 10, 5, 2]  # with softmax the output has 2 units for binary classification
plt.show()

# Start training.
parameters = model(train_X,
                   train_Y,
                   test_X,
                   test_Y,
                   layers_dims,
                   learning_rate=0.01)
Пример #22
0
 year = 2015
 mth_data_prev = data_processor.aggregate(2014)
 mth_data = data_processor.aggregate(year)
 mth_data = mth_data_prev.append(mth_data)
 
 Y_train = data_processor.get_training_output_binary(mth_data)
 Y_train = Y_train.reindex(X_train.index)
 #PB here, so add this line in meantine
 Y_train = Y_train.fillna(0)
 
 #Transpose format, switch to numpy
 X_train = X_train.T.values
 Y_train = Y_train.T.values
 
 Y_train = Y_train.astype(int)
 Y_train = convert_to_one_hot(Y_train, 2)
 
 #parameters = model(X_train, Y_train)
 
 
 #Test Set
 #orig_data_test = pd.read_csv('sample_orig_2017.txt', header = None, sep = '|', index_col = 19)
 year = 2016
 orig_data_test = input_transco.aggregate(year)
 
 orig_data_test.columns = orig_col
 
 #Transforming string values to Numerical Values
 string_labels = ['flag_fthb','occpy_sts','channel','ppmt_pnlty','prod_type','st', \
               'prop_type','loan_purpose','seller_name','servicer_name', 'flag_sc']
 X_test= input_transco.label_to_num_test(orig_data_test, string_labels, dic_transco_dic)
Пример #23
0
    #        res = array2input(datause,channels,framesra, 10)
    #        add_into_Trainset(res, 5, path_x_Train = "x_train.npy" , path_y_Train = "y_train.npy")
    #    for i in range(148,178):
    #        datause, channels, sampwidth, framesra, frameswav = wave_as_array('D:\\training_data\\Noise\\Noise_%d.wav'%i)
    #        res = array2input(datause,channels,framesra, 10)
    #        add_into_Testset(res, 5, path_x_Test = "x_test.npy" , path_y_Test = "y_test.npy")

    # Label 6 : rail 110/13

    #    for i in range(1,111):
    #        datause, channels, sampwidth, framesra, frameswav = wave_as_array('D:\\training_data\\Rail\\rail_%d.wav'%i)
    #        res = array2input(datause,channels,framesra, 10)
    #        add_into_Trainset(res, 6, path_x_Train = "x_train.npy" , path_y_Train = "y_train.npy")
    #    for i in range(111,124):
    #        datause, channels, sampwidth, framesra, frameswav = wave_as_array('D:\\training_data\\Rail\\rail_%d.wav'%i)
    #        res = array2input(datause,channels,framesra, 10)
    #        add_into_Testset(res, 6, path_x_Test = "x_test.npy" , path_y_Test = "y_test.npy")

    path_x_Train = 'C:/Users/niuzhengnan/Desktop/sr/x_train.npy'
    x_train = np.load(path_x_Train)
    x_train = x_train.T
    path_y_Train = 'C:/Users/niuzhengnan/Desktop/sr/y_train.npy'
    y_train_orig = np.load(path_y_Train)
    y_train = convert_to_one_hot(y_train_orig, 7)
    path_x_Test = 'C:/Users/niuzhengnan/Desktop/sr/x_test.npy'
    path_y_Test = 'C:/Users/niuzhengnan/Desktop/sr/y_test.npy'
    x_test = np.load(path_x_Test)
    x_test = x_test.T
    y_test_orig = np.load(path_y_Test)
    y_test = convert_to_one_hot(y_test_orig, 7)
    parameters, accuracy = model(x_train, y_train, x_test, y_test, n_l3=7)

# Shapes: (1080,64,64,3)  (1,1080)    (120,64,64,3)  (1,120)   classes [1 2 3 4 5]
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = tf_utils.load_dataset(
)

# Normalize pixel values into [0, 1].
X_train = X_train_orig / 255
X_test = X_test_orig / 255

# Reshape: one flattened image per column.
X_train = X_train.reshape(X_train.shape[0], -1).T
X_test = X_test.reshape(X_test.shape[0], -1).T

# Convert labels to one-hot matrices.
Y_train = tf_utils.convert_to_one_hot(
    Y_train_orig, 6)  # one-hot depth 6 = labels [0..5]; columns = 1080 training samples
Y_test = tf_utils.convert_to_one_hot(
    Y_test_orig, 6)  # one-hot depth 6 = labels [0..5]; columns = 120 test samples


def create_placeholders(n_x, n_y):  # 12288 \ 6
    """
    为TensorFlow会话创建占位符
    参数:
        n_x - 一个实数,图片向量的大小(12288)--->输入节点的数目
        n_y - 一个实数,分类数(从0到5,所以n_y = 6)--->输出节点的数目

    返回:
        X - 一个数据输入的占位符,维度为[n_x, None],dtype = "float"
        Y - 一个对应输入的标签的占位符,维度为[n_Y,None],dtype = "float"
Пример #25
0
def GestureRecognition():
    """Train a fully-connected network on the SIGNS hand-gesture dataset.

    Builds a (12288, 25, 12, 6) feed-forward net in TF1 graph mode, trains it
    with Adam on mini-batches, optionally plots the learning curve, prints
    train/test accuracy and returns the learned parameters.

    Returns:
        parameters -- dict of trained parameter values fetched from the session.
    """
    learning_rate = 0.0001
    num_epochs = 1500
    minibatch_size = 32
    print_cost = True
    is_plot = True
    costs = []  # epoch cost recorded every 5 epochs, for the learning curve

    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = tf_utils.load_dataset(
    )
    # Flatten images so that each column is one example.
    X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
    X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T

    m = X_train_flatten.shape[1]  # number of training examples

    # Normalize pixel values into [0, 1].
    X_train = X_train_flatten / 255
    X_test = X_test_flatten / 255

    # One-hot encode the labels (6 gesture classes).
    Y_train = tf_utils.convert_to_one_hot(Y_train_orig, 6)
    Y_test = tf_utils.convert_to_one_hot(Y_test_orig, 6)

    # BUGFIX: reset the graph BEFORE seeding it. The original code called
    # set_random_seed first and reset_default_graph afterwards, which threw
    # away the graph-level seed and made initialization non-reproducible.
    # Also use the tf.compat.v1 namespace consistently throughout.
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.set_random_seed(1)
    seed = 3  # seed for minibatch shuffling; advanced once per epoch

    X = tf.compat.v1.placeholder(tf.float32, [X_train.shape[0], None],
                                 name="X")
    Y = tf.compat.v1.placeholder(tf.float32, [Y_train.shape[0], None],
                                 name="Y")

    # Layer sizes derived from the data (12288 and 6 for SIGNS) instead of
    # hard-coding them, so the same code works for other input/label shapes.
    parameters = InitParameter([X_train.shape[0], 25, 12, Y_train.shape[0]])

    z = ForwardPropagation(X, parameters)
    cost = ComputeCost(z, Y)
    optimizer = tf.compat.v1.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cost)
    init = tf.compat.v1.global_variables_initializer()

    with tf.compat.v1.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            epoch_cost = 0  # running average cost over this epoch
            num_minibatches = int(m / minibatch_size)
            seed = seed + 1  # new shuffle order every epoch
            minibatches = tf_utils.random_mini_batches(X_train, Y_train,
                                                       minibatch_size, seed)

            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch

                # One optimizer step on this minibatch.
                _, minibatch_cost = sess.run([optimizer, cost],
                                             feed_dict={
                                                 X: minibatch_X,
                                                 Y: minibatch_Y
                                             })

                # Accumulate this minibatch's share of the epoch cost.
                epoch_cost = epoch_cost + minibatch_cost / num_minibatches

            # Record the cost every 5 epochs; print every 100.
            if epoch % 5 == 0:
                costs.append(epoch_cost)
                if print_cost and epoch % 100 == 0:
                    print("epoch = " + str(epoch) + "    epoch_cost = " +
                          str(epoch_cost))

        # Plot the learning curve.
        if is_plot:
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('iterations (per tens)')
            plt.title("Learning rate =" + str(learning_rate))
            plt.show()

        # Fetch the trained parameter values out of the graph.
        parameters = sess.run(parameters)
        print("参数已经保存到session。")

        # A prediction is correct when the argmax over class logits matches
        # the argmax of the one-hot label (both taken along the class axis).
        correct_prediction = tf.equal(tf.argmax(z), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("训练集的准确率:", accuracy.eval({X: X_train, Y: Y_train}))
        print("测试集的准确率:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters
Пример #26
0

# Start from a clean TF graph before building anything.
tf.reset_default_graph()

# Load the SIGNS dataset.
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Preview one training example together with its label.
index = 0
plt.imshow(X_train_orig[index])
plt.show()
print("y = " + str(np.squeeze(Y_train_orig[:, index])))

# Flatten each image into a column vector (features x examples).
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T

# Scale pixel intensities into [0, 1].
X_train, X_test = X_train_flatten / 255., X_test_flatten / 255.

# One-hot encode labels over the 6 gesture classes.
Y_train, Y_test = convert_to_one_hot(Y_train_orig, 6), convert_to_one_hot(
    Y_test_orig, 6)

# Build a 12288-25-12-6 network, train it, then score train and test sets.
n = DeepNeuralNetwork("DNN1", [12288, 25, 12, 6], name="Deep Neural Network 1")
n.iteration_unit = 1
n.Train(X_train, Y_train)
n.Query(X_train, Y_train)
n.Query(X_test, Y_test)
# NOTE(review): x_train/y_train/x_test/y_test and n_class are defined earlier
# in the file (not visible here); the `- 1` below suggests labels are 1-based.
# Show the class distribution of the training labels.
print("y train: " + str(Counter(y_train.reshape(-1))))

exp = 5
# Augment both splits via expand_dataset (defined elsewhere) -- presumably
# returns `exp`-fold augmented copies; verify against its definition.
x_test_over, y_test_over = expand_dataset(x_test, y_test, exp)
x_train_, y_train_ = expand_dataset(x_train, y_train, exp)
# Stack the augmented samples onto the training set. Note the augmented
# TEST samples are also added to training, which leaks test-derived data
# into the training split -- TODO confirm this is intentional.
x_train = np.vstack((x_train, x_train_))
y_train = np.vstack((y_train, y_train_))
x_train = np.vstack((x_train, x_test_over))
y_train = np.vstack((y_train, y_test_over))
# Shuffle the combined training set with one shared permutation so the
# x/y rows stay aligned.
idx = [i for i in range(x_train.shape[0])]
idx = np.random.permutation(idx)
x_train = x_train[idx]
y_train = y_train[idx]

# Shift labels to 0-based before one-hot encoding.
y_train = convert_to_one_hot(y_train - 1, n_class)
y_test = convert_to_one_hot(y_test - 1, n_class)

print("x_train_shape: " + str(x_train.shape))
print("y_train_shape: " + str(y_train.shape))
print("x_test_shape: " + str(x_test.shape))
print("y_test_shape: " + str(y_test.shape))
# Trackers for the best accuracy (and the epoch it occurred at) seen so far.
max_accu_echo = 0
max_accu_i = 0
t, test_cost, train_cost = model(x_train.T,
                                 y_train,
                                 x_test.T,
                                 y_test,
                                 learning_rate=0.0001,
                                 lambda_=0.014,
                                 num_epochs=0,