Code Example #1
File: chapter13.py  Project: uuspeaker/ML
# Crop a small patch from the `china` sample image and average the RGB
# channels into a single grayscale channel.
image = china[150:220, 130:250]
height, width, channels = image.shape
image_grayscale = image.mean(axis=2).astype(np.float32)

# Reshape to a mini-batch of one image with one channel (NHWC layout).
images = image_grayscale.reshape(1, height, width, 1)

# Two 7x7 filters: the first responds to vertical lines, the second to
# horizontal lines.
fmap = np.zeros(shape=(7, 7, 1, 2), dtype=np.float32)
fmap[:, 3, 0, 0] = 1
fmap[3, :, 0, 1] = 1

plot_image(fmap[:, :, 0, 0])
plt.show()
plot_image(fmap[:, :, 0, 1])
plt.show()

reset_graph()

# TensorFlow 1.x graph: convolve the image batch with the two hand-made
# filters defined above.
X = tf.placeholder(tf.float32, shape=(None, height, width, 1))
feature_maps = tf.constant(fmap)
convolution = tf.nn.conv2d(X,
                           feature_maps,
                           strides=[1, 1, 1, 1],
                           padding="SAME")

# with tf.Session() as sess:
#     output = convolution.eval(feed_dict={X: images})
#
# plot_image(images[0, :, :, 0])
# save_fig("china_original", tight_layout=False)
# plt.show()
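
The snippet above relies on imports, the `china` sample image and a few plotting helpers (plot_image, reset_graph, save_fig) that are defined elsewhere in the project. A minimal sketch of what that setup might look like, assuming the image comes from scikit-learn's load_sample_image and the helpers are simple notebook-style utilities:

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.datasets import load_sample_image

# Assumed source of `china`: an RGB sample image bundled with scikit-learn.
china = load_sample_image("china.jpg")


def reset_graph(seed=42):
    # Clear the default graph and fix the random seeds for reproducibility.
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)


def plot_image(image):
    # Show a single-channel image without axes.
    plt.imshow(image, cmap="gray", interpolation="nearest")
    plt.axis("off")


def save_fig(fig_id, tight_layout=True):
    # Hypothetical helper: save the current figure to disk as a PNG.
    if tight_layout:
        plt.tight_layout()
    plt.savefig(fig_id + ".png", format="png", dpi=300)
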
Code Example #2
    print('slope: ' + str(slope))
    print('y_intercept: ' + str(y_intercept))

    best_fit = []
    for i in x_vals:
        best_fit.append(slope * i + y_intercept)

    # Plot the results
    plt.plot(x_vals, y_vals, 'o', label='Data')
    plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3)
    plt.legend(loc='upper left')
    plt.show()


util.reset_graph()
# Build the design matrix A = [x, 1] and the target column vector b.
A = tf.constant(np.column_stack((x_vals_column, ones_column)))
b = tf.constant(np.transpose(np.matrix(y_vals)))
with tf.Session() as sess:
    # Normal equations: (A^T A) x = A^T b.  Factor A^T A = L L^T, then solve
    # the triangular systems L z = A^T b and L^T x = z.
    tA_A = tf.matmul(tf.transpose(A), A)
    L = tf.cholesky(tA_A)
    tA_b = tf.matmul(tf.transpose(A), b)
    sol1 = tf.matrix_solve(L, tA_b)
    sol2 = tf.matrix_solve(tf.transpose(L), sol1)

    solution_eval = sess.run(sol2)

    # Extract coefficients
    slope = solution_eval[0][0]
    y_intercept = solution_eval[1][0]
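
The code above solves the least-squares problem through the normal equations: A^T A x = A^T b is factored as L L^T x = A^T b with a Cholesky decomposition, and x is recovered by two triangular solves. The snippet assumes x_vals, y_vals, x_vals_column and ones_column already exist; a minimal sketch of that setup with hypothetical synthetic data, plus a NumPy cross-check of the slope and intercept:

import numpy as np

# Hypothetical data: a noisy line y = 2x + 1 with 100 samples.
x_vals = np.linspace(0., 10., 100)
y_vals = 2. * x_vals + 1. + np.random.normal(0., 1., 100)

# Design-matrix columns in the shape the TensorFlow code above expects.
x_vals_column = np.transpose(np.matrix(x_vals))
ones_column = np.transpose(np.matrix(np.repeat(1., 100)))

# Cross-check: the Cholesky route should match an ordinary least-squares fit.
A_np = np.asarray(np.column_stack((x_vals_column, ones_column)))
coeffs, _, _, _ = np.linalg.lstsq(A_np, y_vals, rcond=None)
print('lstsq slope:', coeffs[0], 'intercept:', coeffs[1])
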
Code Example #3
File: reg.py  Project: 631068264/learn-sktf
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

import util  # project-local helper module providing reset_graph()


def reg(loss_func='l2', learn_rate=0.01, max_iter=100, batch_size=32):
    """Fit y = A*x + b by mini-batch gradient descent with a selectable loss
    ('l2', 'l1', 'lasso' or 'ridge'). The training data is read from the
    module-level arrays x_vals and y_vals."""
    util.reset_graph()
    with tf.Session() as sess:
        x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
        y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

        A = tf.Variable(tf.random_normal(shape=[1, 1]))
        b = tf.Variable(tf.random_normal(shape=[1, 1]))
        # y = Ax + b
        model_output = tf.add(tf.matmul(x_data, A), b)

        # Select the loss: plain L2 or L1 on the residuals, or an L2 data
        # term plus a regularization term on the slope A (lasso / ridge).
        if loss_func == 'l2':
            loss = tf.reduce_mean(tf.square(y_target - model_output))
        elif loss_func == 'l1':
            loss = tf.reduce_mean(tf.abs(y_target - model_output))
        elif loss_func == 'lasso':
            # Lasso is approximated with a steep sigmoid (a continuous
            # Heaviside step) that switches a large penalty on once the
            # slope A exceeds lasso_param.
            lasso_param = tf.constant(0.9)
            heavyside_step = tf.truediv(
                1.,
                tf.add(1.,
                       tf.exp(tf.multiply(-100., tf.subtract(A,
                                                             lasso_param)))))
            regularization_param = tf.multiply(heavyside_step, 99.)
            loss = tf.add(tf.reduce_mean(tf.square(y_target - model_output)),
                          regularization_param)
        elif loss_func == 'ridge':
            # Ridge: L2 data term plus an L2 penalty on the slope A.
            ridge_param = tf.constant(1.)
            ridge_loss = tf.reduce_mean(tf.square(A))
            loss = tf.expand_dims(
                tf.add(tf.reduce_mean(tf.square(y_target - model_output)),
                       tf.multiply(ridge_param, ridge_loss)), 0)

        my_opt = tf.train.GradientDescentOptimizer(learn_rate)
        train_step = my_opt.minimize(loss)

        init = tf.global_variables_initializer()
        init.run()

        loss_vec = []
        for i in range(max_iter):
            rand_index = np.random.choice(len(x_vals), size=batch_size)
            rand_x = np.transpose([x_vals[rand_index]])
            rand_y = np.transpose([y_vals[rand_index]])

            sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
            temp_loss = sess.run(loss,
                                 feed_dict={
                                     x_data: rand_x,
                                     y_target: rand_y
                                 })

            if loss_func == 'l1' or loss_func == 'l2':
                loss_vec.append(temp_loss)
            else:
                loss_vec.append(temp_loss[0])
            if (i + 1) % 50 == 0:
                print('Step #' + str(i + 1) + ' A = ' + str(sess.run(A)) +
                      ' b = ' + str(sess.run(b)))
                print('Loss = ' + str(temp_loss))

        [slope] = sess.run(A)
        [y_intercept] = sess.run(b)

        # Get best fit line
        best_fit = []
        for i in x_vals:
            best_fit.append(slope * i + y_intercept)

        # Plot the result
        plt.plot(x_vals, y_vals, 'o', label='Data Points')
        plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3)
        plt.legend(loc='upper left')
        plt.title('Sepal Length vs Petal Width')
        plt.xlabel('Petal Width')
        plt.ylabel('Sepal Length')
        plt.show()

        # Plot loss over time
        plt.plot(loss_vec, 'k-')
        plt.title(loss_func.upper() + ' Loss per Generation')
        plt.xlabel('Generation')
        plt.ylabel('Loss')
        plt.show()
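
reg() reads its training data from the module-level arrays x_vals and y_vals rather than taking them as arguments. A minimal usage sketch, assuming the iris-based setup suggested by the plot labels (petal width as input, sepal length as target); the project's actual data loading may differ:

import numpy as np
from sklearn import datasets

# Assumed data: iris petal width as the input, sepal length as the target.
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])   # petal width
y_vals = np.array([y[0] for y in iris.data])   # sepal length

# Fit once with each of the supported loss functions.
for loss_name in ('l2', 'l1', 'lasso', 'ridge'):
    reg(loss_func=loss_name, learn_rate=0.05, max_iter=100, batch_size=25)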