コード例 #1
0
def encode_decode_mnist():
    """Run the MNIST auto-encoder: train it on MNIST images or test a
    previously trained model, depending on the module-level MODE flag.
    """
    from zhongrj.data.mnist import load_data

    model = mnist_autoencoder_model()

    if MODE == 'test':
        model.test()
    elif MODE == 'train':
        # Data is only loaded when actually training.
        model.train(load_data()['train_x'])
コード例 #2
0
def semi_supervised_mnist():
    """Semi-supervised MNIST classification.

    Encodes all images with a pre-trained auto-encoder, then trains a small
    classifier on the codes of only n=1000 labelled training examples and
    reports accuracy on the full test set.
    """
    from zhongrj.data.mnist import load_data

    data = load_data()
    n = 1000  # number of labelled examples used for supervision
    train_x, train_y, test_x, test_y = data['train_x'][:n], data[
        'train_y'][:n], data['test_x'], data['test_y']
    train_x, test_x = train_x.reshape([-1, 28, 28,
                                       1]), test_x.reshape([-1, 28, 28, 1])

    # Encode train + test in a single forward pass (inference mode).
    encoder_model = mnist_autoencoder_model()
    code = encoder_model.sess.run(
        encoder_model.code, {
            encoder_model.x: np.vstack((train_x, test_x)),
            encoder_model.is_train: False
        })
    train_code, test_code = code[:n], code[n:]

    # ==================================== simple classifier ====================================
    x = tf.placeholder(tf.float32, [None, encoder_model.y_dims])
    y_actual = tf.placeholder(tf.float32, [None, 10])
    is_train = tf.placeholder(tf.bool)
    y_predict = CNN(x,
                    10,
                    dnn_units=[20, 20],
                    batch_noraml=True,  # (sic) keyword name as declared by CNN
                    is_train=is_train)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_predict,
                                                labels=y_actual))
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_actual, 1)),
                tf.float32))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.002).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(10000):
            # Random mini-batch of 100 labelled codes per step.
            mask = np.random.choice(n, 100)
            _, accuracy_, loss_ = sess.run([optimizer, accuracy, loss], {
                x: train_code[mask],
                y_actual: train_y[mask],
                is_train: True
            })
            print(loss_)
        print(
            'Total Accuracy: ',
            sess.run(
                accuracy,
                {
                    x: test_code,
                    y_actual: test_y,
                    # BUG FIX: evaluate in inference mode. The original fed
                    # is_train=True here; with batch normalisation enabled
                    # that computes statistics from the test batch itself and
                    # distorts the reported accuracy.
                    is_train: False
                }))
コード例 #3
0
def __store_data():
    """Invert every MNIST training image (pixel -> 1 - pixel) and persist the
    result as an .npz archive; returns the saved dict.
    """
    mnist = mnist_data.load_data()
    images = mnist['train_x']
    inverted = []
    for index, picture in enumerate(images):
        inverted.append(1 - picture)
        # Lightweight progress indicator, rewritten in place every 100 images.
        if index % 100 == 0:
            print('\r{}% 完成~'.format(index * 100 / len(images)), end='')

    result = {'train_x': np.array(inverted)}
    np.savez(STORE_DIR + FILE_NAME, **result)
    return result
コード例 #4
0
def classifier():
    """Build the showcnn MNIST classifier and dispatch on the MODE flag."""
    model = showcnn(
        name='showcnn_classifier',
        x_dims=[28, 28, 1],
        y_classes=10,
        cnn_units=[10, 15, 20],
        dnn_units=[100, 50],
        learning_rate=1e-2,
        batch=100)

    from zhongrj.data.mnist import load_data

    if MODE == 'train':
        data = load_data()
        model.train(data['train_x'], data['train_y'])
    elif MODE == 'test':
        data = load_data()
        # Only the first 100 test examples are evaluated.
        model.test(data['test_x'][:100], data['test_y'][:100])
    elif MODE == 'back':
        # Backward visualisation is intentionally a no-op in this example.
        pass
    elif MODE == 'show_layer':
        model.show_layer()
コード例 #5
0
def __store_data():
    """Generate distorted train/test MNIST data and save it as an .npz archive.

    Returns the dict that was written, keyed train_x/train_y/test_x/test_y.
    """
    mnist = mnist_data.load_data()

    print('生成训练数据...')
    train_images, train_labels = __create_distortions_data(
        mnist['train_x'], mnist['train_y'])

    print('生成测试数据...')
    test_images, test_labels = __create_distortions_data(
        mnist['test_x'], mnist['test_y'])

    dataset = {
        'train_x': train_images,
        'train_y': train_labels,
        'test_x': test_images,
        'test_y': test_labels,
    }
    np.savez(STORE_DIR + FILE_NAME, **dataset)
    return dataset
コード例 #6
0
def __store_data():
    """Compose 10 multi-digit MNIST images with annotated bounding boxes and
    preview the first one. NOTE(review): despite its name this snippet does
    not save anything — presumably the persisting step lives elsewhere.
    """
    mnist = mnist_data.load_data()
    digit_images = mnist['train_x'].reshape([-1, 28, 28])
    digit_labels = mnist['train_y']

    train_x, train_y = [], []
    for _ in range(10):
        image, labels = __create_multi_mnist(digit_images, digit_labels)
        # Burn each digit's box and class id into the composed image.
        for label in labels:
            image = draw_rectangle(
                image, label[1:3], label[3:5], 1, text=str(label[0]))
        train_x.append(image)
        train_y.append(labels)

    show_image(train_x[:1], 1)
コード例 #7
0
def mnist_classify():
    """Build an RNN classifier for MNIST and train it when MODE == 'train'."""
    from zhongrj.data.mnist import load_data

    model = RNN(
        name='RNN_mnist_classify',
        x_dims=[28, 28],
        y_classes=10,
        input_units=128,
        cell_units=128,
        learning_rate=1e-2,
        batch=100)

    print('Loading data ...')
    data = load_data()

    if MODE == 'train':
        model.train(data['train_x'], data['train_y'])
    elif MODE == 'test':
        # Test path intentionally not implemented in this example.
        pass
コード例 #8
0
ファイル: FCN.py プロジェクト: yanyang729/machine-learning-1
def mnist_segmentation():
    """Train an FCN to segment MNIST digits into foreground/background."""
    from zhongrj.data.mnist import load_data

    model = FCN(
        name='FCN_mnist',
        x_dims=[28, 28, 1],
        y_classes=2,
        cnn_units=[20, 20],
        learning_rate=1e-3,
        batch=100)

    print('Loading Data ...')
    data = load_data()

    # The target mask is the input itself, binarised: any non-zero pixel
    # becomes foreground (1), everything else stays background (0).
    mask = data['train_x'].copy()
    mask[mask > 0] = 1

    if MODE == 'train':
        model.train(data['train_x'], mask)
    elif MODE == 'test':
        # Test path intentionally not implemented in this example.
        pass
コード例 #9
0
# y_predict = CNN_deprecated(x_image, is_train, 10, 'CNN', 3, 2, tf.nn.relu)
# Log every trainable variable in the 'CNN' scope. IDIOM FIX: the original
# ran a list comprehension purely for its print side effect and threw the
# list away; a plain for loop says what is meant.
for param in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'CNN'):
    print(param)

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y_predict,
                                                        labels=y_actual)
loss = tf.reduce_mean(cross_entropy)
# Ensure UPDATE_OPS (e.g. batch-norm moving averages) run with each step.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_pred = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_actual, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

data = load_data()
train_x, train_y, test_x, test_y = data['train_x'], data['train_y'], data[
    'test_x'], data['test_y']

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(500):
        # Random mini-batch of 100 training examples per step.
        mask = np.random.choice(len(train_x), 100)
        feed_dict = {x: train_x[mask], y_actual: train_y[mask], is_train: True}
        _ = sess.run(optimizer, feed_dict)
        if i % 100 == 0:
            # NOTE(review): accuracy here is measured on the training batch
            # with is_train=True, so it is only a rough progress signal.
            accuracy_ = sess.run(accuracy, feed_dict)
            print(accuracy_)
    print(
        '\n\n\nFinal Test: ',