Example #1
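This excerpt starts midway through a deepnn(x) graph builder, so h_fc1_drop, W_fc2, keep_prob, and the dir data path are defined earlier in the original file; note also that the misspelled train_imgaes/test_imgaes keywords follow the project's own read_data_sets signature. A minimal sketch of the imports and helper the visible tail relies on (the module path for read_data_sets is an assumption):

import tensorflow as tf
from read_data import read_data_sets  # hypothetical module path for the project helper

def bias_variable(shape):
    # Bias helper in the style of the classic TensorFlow MNIST tutorial:
    # a small positive constant to avoid dead ReLUs.
    return tf.Variable(tf.constant(0.1, shape=shape))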
            with tf.name_scope('b_fc2'):
                b_fc2 = bias_variable([2])
                tf.summary.histogram('fc_layer2', b_fc2)
            with tf.name_scope('y_conv'):
                y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
                tf.summary.histogram('fc_layer2', y_conv)
                return y_conv, keep_prob


# Import data
num_classes = 2
swallowsound = read_data_sets(dir,
                              gzip_compress=True,
                              train_imgaes='train-images-idx3-ubyte.gz',
                              train_labels='train-labels-idx1-ubyte.gz',
                              test_imgaes='t10k-images-idx3-ubyte.gz',
                              test_labels='t10k-labels-idx1-ubyte.gz',
                              one_hot=True,
                              validation_size=50,
                              num_classes=num_classes,
                              MSB=True)

# Create the model
with tf.name_scope('inputs'):
    x = tf.placeholder(tf.float32, [None, 250], name='x_input')
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 2], name='y_input')

# Build the graph for the deep net
y_conv, keep_prob = deepnn(x)

# cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y_conv)
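# The excerpt stops at the commented-out loss above; a minimal completion in the
# spirit of Example #2 below (a sketch, not code from the original file):
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)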
Example #2
def main(_):
    # Import data
    num_classes = 2
    swallowsound = read_data_sets(
        FLAGS.data_dir,
        gzip_compress=False,
        train_imgaes='train-images-idx3-ubyte',
        train_labels='train-labels-idx1-ubyte',
        test_imgaes='t10k-images-idx3-ubyte',
        test_labels='t10k-labels-idx1-ubyte',
        one_hot=True,
        validation_size=2000,  # validation set size
        num_classes=num_classes,
        MSB=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 50])

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 2])

    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                                logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):  # note: scope name says Adam, but plain SGD is used
        train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
            cross_entropy)  # accuracy 0.97
        # train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)  # accuracy 0.94

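    # argmax turns the one-hot labels and the logits into class ids;
    # equality then flags correct predictions, and the mean of the cast gives accuracy.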
    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    graph_location = tempfile.mkdtemp()
    print('Saving graph to: %s' % graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(10000):
            batch = swallowsound.train.next_batch(
                1000)  # a batch of 200 worked better than 50; batch(500) gave test accuracy 0.985714
            if i % 500 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    keep_prob: 1.0
                })
                print('step %d, training accuracy %g' % (i, train_accuracy))
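            # Dropout is active (keep_prob=0.5) only for the training update;
            # the evaluation above disables it with keep_prob=1.0.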
            train_step.run(feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 0.5
            })

        print('test accuracy %g' % accuracy.eval(
            feed_dict={
                x: swallowsound.test.images,
                y_: swallowsound.test.labels,
                keep_prob: 1.0
            }))
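Example #2 creates a summary FileWriter but only ever logs the graph; the histogram summaries declared inside deepnn are never evaluated. A minimal sketch of how they could be wired into the loop (an assumption about intent, not code from the original):

    merged = tf.summary.merge_all()
    # inside the training loop, e.g. at the same 500-step cadence:
    #     summary = sess.run(merged, feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
    #     train_writer.add_summary(summary, i)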
Example #3
def main(_):
    # Import data
    # Note: one_hot=True turns each label into a vector; e.g. for the digit "7" with
    # 10 classes, the encoding is [0,0,0,0,0,0,0,1,0,0].
    # The call returns a dataset object with three splits: ['train', 'validation', 'test'].
    # The train and test sizes are fixed by the raw data; validation is carved out of the
    # raw train split, so its size (set by validation_size) must be smaller than train's.
    # The number of classes is set by num_classes; label values must be integers in [0, num_classes).
    num_classes = 2
    # swallowsound = read_data_sets(FLAGS.data_dir,
    #                             gzip_compress=False,
    #                             train_imgaes='LearnSamples.bin',
    #                             train_labels='LearnSamplesflag.bin',
    #                             test_imgaes='TestSamples.bin',
    #                             test_labels='TestSamplesflag.bin',
    #                             one_hot=True,
    #                             validation_size=50,
    #                             num_classes = num_classes,
    #                             MSB=False)

    swallowsound = read_data_sets(FLAGS.data_dir,
                                  gzip_compress=False,
                                  train_imgaes='train-images-idx3-ubyte',
                                  train_labels='train-labels-idx1-ubyte',
                                  test_imgaes='t10k-images-idx3-ubyte',
                                  test_labels='t10k-labels-idx1-ubyte',
                                  one_hot=True,
                                  validation_size=50,
                                  num_classes=num_classes,
                                  MSB=True)

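    # original_shape is (num_examples, height, width, channels); bail out if the data
    # was stored without spatial dimensions, otherwise flatten height*width into the
    # per-example input size (e.g. 28*28 = 784 for MNIST-style images).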
    original_shape = swallowsound.train.original_shape
    if len(original_shape) < 4:
        return

    size = original_shape[1] * original_shape[2]

    # Create the model
    x = tf.placeholder(tf.float32, [None, size])
    W = tf.Variable(tf.zeros([size, num_classes]))
    b = tf.Variable(tf.zeros([num_classes]))
    y = tf.matmul(x, W) + b

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, num_classes])

    # The raw formulation of cross-entropy,
    #
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
    #                                 reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
    # outputs of 'y', and then average across the batch.
    cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
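    # A quick numeric check of the instability described above (illustrative values):
    #   >>> import numpy as np
    #   >>> logits = np.array([1000., 0.]); y = np.array([0., 1.])
    #   >>> p = np.exp(logits - logits.max()); p /= p.sum()  # softmax underflows to [1., 0.]
    #   >>> -np.sum(y * np.log(p))                           # naive form -> inf
    #   >>> np.log(np.sum(np.exp(logits - logits.max()))) + logits.max() - logits @ y
    #   1000.0                                               # log-sum-exp form stays finite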
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for _ in range(1000):
        batch_xs, batch_ys = swallowsound.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: swallowsound.test.images,
                                        y_: swallowsound.test.labels}))

    print("b: ",sess.run(b))