Example #1
def main(_):
    ds = dataset.read_data_sets(FLAGS.data_dir)

    x = tf.placeholder(tf.float32, [None, 500])
    w = tf.Variable(tf.zeros([500, 3]))
    b = tf.Variable(tf.zeros([3]))
    y = tf.nn.softmax(tf.matmul(x, w) + b)
    # y = tf.matmul(x, w) + b

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 3])

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Train
    for _ in range(1000):
        batch_xs, batch_ys = ds.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: ds.test.features, y_: ds.test.labels}))
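The commented-out logits line above points at a numerically safer formulation: computing the loss from raw logits avoids tf.log(0) when a softmax output underflows. A minimal sketch, assuming the same x, w, b, and y_ as in this example:

    # Sketch (assumption): stable cross-entropy computed from logits.
    logits = tf.matmul(x, w) + b
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)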
Example #2
def __init__(self, model, _dataset=None):  # @look
    self.model = model
    if _dataset:
        self.dataset = _dataset
    else:
        self.dataset = dataset.read_data_sets()
    self.z = self.generate_z()
Example #3
def run_train():
    """Train CAPTCHA for a number of steps."""

    test_data = dataset.read_data_sets(
        dataset_dir='/home/sw/Documents/rgb-nir2/nirscene1/field_2ch.npz')
    with tf.Graph().as_default():

        images_pl1, images_pl2, labels_pl = placeholder_inputs(BATCH_SIZE)
        conv_features1, features1 = model.get_features(images_pl1, reuse=False)
        conv_features2, features2 = model.get_features(images_pl2, reuse=True)
        predicts = tf.sqrt(
            tf.reduce_sum(tf.square(features1 - features2), axis=1))
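        # predicts holds the per-pair Euclidean (L2) distance between the two
        # feature embeddings; smaller values mean the patches match more closely.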

        saver = tf.train.Saver()
        sess = tf.Session()

        saver.restore(sess, "ckpt/model.ckpt-479000")

        print('Test Data Eval:')
        do_eval(sess,
                predicts,
                images_pl1,
                images_pl2,
                labels_pl,
                test_data,
                name='notredame')

        sess.close()
Example #4
def run_test():
    """Restore the trained model and compute ROC statistics on the test set."""
    test_data = dataset.read_data_sets(
        dataset_dir='/home/sw/Documents/rgb-nir2/nirscene1/water_2ch.npz')
    with tf.Graph().as_default():

        images_pl1, images_pl2, labels_pl = placeholder_inputs(BATCH_SIZE)
        conv_features1, features1 = model.get_features(images_pl1, reuse=False)
        conv_features2, features2 = model.get_features(images_pl2, reuse=True)
        predicts = tf.sqrt(
            tf.reduce_sum(tf.square(features1 - features2), axis=1))

        saver = tf.train.Saver()
        sess = tf.Session()
        saver.restore(sess, "ckpt/model.ckpt-479000")

        outputs = []
        labels = []

        steps_per_epoch = test_data.num_examples // BATCH_SIZE
        num_examples = steps_per_epoch * BATCH_SIZE

        for step in range(steps_per_epoch):
            feed_dict, label = fill_feed_dict(
                test_data, images_pl1, images_pl2, labels_pl, shuffle=False)
            predicts_value = sess.run(predicts, feed_dict=feed_dict)
            # Invert the distance so that larger scores indicate a better match.
            predicts_value = 2 - predicts_value
            outputs.extend(predicts_value)
            labels.extend(label)

            view_bar('processing:', step, steps_per_epoch)

        draw_roc(outputs, labels)
        sess.close()
Example #5
def generate():
#    data1 = dataset.read_data_sets(DATA1_DIR, DATA2_DIR, reshape=False, one_hot=True, noise=1,
#                                   num_train=NUM_TRAIN, num_test=NUM_TEST, data_index = data1_index)
#    data2 = dataset.read_data_sets(DATA1_DIR, DATA2_DIR, reshape=False, one_hot=True, noise=1,
#                                   num_train=NUM_TRAIN, num_test=NUM_TEST, data_index = data2_index)
    data3 = dataset.read_data_sets(DATA1_DIR, DATA2_DIR, reshape=False, one_hot=True, noise=0,
                                   num_train=NUM_TRAIN, num_test=NUM_TEST, data_index=data3_index)

    for i in range(num_sample):
        print("---------- Iteration " + str(i) + " ----------")
#        train.Train(MODEL1_DIR + str(i), data1)
#        train.Train(MODEL2_DIR + str(i), data2)
        train.Train(MODEL3_DIR + str(i), data3)
Example #6
        test_labels30 = np.reshape(test_labels[30 * 10 + j], (1, 8))
    else:
        test_images30 = np.concatenate(
            (test_images30,
             np.reshape(test_images[30 * 10 + j],
                        (1, 61, 25, options["MEASURE"]))))
        test_labels30 = np.concatenate(
            (test_labels30, np.reshape(test_labels[30 * 10 + j], (1, 8))))

print(np.average(test_labels30, 0))

# ---------------------------train------------------------

dataset = ds.read_data_sets(images,
                            labels,
                            test_images30,
                            test_labels30,
                            fake_data=0)
cnn = model_cnn_regression.ModelCnnRegression(mode='start',
                                              options=options,
                                              dataset=dataset)
cnn.BATCH_SIZE = 32
cnn.TEST_BATCH_SIZE = 10
cnn.train()
# test_x = np.reshape(images2, (-1, (cnn.CYCLE + 1) * cnn.MEASURE * cnn.STATE))
# test_y = np.reshape(labels2, (-1, 8))
# pred_y = np.array(cnn.predict(test_x, test_y))
# pred_y = pred_y.reshape(labels2.shape)
#
# data_overview.dataset_overview(labels2)
#
Example #7
    args = load_config(os.path.join(data_path, 'config.txt'))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))
    h = hypergraph(args)
    h.load()
    return h


if __name__ == '__main__':
    args = parser.parse_args()
    if args.options is not None:
        args = load_config(args.options)
    if args.seed is not None:
        np.random.seed(args.seed)
    dataset = read_data_sets(args.data_path)
    args.dim_feature = [
        sum(dataset.train.nums_type) - n for n in dataset.train.nums_type
    ]
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))
    h = hypergraph(args)
    begin = time.time()
    h.train(dataset)
    end = time.time()
    print("time, ", end - begin)
    h.save()
    h.save_embeddings(dataset)
    K.clear_session()
Example #8
    # Cost function
    cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-15, 1.0)))

    # Exponentially decaying learning rate
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100, 0.98, staircase=True)
    # Passing global_step to minimize() will increment it at each step.

    # Optimizer
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy, global_step=global_step)
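    # Worked example of the schedule above: with staircase=True the rate drops
    # by a factor of 0.98 every 100 steps, so after 1000 steps it equals
    # starter_learning_rate * 0.98**10 ≈ 0.817 * starter_learning_rate.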

    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    datasets = dataset.read_data_sets('train/train', 'train/validation', 'whales.csv', 'whales.csv')

    sess.run(tf.initialize_all_variables())

    # weights control
    # W_conv1_mean = tf.reduce_mean(tf.reduce_mean(tf.abs(W_conv1), 1), 0)
    # W_conv2_mean = tf.reduce_mean(tf.reduce_mean(tf.reduce_mean(tf.abs(W_conv2), 0), 0), 0)
    # W_conv3_mean = tf.reduce_mean(tf.reduce_mean(tf.reduce_mean(tf.abs(W_conv3), 0), 0), 0)
    # W_conv4_mean = tf.reduce_mean(tf.reduce_mean(tf.reduce_mean(tf.abs(W_conv4), 0), 0), 0)
    # W_conv5_mean = tf.reduce_mean(tf.reduce_mean(tf.reduce_mean(tf.abs(W_conv5), 0), 0), 0)
    # W_fc1_mean = tf.reduce_mean(tf.abs(W_fc1), 0)
    w_fc2_mean = tf.reduce_mean(tf.abs(w_fc2))

    h_conv1_mean = tf.reduce_mean(tf.abs(h_conv1))
    h_conv2_mean = tf.reduce_mean(tf.abs(h_conv2))
    h_conv3_mean = tf.reduce_mean(tf.abs(h_conv3))
Example #9
File: test.py  Project: MrWater/XPyTest
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 30 11:10:59 2017
@author: shier43
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import dataset
# Load the dataset
mnist = dataset.read_data_sets("MNIST_data/", one_hot=True)

# Print the training data size
print("Training data:", mnist.train.num_examples)
# Print the validation data size
print("Validating data:", mnist.validation.num_examples)

import tensorflow as tf

# Placeholder for the input image data
x = tf.placeholder(tf.float32, [None, 784])

# Weights and biases
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Apply the softmax model
y = tf.nn.softmax(tf.matmul(x, W) + b)

Example #10
DEFAULT_OPTIONS = {'CYCLE': 60,
                   'MEASURE': 18,
                   'STATE': 25,
                   'ckpt_name': 'CNN_1',
                   'BATCH_SIZE': 200,
                   'TEST_BATCH_SIZE': 300,
                   'MAX_ITERATION': 200000,
                   'learning_step': [1000, 2000, 4000, 8000, 12000],
                   'learning_rate': [1e-3, 5e-4, 1e-4, 5e-5, 1e-5, 5e-6]}
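The five boundaries in 'learning_step' pair with the six values in 'learning_rate': rate i applies until the step reaches learning_step[i], and the last rate applies afterwards. A minimal sketch of how such a schedule is typically consumed (an illustration under that assumption; the actual ModelCnnRegression internals are not shown here):

# Sketch (assumption): map the config above onto a piecewise-constant rate.
global_step = tf.Variable(0, trainable=False)
lr = tf.train.piecewise_constant(global_step,
                                 boundaries=DEFAULT_OPTIONS['learning_step'],
                                 values=DEFAULT_OPTIONS['learning_rate'])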

options = DEFAULT_OPTIONS
MASK = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

images = numpy.load(r"./npy/images.npy")
labels = numpy.load(r"./npy/labels.npy")
test_images = numpy.load(r"./npy/test_images.npy")
test_labels = numpy.load(r"./npy/test_labels.npy")

images[:, :, :, 1] = images[:, :, :, 1] / 20000

for i in range(18):
    if MASK[i] == 0:
        images[:, :, :, i] = 0
        test_images[:, :, :, i] = 0

dataset = dataset.read_data_sets(images, labels, test_images, test_labels)
cnn = model_cnn_regression.ModelCnnRegression(mode='train', options=options, dataset=dataset)
cnn.run()
# dnn = model_dnn.ModelDnnRegression(mode='train', options=options, dataset=dataset)
# dnn.run()
Example #11
def run_training():
    # Basic model parameters as external flags.
    # flags = tf.app.flags
    # FLAGS = flags.FLAGS
    # flags.DEFINE_string('input_dir', 'input', 'Input Directory.')

    #LOAD PACKAGES 
    # print 'Path in the argument:', str(sys.argv[1])
    mnist = dataset.read_data_sets(one_hot=True) 
    trainimgs   = mnist.train.images 
    trainlabels = mnist.train.labels 
    testimgs    = mnist.test.images 
    testlabels  = mnist.test.labels 
    ntrain      = trainimgs.shape[0] 
    ntest       = testimgs.shape[0] 
    dim         = trainimgs.shape[1] 
    nout        = trainlabels.shape[1] 
    print ("Packages loaded") 
      
    weights = { 
    'ce1': tf.Variable(tf.random_normal([ksize, ksize, 1, n1],stddev=0.1)), 
    'ce2': tf.Variable(tf.random_normal([ksize, ksize, n1, n2],stddev=0.1)), 
    'ce3': tf.Variable(tf.random_normal([ksize, ksize, n2, n3],stddev=0.1)), 
    'ce4': tf.Variable(tf.random_normal([ksize, ksize, n3, n4],stddev=0.1)), 
    'ce5': tf.Variable(tf.random_normal([ksize, ksize, n4, n5],stddev=0.1)), 
    'ce6': tf.Variable(tf.random_normal([ksize, ksize, n5, n6],stddev=0.1)), 
    'cd6': tf.Variable(tf.random_normal([ksize, ksize, n5, n6],stddev=0.1)), 
    'cd5': tf.Variable(tf.random_normal([ksize, ksize, n4, n5],stddev=0.1)), 
    'cd4': tf.Variable(tf.random_normal([ksize, ksize, n3, n4],stddev=0.1)), 
    'cd3': tf.Variable(tf.random_normal([ksize, ksize, n2, n3],stddev=0.1)), 
    'cd2': tf.Variable(tf.random_normal([ksize, ksize, n1, n2],stddev=0.1)), 
    'cd1': tf.Variable(tf.random_normal([ksize, ksize, 1, n1],stddev=0.1)) 
    } 
    biases = { 
    'be1': tf.Variable(tf.random_normal([n1], stddev=0.1)), 
    'be2': tf.Variable(tf.random_normal([n2], stddev=0.1)), 
    'be3': tf.Variable(tf.random_normal([n3], stddev=0.1)), 
    'be4': tf.Variable(tf.random_normal([n4], stddev=0.1)), 
    'be5': tf.Variable(tf.random_normal([n5], stddev=0.1)), 
    'be6': tf.Variable(tf.random_normal([n6], stddev=0.1)), 
    'bd6': tf.Variable(tf.random_normal([n5], stddev=0.1)), 
    'bd5': tf.Variable(tf.random_normal([n4], stddev=0.1)), 
    'bd4': tf.Variable(tf.random_normal([n3], stddev=0.1)), 
    'bd3': tf.Variable(tf.random_normal([n2], stddev=0.1)), 
    'bd2': tf.Variable(tf.random_normal([n1], stddev=0.1)), 
    'bd1': tf.Variable(tf.random_normal([1],  stddev=0.1)) 
    } 
     
    print ("Network ready") 
     
    x = tf.placeholder(tf.float32, [None, dim]) 
    y = tf.placeholder(tf.float32, [None, dim]) 
    keepprob = tf.placeholder(tf.float32) 
    pred = cae(x, weights, biases, keepprob)
    cost = tf.reduce_sum(
        tf.square(pred - tf.reshape(y, shape=[-1, 256, 256, 1])))
    learning_rate = 0.001 
    optm = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    print ("Functions ready")
     
    sess = tf.Session() 
    sess.run(init) 
    # mean_img = np.mean(mnist.train.images, axis=0) 
    mean_img = np.zeros((65536)) 
    # Fit all training data 
    batch_size = 128 
    n_epochs   = 251
     
    print("Start training..") 
    for epoch_i in range(n_epochs): 
        for batch_i in range(mnist.train.num_examples // batch_size): 
            batch_xs, _ = mnist.train.next_batch(batch_size) 
            trainbatch = np.array([img - mean_img for img in batch_xs]) 
            trainbatch_noisy = trainbatch
            # trainbatch_noisy = trainbatch + 0.3*np.random.randn( 
            #     trainbatch.shape[0], 65536) 
            # f, a = plt.subplots(2, 2, figsize=(10, 5))
            # a[0][0].imshow(np.reshape(trainbatch[0], (256, 256))) 
            # a[0][1].imshow(np.reshape(trainbatch[1], (256, 256))) 

            # a[1][0].imshow(np.reshape(trainbatch_noisy[0], (256, 256))) 
            # a[1][1].imshow(np.reshape(trainbatch_noisy[1], (256, 256))) 
            # f.show()
            # plt.draw()
            # plt.show()
            sess.run(optm, feed_dict={x: trainbatch_noisy, y: trainbatch, keepprob: 0.7}) 
        print ("[%02d/%02d] cost: %.4f" % (epoch_i, n_epochs, sess.run(cost, feed_dict={x: trainbatch, y: trainbatch, keepprob: 1.}))) 
Example #12
tfs = list(chain.from_iterable([
    [partial(TF_rotate, angle=p) for p in [2.5, -2.5, 5, -5, 10, -10]],
    [partial(TF_zoom, scale=p) for p in [0.9, 1.1]],
    [partial(TF_shear, shear=p) for p in [0.1, -0.1, 0.2, -0.2, 0.4, -0.4]],
    [partial(TF_swirl, strength=p) for p in [0.1, -0.1, 0.2, -0.2, 0.4, -0.4]],
    [partial(TF_elastic_deform, alpha=p) for p in [1.0, 1.25, 1.5]],
    [TF_erosion, TF_dilation]
]))

#####################################################################

if __name__ == '__main__':

    # Load MNIST data
    dims     = [28, 28, 1]
    DATA_DIR = 'experiments/mnist/data'
    if not os.path.exists(DATA_DIR):
        os.makedirs(DATA_DIR)
    
    data_iterator = read_data_sets(DATA_DIR, one_hot=True)
    X_train, Y_train = data_iterator.train.images, data_iterator.train.labels
    X_v, Y_v = data_iterator.validation.images, data_iterator.validation.labels
    X_test, Y_test = data_iterator.test.images, data_iterator.test.labels

    if FLAGS.n_folds > 0:
        X_train, Y_train = select_fold(X_train, Y_train)

    # Run training scripts
    train(X_train, dims, tfs, Y_train=Y_train, X_valid=X_v, Y_valid=Y_v,
        n_classes=10)
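A minimal sketch of exercising the transform list directly (an illustration only: it assumes each TF_* callable maps an image array to a transformed array, and does not show how train() applies tfs internally):

    # Sketch (assumption): apply every transform in tfs to one MNIST digit.
    sample = X_train[0].reshape(28, 28)
    augmented = [tf_fn(sample) for tf_fn in tfs]  # one output per transform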
Example #13
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
import argparse

import dataset

data1 = dataset.read_data_sets("./data/mnist/", "./data/emnist", reshape=False, one_hot=True,
                               num_train=10, num_test=10, data_index = [1]*10, noise=1)
print(data1.train.images.shape)
print(data1.train.labels.shape)
print(data1.test.images.shape)
print(data1.test.labels.shape)

#mnist = dataset.read_data_sets("./data/mnist", "./data/emnist", reshape=False, one_hot=True, num_train=100, num_test=40, data_index=[1, 1, 2, 2, 1, 1, 2, 2, 1, 2])

#train_data = mnist.train.images
#train_labels = mnist.train.labels

#train_data, train_labels = utils.LoadTrain()

#print(train_data.shape)
#print(train_labels.shape)

#for i in range(50):
#    print(train_labels[i])
#    utils.Convert2Image(train_data[i], "/home/ubuntu/image1/" + str(i) + ".png")
Example #14
import dataset as ds
import tensorflow as tf
'''
Loss: 0.5063 ~ Acc: 0.7983
Loss: 0.5056 ~ Acc: 0.7997
Loss: 0.5052 ~ Acc: 0.8025

two convolutional layers => flatten layer => fully connected layer
'''

data_sets = ds.read_data_sets()
writer = tf.summary.FileWriter('/tmp/logs')

with tf.name_scope('inputs'):
	x = tf.placeholder(tf.float32, [None, 5])
	y_true = tf.placeholder(tf.float32, [None, 2])
	data = tf.reshape(x, [-1, 5, 1])

with tf.name_scope('vars'):
	K1 = tf.Variable(tf.truncated_normal([3, 1, 4], dtype=tf.float32, stddev=0.1))
	K2 = tf.Variable(tf.truncated_normal([4, 4, 8], dtype=tf.float32, stddev=0.1))
	WC = tf.Variable(tf.truncated_normal([8, 2], dtype=tf.float32, stddev=0.1))
	B1 = tf.Variable(tf.ones([4]))
	B2 = tf.Variable(tf.ones([8]))
	BC = tf.Variable(tf.ones([2]))

with tf.name_scope('model'):
	L1 = tf.nn.relu(tf.nn.conv1d(data, K1, 1, 'SAME') + B1)
	L2 = tf.nn.relu(tf.nn.conv1d(L1, K2, 5, 'SAME') + B2)
	LF = tf.reshape(L2, [-1, 8])
	LC = tf.matmul(LF, WC) + BC
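
A shape trace of the model above can be worked out from the kernel and stride choices:

# Shape trace for the model above (batch dimension omitted):
#   x (5,) -> data (5, 1) -> L1, stride 1, SAME: (5, 4)
#   -> L2, stride 5, SAME: (1, 8) -> LF: (8,) -> LC: (2,)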
Example #15
   b_fc1 = bias_variable([d5], name="biases_fc1")

   h_pool4_flat = tf.reshape(h_pool4, [-1, 12*3*d4])
   h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)

   # DROPOUT
   keep_prob = tf.placeholder("float")
   h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

   # READOUT LAYER
   W_fc2 = weight_variable([d5, nClasses], name="Weights_fc2")
   b_fc2 = bias_variable([nClasses], name="biases_fc2")
   y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

   # Load the dataset
   datasets = dataset.read_data_sets('noses', 'jackAndJill.csv', j)

   # Train and eval the model
   cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)))
   tf.scalar_summary('cross entropy', cross_entropy)

   # train_step = tf.train.GradientDescentOptimizer(0.00001).minimize(cross_entropy)
   train_step = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy)
   correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
   accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
   sess.run(tf.initialize_all_variables())


   summary_op = tf.merge_all_summaries()
   summary_writer = tf.train.SummaryWriter('/tmp/whales',
                                               graph_def=sess.graph_def)
Example #16
run_example = True  # will run on a mini dataset for demonstration purposes (see note above)

####  YOUR SETTINGS - END ####

if run_example:
    Conf = ConfSample
    filename_sequence = ConfSample.filename_sequence
    filename_expression = ConfSample.filename_expression
    filename_dist = ConfSample.filename_dist

train, validation, test, validation_ch3_blind, test_ch3_blind, validation_e_blind, test_e_blind = Dataset.read_data_sets(
    filename_sequence=filename_sequence,
    filename_expression=filename_expression,
    filename_labels=None,
    filename_dist=filename_dist,
    train_portion_subjects=0,
    train_portion_probes=0,
    validation_portion_subjects=0,
    validation_portion_probes=0,
    directory='../res/',
    is_prediction=True)

d = pd.read_csv('../res/' + filename_expression, nrows=1)
n_genes = len(d.columns) - 1

ff_hidden_units = [[50, 0]]
ff_n_hidden = 3
conv_filters = [64]
conv_pools = [10]
conv_strides = [10]
connected_hidden_units = [[50, 0]]
Example #17
def main(_):
    """Run the NN."""
    mnist = dataset.read_data_sets(FLAGS.data_dir, one_hot=True)
Example #18
        perm = np.arange(x.shape[0])
        np.random.shuffle(perm)
        x = x[perm]
        y = y[perm]
    n = int(test_size * x.shape[0])
    if n == 0:
        return x, y, np.array([]), np.array([])
    if test_size == 1:
        return np.array([]), np.array([]), x, y,
    return x[0:-n], y[0:-n], x[-n:x.shape[0]], y[-n:y.shape[0]]
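
A quick sanity check of the split above (assuming this fragment is the tail of the train_test_split helper invoked below): with 10 samples and test_size=0.2, n = 2, so 8 rows go to train and 2 to test.

# Sketch (assumption): exercise the helper on toy data.
x_demo = np.arange(10).reshape(10, 1)
y_demo = np.arange(10)
x_tr, y_tr, x_te, y_te = train_test_split(x_demo, y_demo, test_size=0.2)
# x_tr.shape == (8, 1); x_te.shape == (2, 1)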


# Train the model and save it
if __name__ == "__main__":
    print("Loading data... ")
    data_set = dataset.read_data_sets()
    train_sentences = data_set.train.sentences
    train_labels = data_set.train.labels
    embedding_weights = data_set.embedding_weights
    print("sentences_train.shape: %s labels_train.shape: %s" %
          (train_sentences.shape, train_labels.shape))
    print("embedding_weights.shape: {}".format(embedding_weights.shape))
    x_train, y_train, x_valid, y_valid = train_test_split(train_sentences,
                                                          train_labels,
                                                          test_size=0)
    model = define_model(embedding_weights)
    # Train the model
    model = train_lstm(model, x_train, y_train)
    # # Evaluate the model
    # print('Evaluate...')
    # score = model.evaluate(x_valid, y_valid, batch_size=BATCH_SIZE)
Example #19
def train(batchNum = 500,
          batchSize = 200,
          ImagePatchWidth = 20,
          ImagePatchStep = 4,
          labelMode = 'PRO',
          label_mutiplier = 1.0,
          hidden_units = [200, 400, 200],
          steps = 200,
          optimizer = 'Adagrad', #"SGD", "Adam", "Adagrad"
          learning_rate = 0.001,
          clip_gradients = 5.0,
          config = None,
          verbose = 1,
          dropout = None):
    """Train deep neural-network.
    Parameters:
      hidden_units: List of hidden units per layer.
      batch_size: Mini batch size.
      steps: Number of steps to run over data.
      optimizer: Optimizer name (or class), for example "SGD", "Adam", "Adagrad".
      learning_rate: If this is a constant float value, no decay function is
        used. Alternatively, a customized decay function can be passed that
        accepts global_step as a parameter and returns a Tensor,
        e.g. an exponential decay function:
        def exp_decay(global_step):
            return tf.train.exponential_decay(
                0.1, global_step,
                decay_steps=2, decay_rate=0.001)
      continue_training: when continue_training is True, the model, once
        initialized, is trained further on every call of fit.
      config: RunConfig object that controls the configurations of the session,
        e.g. num_cores, gpu_memory_fraction, etc.
      verbose: Controls the verbosity, possible values:
        0: the algorithm and debug information is muted.
        1: trainer prints the progress.
        2: log device placement is printed.
      dropout: When not None, the probability we will drop out a given coordinate.
    """
    print ('Training deep learning neural network ...')
    # generate data set
    trainDataset = ds.read_data_sets(
        instanceSize = ImagePatchWidth,
        stride = ImagePatchStep,
        instanceMode = 'train',
        labelMode = labelMode,
        label_mutiplier = label_mutiplier)
   
    # deep neural-network regression class (DNNRegressor)
    classifier = skflow.TensorFlowDNNRegressor(
        hidden_units = hidden_units,
        batch_size = 32,
        steps = steps,
        optimizer = optimizer, #"SGD", "Adam", "Adagrad"
        learning_rate = learning_rate,
        continue_training = True,
        clip_gradients = clip_gradients,
        config = config,
        verbose = verbose,    
        dropout = dropout)
   
    # train the DNNRegressor on generated data set
    probar = progress.progress(0, batchNum)
    gv.log_write = False
    for i in range(batchNum):
        probar.setCurrentIteration(i+1)
        probar.setInfo(
            prefix_info = 'Training ...',
            suffix_info = 'Batch: ' + str(i+1) + '/' + str(batchNum))
        probar.printProgress()
        images, labels = trainDataset.next_batch(batchSize)
        if gv.log_write:
            classifier.fit(images, labels,
                           logdir = gv.__DIR__ + gv.tensorflow_log_dir)
        else:
            classifier.fit(images, labels)
    return classifier, trainDataset
Example #20
import tensorflow as tf
import pickle as pkl
import numpy as np
import dataset as Data

print "Loading Dataset.."
dataset = pkl.load(open("./packeddata/mnist.pkl"))
category = 10  #numbers of categories

pictures = dataset['train_image']

labels = dataset['train_label']

mnist = Data.read_data_sets(pictures, labels, 10, one_hot=True, reshape=False)


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
Example #21
def doDumbNet(trainDir, valDir, trainCsv, valCsv):
   f1 = open('log_%d' % (time.time()), 'w+')
   f1.write('AAAAA\n')
   f1.write("Start %s\n" % time.time())
   f1.flush()

   #    # Force CPU only mode
   with tf.device('/cpu:0'):
      # Creates a session with log_device_placement set to True.
      # sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
      sess = tf.Session()

      # Constants
      nClasses = 38
      imageSize = 256
      batchSize = 10
      learningRate = 1e-4
      dropOutValue = 0.5
      f1.write('nClasses: %d, imageSize: %d, batchSize: %d, learningRate: %e, dropOut: %f\n'
                    % (nClasses, imageSize, batchSize, learningRate, dropOutValue))
      f1.flush()
      # Input images are imageSize x imageSize with a single channel
      x = tf.placeholder("float", shape=[None, imageSize, imageSize, 1], name="Input")
      # One output per class (nClasses labels)
      y_ = tf.placeholder("float", shape=[None, nClasses], name="Output")

      # CONVOLUTIONAL NEURAL NET
      # The first two dimensions are the patch size, the next is the number of input channels,
      # and the last is the number of output channels.
      # We will also have a bias vector with a component for each output channel.
      d1 = 32
      d2 = 32
      d3 = 64
      d4 = 64
      d5 = 64
      fc = 300

      W_conv1 = weight_variable([5, 5, 1, d1], name="Weights_conv1")
      b_conv1 = bias_variable([d1], name="b_conv1")

      # We then convolve x_image with the weight tensor,
      # add the bias, apply the ReLU function, and finally max pool.
      h_conv1 = tf.nn.sigmoid(conv2d(x, W_conv1) + b_conv1)
      h_pool1 = max_pool_2x2(h_conv1, name="pool1")

      # SECOND CONV LAYER
      # In order to build a deep network, we stack several layers of this type.
      # The second layer will have 64 features for each 5x5 patch.
      W_conv2 = weight_variable([5, 5, d1, d2], name="Weights_conv2")
      b_conv2 = bias_variable([d2], name="biases_conv2")

      h_conv2 = tf.nn.sigmoid(conv2d(h_pool1, W_conv2) + b_conv2)
      h_pool2 = max_pool_2x2(h_conv2, name="pool2")

      # THIRD CONV LAYER
      W_conv3 = weight_variable([5, 5, d2, d3], name="Weights_conv3")
      b_conv3 = bias_variable([d3], name="biases_conv3")

      h_conv3 = tf.nn.sigmoid(conv2d(h_pool2, W_conv3) + b_conv3)
      h_pool3 = max_pool_2x2(h_conv3, name="pool3")

      # h_pool3_slice = tf.slice(h_pool3, [0, 0, 0, 0], [10, 28, 28, 1])
      # h_pool3_img = tf.reshape(h_pool3_slice, [10, 28, 28, 1])
      # tf.image_summary('filtered', h_pool3_img, max_images=10)

      # FOURTH CONV LAYER
      W_conv4 = weight_variable([3, 3, d3, d4], name="Weights_conv4")
      b_conv4 = bias_variable([d4], name="biases_conv4")

      h_conv4 = tf.nn.sigmoid(conv2d(h_pool3, W_conv4) + b_conv4)
      h_pool4 = max_pool_2x2(h_conv4, name="pool4")
      # FIFTH CONV LAYER
      W_conv5 = weight_variable([3, 3, d4, d5], name="Weights_conv5")
      b_conv5 = bias_variable([d5], name="biases_conv5")

      h_conv5 = tf.nn.sigmoid(conv2d(h_pool4, W_conv5) + b_conv5)
      h_pool5 = max_pool_2x2(h_conv5, name="pool5")

      # DENSELY CONNECTED LAYER
      # Now that the image size has been reduced to 8x8,
      # we add a fully-connected layer with fc neurons to allow processing on the entire image.
      # We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight
      # matrix, add a bias, and apply the sigmoid activation.

      W_fc1 = weight_variable([8 * 8 * d5, fc], name="Weights_fc1")
      b_fc1 = bias_variable([fc], name="biases_fc1")

      h_conv5_flat = tf.reshape(h_pool5, [-1, 8 * 8 * d5])
      h_fc1 = tf.nn.sigmoid(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)

      # DROPOUT
      keep_prob = tf.placeholder("float")
      h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

      # READOUT LAYER
      W_fc2 = weight_variable([fc, nClasses], name="Weights_fc2")
      b_fc2 = bias_variable([nClasses], name="biases_fc2")
      y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

      # Load the dataset
      datasets = dataset.read_data_sets(trainDir, valDir, trainCsv, valCsv)

      # Train and eval the model
      cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)))
      tf.scalar_summary('cross entropy', cross_entropy)

      # train_step = tf.train.GradientDescentOptimizer(0.00001).minimize(cross_entropy)
      train_step = tf.train.AdamOptimizer(learningRate).minimize(cross_entropy)
      correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
      sess.run(tf.initialize_all_variables())

      #    summary_op = tf.merge_all_summaries()
      #    summary_writer = tf.train.SummaryWriter('/tmp/whales', graph_def=sess.graph_def)
      saver = tf.train.Saver()
      # saver.restore(sess, 'my-model-batch1-10000')
      # utility = utilities.Utility(datasets, sess, nClasses, x, y_, keep_prob)

      for i in range(5000):
         step_start = time.time()

         batch = datasets.train.get_sequential_batch(batchSize)
         train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1}, session=sess)

         if i % 25 == 0:
            # evaluate accuracy on random 100 samples from train set
            batch = datasets.train.get_random_batch(100)
            f1.write("step %d finished, time = %s\n" % (i, time.time() - step_start))
            # acc, cross_entropyD, summary_str = sess.run([accuracy, cross_entropy, summary_op],
            #                                feed_dict={x: batch[0], y_: yTrain, keep_prob: 1})
            acc, cross_entropyD, yD = sess.run([accuracy, cross_entropy, y_conv],
                                           feed_dict={x: batch[0], y_: batch[1], keep_prob: 1})
            f1.write("Cross entropy = " + str(cross_entropyD) + "\n")
            f1.write("Accuracy = " + str(acc) + "\n")
            f1.write("Y = " + str(np.argmax(yD, axis=1)) + "\n")
            f1.write("\n--- %s seconds ---\n\n" % (time.time() - start_time))
            f1.flush()
            # summary_writer.add_summary(summary_str, i)
            # utility.draw(h_pool1, 113, 113, 6, 6)

         f1.write("\nstep %d finished, %d seconds \n" % (i, time.time() - step_start))
         f1.flush()

   saver.save(sess, 'my-model-%d' % (time.time()), global_step=10000)  # Evaluate the prediction
   test = datasets.validation.getAll()
   acc, y_convD, correct_predictionD = sess.run([accuracy, y_conv, correct_prediction],
                                                feed_dict={x: test[0], y_: test[1], keep_prob: 1.0})
   f1.write("Accuracy = " + str(acc) + "\n")
   f1.write("Correct prediction %d\n" % (sum(correct_predictionD)))
   f1.write("y %s\n" % str(test[1]))
   f1.write("y from net %s\n" % str(np.argmax(y_convD, axis=1)))
   f1.write("\n--- %s seconds ---\n\n" % (time.time() - start_time))
   f1.flush()
   f1.close()
Example #22
load_model_ID = 0  # see explanation above
test_time = False  # when you're ready for testing after using save_models below (our code handles the random splitting of the data into train/val/test)
save_models = False  # if you want to save the model while training (saved upon validation improvement or > 90 minutes)
sample = True  # will run on a mini dataset for demonstration purposes (see note above)

####  YOUR SETTINGS - END ####

if sample:
    Conf = ConfSample

train, validation, test, validation_ch3_blind, test_ch3_blind, validation_e_blind, test_e_blind = Dataset.read_data_sets(
    filename_sequence=Conf.filename_sequence,
    filename_expression=Conf.filename_expression,
    filename_labels=Conf.filename_labels,
    filename_dist=Conf.filename_dist,
    train_portion_subjects=Conf.train_portion_subjects,
    train_portion_probes=Conf.train_portion_probes,
    validation_portion_subjects=Conf.validation_portion_subjects,
    validation_portion_probes=Conf.validation_portion_probes,
    directory='../res/',
    load_model_ID=load_model_ID)

d = pd.read_csv('../res/' + Conf.filename_expression, nrows=1)
n_genes = len(d.columns) - 1

learning_rates = [0.001, 0.0001, 0.00001, 0.000001, 0.0000001, 0.00000001]
n_runs = range(len(learning_rates))
ff_hidden_units = [[50, 0] for i in n_runs]
ff_n_hidden = 3
conv_filters = [64 for i in n_runs]
conv_pools = [10 for i in n_runs]
Example #23
import tensorflow as tf
import os

import dataset
import cnn_model

data = dataset.read_data_sets(one_hot=True)

print("Images loaded..")

learning_rate = 0.001
training_iterations = 300000
batch_size = 28
display_step = 10
beta = 0.001

n_classes = 43
dropout = 0.5

# tf Graph input
x = tf.placeholder(tf.float32, [None, 32, 32, 3], "x")
y = tf.placeholder(tf.float32, [None, n_classes], "y")

keep_prob = tf.placeholder(tf.float32)

weights = {
    'wc1':
    tf.get_variable('wc1',
                    shape=(5, 5, 3, 16),
                    initializer=tf.contrib.layers.xavier_initializer()),
    'wc2':
Example #24
import sys
import matplotlib.pyplot as plt 
import numpy as np 
import math 
import tensorflow as tf 
import tensorflow.examples.tutorials.mnist.input_data as input_data 
import dataset
# LOAD PACKAGES
print('Path in the argument:', str(sys.argv[1]))
mnist = dataset.read_data_sets(str(sys.argv[1]), one_hot=True)
# mnist = input_data.read_data_sets("data/", one_hot=True) 


trainimgs   = mnist.train.images 
print ("trainimgs")
print (trainimgs.shape)

# print ('Mnist train 1st image:',trainimgs.shape)
# trainlabels = mnist.train.labels 
# print ("trainlabels")
# print (trainlabels.shape)
testimgs    = mnist.test.images
print ("testimgs")
print (testimgs.shape)
testlabels  = mnist.test.labels 
print ("testlabels")
print (testlabels.shape)
ntrain      = trainimgs.shape[0] 
print ("ntrain")
print (ntrain)
ntest       = testimgs.shape[0] 
Example #25
def trainModel(expDir='null', ii=0):
    config = ConfigParser()
    config.read(expDir + 'input_configuration')

    mode = config.get('MAIN_PARAMETER_SETTING', 'mode')
    l_rate = config.getfloat('MAIN_PARAMETER_SETTING', 'learning_rate')
    momentum = config.getfloat('MAIN_PARAMETER_SETTING', 'momentum')
    gamma = config.getfloat('MAIN_PARAMETER_SETTING', 'gamma')
    p_input = config.getfloat('MAIN_PARAMETER_SETTING', 'p_input')
    p_conv = config.getfloat('MAIN_PARAMETER_SETTING', 'p_conv')
    p_fc = config.getfloat('MAIN_PARAMETER_SETTING', 'p_fc')
    noise = config.getfloat('MAIN_PARAMETER_SETTING', 'noise')
    numepochs = config.getint('MAIN_PARAMETER_SETTING', 'training_epochs')
    dataset_name = config.get('MAIN_PARAMETER_SETTING', 'dataset_name')

    dataPath = utils.dataPathFromName(dataset_name)
    mnist = dataset.read_data_sets(data_dir=dataPath)

    if mode == 'scheduled_dropout':

        def _prob(x, gamma, p):
            return (1. - p) * np.exp(-gamma * x) + p
    elif mode == 'ann_dropout':

        def _prob(x, gamma, p):
            return -(1. - p) * np.exp(-gamma * x) + 1
    elif mode == 'regular_dropout':

        def _prob(x, gamma, p):
            return p
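
    # Worked example of the schedules above, with p = 0.5 and gamma = 1e-4
    # (illustrative values): scheduled_dropout gives _prob(0) = 1.0 (keep
    # everything at first) and _prob(10000) = 0.5 * exp(-1) + 0.5 ≈ 0.684,
    # decaying toward the target keep probability 0.5.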

    sess = tf.InteractiveSession()
    #(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
    #config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.4)))

    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    # DROPOUT
    # placeholder for the probability that a neuron's output is kept during dropout
    # keep_prob will be give to feed_dict to control the dropout rate
    keep_prob_input = tf.placeholder(tf.float32)
    keep_prob_conv = tf.placeholder(tf.float32)
    keep_prob_fc = tf.placeholder(tf.float32)

    # FIRST CONV LAYER
    #dim_conv1 = int(96/p_conv)
    dim_conv1 = 32
    # The convolutional layer computes 64 features for each 5x5 patch.
    # Its weight tensor has a shape of [5, 5, 3, 64] [5x5 patch, input channels, output channels]
    W_conv1 = weight_variable([5, 5, 1, dim_conv1], noise)
    #  bias vector with a component for each output channel
    b_conv1 = bias_variable([dim_conv1], noise)
    #To apply the layer, we first reshape x to a 4d tensor
    # Second and third dimensions correspond to image width and height
    # Final dimension corresponding to the number of color channels.
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    x_image_drop = tf.nn.dropout(x_image, keep_prob_input)
    # convolve x_image with the weight tensor, add the bias, apply ReLU
    h_conv1 = tf.nn.relu(conv2d(x_image_drop, W_conv1) + b_conv1)
    # finally max pool
    h_pool1 = max_pool_2x2(h_conv1)  # now the image is 14*14
    h_pool1_drop = tf.nn.dropout(h_pool1, keep_prob_conv)

    # SECOND CONV LAYER
    # Initialize variables
    #dim_conv2 = int(128/p_conv)
    dim_conv2 = 48
    W_conv2 = weight_variable([5, 5, dim_conv1, dim_conv2], noise)
    b_conv2 = bias_variable([dim_conv2], noise)
    # Construct the graph
    h_conv2 = tf.nn.relu(conv2d(h_pool1_drop, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)  # now the image is 7*7
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * dim_conv2])
    h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob_conv)

    #~ ## THIRD CONV LAYER
    #~ # Initialize variables
    #~ #dim_conv3 = int(256/p_fc)
    #~ dim_conv3 = 256
    #~ W_conv3 = weight_variable([5, 5, dim_conv2, dim_conv3], noise)
    #~ b_conv3 = bias_variable([dim_conv3],noise)
    #~ # Contruct the graph
    #~ h_conv3 = tf.nn.relu(conv2d(h_pool2_drop, W_conv3) + b_conv3)
    #~ h_pool3 = max_pool_3x3(h_conv3) # now the image is 4*4?

    #~ h_pool3_flat = tf.reshape(h_pool3, [-1, 4 * 4 * dim_conv3])
    #~ h_pool3_flat_drop = tf.nn.dropout(h_pool3_flat, keep_prob_conv)

    # DENSE LAYER 1
    #DIM_1 = int(2048/p_fc)
    DIM_1 = 2048
    W_fc1 = weight_variable([7 * 7 * dim_conv2, DIM_1], noise)
    b_fc1 = bias_variable([DIM_1], noise)
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat_drop, W_fc1) + b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob_fc)

    # DENSE LAYER 2
    #DIM_2 = int(2048/p_fc)
    DIM_2 = 1024
    W_fc2 = weight_variable([DIM_1, DIM_2], noise)
    b_fc2 = bias_variable([DIM_2], noise)
    h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob_fc)

    # READOUT LAYER
    W_out = weight_variable([DIM_2, 10], noise)
    b_out = bias_variable([10], noise)
    y_conv = tf.matmul(h_fc2_drop, W_out) + b_out

    # Loss Function for evaluation (i.e. compare with actual labels)
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))

    droppedOutLayers = [h_pool1, h_pool2_flat, h_fc1, h_fc2]

    # the actual operation on the graph
    train_step = tf.train.AdamOptimizer(l_rate,
                                        beta1=momentum).minimize(cross_entropy)
    #train_step = tf.train.GradientDescentOptimizer(1e-4,beta1=0.999).minimize(cross_entropy)

    # EVALUATE THE MODEL
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    #correct_prediction = tf.equal(tf.nn.top_k(y_conv,2)[1], tf.nn.top_k(y_,2)[1])
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    #AP  = sparse_average_precision_at_k(y_conv, tf.cast(y_, tf.int64), 1)
    #mAP =  tf.reduce_mean(tf.cast(AP, tf.float32))

    #################################################

    # Run the session to initialize variables
    sess.run(tf.global_variables_initializer())
    #sess.run(tf.local_variables_initializer())

    # Keep some history
    accTrSet = []
    accTeSet = []
    accValSet = []
    Xentr = []
    gammaValues = []

    # Some training params
    TRAIN_EVAL = True
    TEST_EVAL = True
    VALID = True
    batchsize = 128
    eval_batchsize = 200

    num_train_batches = mnist.train.labels.shape[0] // batchsize
    numiter = numepochs * num_train_batches
    num_test_batches = mnist.test.labels.shape[0] // eval_batchsize
    #num_valid_batches = mnist.validation.labels.shape[0] // eval_batchsize
    print("Epochs: %d \t Training batches: %d \t Iterations: %d \t Mode: %s"\
                    %(numepochs, num_train_batches, numiter, mode))

    start_time = time.time()
    ## TRAINING ITERATIONS
    for i in range(int(numiter)):

        # Dropout probabilities for this iteration
        _prob_input = _prob(i, gamma, p_input)
        _prob_conv = _prob(i, gamma, p_conv)
        _prob_fc = _prob(i, gamma, p_fc)
        gammaValues.append(_prob_fc)

        ###################################################
        # calculate accuracies and cost every 500 iterations
        if i % 100 == 0 and i != 0:
            ##############################################
            # calculate TRAIN  accuracy on the SINGLE BATCH
            #train_accuracy, xentropy = sess.run((accuracy, cross_entropy),
            #feed_dict={x:batch[0], y_: batch[1],
            #keep_prob_input: 1.0, keep_prob_conv: 1.0, keep_prob_fc: 1.0}) # no dropout
            #accTrSet.append(train_accuracy)
            #Xentr.append(xentropy)

            ##############################################
            # calculate TRAINING accuracy on the whole training set
            train_accuracy = 0.
            xentropy = 0.
            if TRAIN_EVAL:
                for j in range(
                        int(num_train_batches)):  # Must be done batchwise
                    batch = mnist.train.next_batch(batchsize)
                    t_a, x_e = sess.run(
                        (accuracy, cross_entropy),
                        feed_dict={
                            x: batch[0],
                            y_: batch[1],
                            keep_prob_input: 1.0,
                            keep_prob_conv: 1.0,
                            keep_prob_fc: 1.0
                        })  # no dropout

                    train_accuracy += t_a
                    xentropy += x_e
                train_accuracy = train_accuracy / num_train_batches
                xentropy = xentropy / num_train_batches
                accTrSet.append(train_accuracy)
                Xentr.append(xentropy)

            ##############################################
            # calculate TEST accuracy on the whole test set
            test_accuracy = 0.
            if TEST_EVAL:
                for j in range(
                        int(num_test_batches)):  # Must be done batchwise
                    batch = mnist.test.next_batch(eval_batchsize)
                    test_accuracy += accuracy.eval(
                        feed_dict={
                            x: batch[0],
                            y_: batch[1],
                            keep_prob_input: 1.0,
                            keep_prob_conv: 1.0,
                            keep_prob_fc: 1.0
                        })  # no dropout
                test_accuracy = test_accuracy / num_test_batches
                accTeSet.append(test_accuracy)

            ##############################################
            # Perform validation for early stopping
            valid_accuracy = 0.
            if VALID:
                # calculate VALIDATION accuracy on the whole validation set
                valid_accuracy = accuracy.eval(
                    feed_dict={
                        x: mnist.validation.images,
                        y_: mnist.validation.labels,
                        keep_prob_input: 1.0,
                        keep_prob_conv: 1.0,
                        keep_prob_fc: 1.0
                    })  # no dropout
                accValSet.append(valid_accuracy)
                #print("Droput prob: %f"%(prob))

                ## Early stopping
                #if len(accValSet)>5 and accValSet[-1]<accValSet[-2]:
                #break

            duration = time.time() - start_time
            start_time = time.time()
            print("step %d: \t cross entropy: %f \t training accuracy: %f \t test accuracy: %f \t valid accuracy: %f \t prob: %f \t time: %f"\
                            %(i,  xentropy, train_accuracy, test_accuracy, valid_accuracy, _prob_fc, duration))

        ## The actual training step
        # SCHEDULING DROPOUT: no droput at first, tends to 0.5 as iterations increase
        batch = mnist.train.next_batch(batchsize)

        activations = sess.run(droppedOutLayers,
                               feed_dict={
                                   x: batch[0],
                                   y_: batch[1],
                                   keep_prob_input: 1.0,
                                   keep_prob_conv: 1.0,
                                   keep_prob_fc: 1.0
                               })  # no dropout when inspecting activations

        train_step.run(
            feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob_input: _prob_input,  #0.9
                keep_prob_conv: _prob_conv,  #0.75
                keep_prob_fc: _prob_fc
            })  #0.5  # CHANGE HERE

    #!# End of training iterations #

    # Finally test on the test set #
    ## Testing on small gpus must be done batch-wise to avoid OOM
    test_accuracy = 0
    for j in range(num_test_batches):
        batch = mnist.test.next_batch(batchsize)
        test_accuracy += accuracy.eval(
            feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob_input: 1.0,
                keep_prob_conv: 1.0,
                keep_prob_fc: 1.0
            })  # no dropout

    print("test accuracy: %g" % (test_accuracy / num_test_batches))

    with open(expDir + str(ii) + 'accuracies.pkl', 'wb') as f:
        cPickle.dump((accTrSet, accValSet, accTeSet, Xentr),
                     f,
                     protocol=cPickle.HIGHEST_PROTOCOL)

    sess.close()
Example #26
import tensorflow as tf
import pickle as pkl
import numpy as np
import dataset as Data

print "Loading Dataset.."
dataset =pkl.load(open("./packeddata/full.pkl"))
category=dataset['category']  #numbers of categories
pictures=dataset['pictures']
labels=dataset['labels']

mathset = Data.read_data_sets(pictures,labels,category, one_hot=True,reshape=True)

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


sess = tf.InteractiveSession()

x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, category])
Example #27
			x_batch, y_true_batch = data.trainSet.next_batch(batch_size)
			feed_dict = {x: x_batch, y_true: y_true_batch, dropout_prob: dropout_p}
			session.run(optimizer, feed_dict)
			if current_epoch != data.trainSet.done_epoch:
				current_epoch += 1
				print ('Epoch: {0}'.format(data.trainSet.done_epoch))
			# Validate
			if i % 20 == 0 :
				x_batch, y_true_batch = data.validationSet.next_batch(batch_size*4)
				feed_dict = {x: x_batch, y_true: y_true_batch, dropout_prob: 1.0}
				print ('Loss: {0[0]:.4f} ~ Acc: {0[1]:.4f} '.format(session.run([loss, acc], feed_dict)))
				summary = session.run(merged_summary, feed_dict)
				writer.add_summary(summary, i)
				saver.save(session, save_dir)
				tf.train.write_graph(session.graph, graph_dir, 'train.pb', as_text=False)


def parse_args():
	parser = ap.ArgumentParser()
	parser.add_argument("--train", type=str, help="Path to train set")
	parser.add_argument("--validation", type=str, help="Path to validation set")
	parser.add_argument("--save_dir", type=str, help="Path to folder where to save checkpoint data")
	parser.add_argument("--graph_dir", type=str, help="Path to folder where to save checkpoint data")
	parser.add_argument("--max_iter", type=int, help="Path to folder where to save checkpoint data")
	return parser.parse_args()

if __name__ == "__main__":
	args = parse_args()
	data = ds.read_data_sets(args.train, args.validation, classes)
	train(args.max_iter, data, args.save_dir, args.graph_dir)
Example #28
def run_train():
    """Train CAPTCHA for a number of steps."""

    test_data = dataset.read_data_sets(
        dataset_dir='/home/sw/Documents/rgb-nir2/qd_fang2_9_8/field_2ch.npz')
    with tf.Graph().as_default():
        train_reader = Reader(
            '/home/sw/Documents/rgb-nir2/qd_fang2_9_8/country_2ch.tfrecord',
            name='train_data',
            batch_size=BATCH_SIZE)
        leftIMG, rightIMG, labels_op = train_reader.feed()  #[64,128]

        images_pl1, images_pl2, labels_pl = placeholder_inputs(BATCH_SIZE)
        conv_features1, features1 = model.get_features(images_pl1, reuse=False)
        conv_features2, features2 = model.get_features(images_pl2, reuse=True)
        predicts = tf.sqrt(
            tf.reduce_sum(tf.square(features1 - features2), axis=1))

        total_loss = model.caculate_loss(conv_features1, conv_features2,
                                         features1, features2)
        tf.summary.scalar('sum_loss', total_loss)
        train_op = model.training(total_loss)

        summary = tf.summary.merge_all()
        saver = tf.train.Saver(max_to_keep=50)
        #    init_op = tf.global_variables_initializer()

        sess = tf.Session()
        summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
        #    sess.run(init_op)
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            max_step = 500000
            for step in range(390380, max_step):
                start_time = time.time()
                lefts, rights, batch_labels = sess.run(
                    [leftIMG, rightIMG, labels_op])
                _, summary_str, loss_value = sess.run(
                    [train_op, summary, total_loss],
                    feed_dict={images_pl1: lefts,
                               images_pl2: rights,
                               labels_pl: batch_labels})

                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
                duration = time.time() - start_time
                if step % 10 == 0:
                    logging.info(
                        '>> Step %d run_train: loss = %.4f  (%.3f sec)' %
                        (step, loss_value, duration))
                    #-------------------------------
                if step % 1000 == 0:
                    logging.info('>> %s Saving in %s' %
                                 (datetime.now(), checkpoint_dir))

                    saver.save(sess, checkpoint_file, global_step=step)

                    logging.info('Test Data Eval:')
                    do_eval(sess,
                            step,
                            predicts,
                            images_pl1,
                            images_pl2,
                            labels_pl,
                            test_data,
                            name='notredame')

        except KeyboardInterrupt:
            print('INTERRUPTED')
            coord.request_stop()

        finally:
            saver.save(sess, checkpoint_file, global_step=step)
            print('\rModel saved in file :%s' % checkpoint_dir)
            coord.request_stop()
            coord.join(threads)

        sess.close()
Example #29
def graph_names(graph):
    return [n.name for n in graph.as_graph_def().node]


def test(dataset, checkpoint, checkpoint_dir):
    sess = tf.Session()
    saver = tf.train.import_meta_graph(checkpoint)
    saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
    graph = tf.get_default_graph()

    input_node = graph.get_tensor_by_name('inputs/input:0')
    dropp_node = graph.get_tensor_by_name('inputs/dropout_prob:0')
    predc_node = graph.get_tensor_by_name('model/Softmax:0')

    input_batch, true_batch = dataset.validationSet.next_batch(1)

    feed_dict = {input_node: input_batch, dropp_node: 1.0}

    result = sess.run(predc_node, feed_dict)

    print(result)
    print(true_batch)


if __name__ == "__main__":
    args = parse_args()
    data = ds.read_data_sets(args.train, args.validation,
                             ['back', 'empty', 'front', 'side'])
    test(data, args.chkp, args.chkp_dir)
Example #30
def load_data(data_dir="/usr/local/data/"):
    return dataset.read_data_sets(data_dir)
Example #31
def train():
    #Load dataset
    data = dataset.read_data_sets()

    obs_shape = (imageSize, imageSize, 3)
    num_class = 2
    x = tf.placeholder(tf.float32, shape=(None, ) + obs_shape)
    y = tf.placeholder(tf.float32, (None, num_class))
    model = CNNModel(x, y)
    '''
    The batch size is the number of images given to the network at a time. The more
    memory your gpu has, the larger you can make your batch size, which affects
    training time. The network is given batchSize/2 positive and batchSize/2
    negative images. Since we consider an epoch a complete pass of the negative
    images, you will only move through an epoch at batchSize/2 intervals.
    '''
    batchSize = 100
    numEpochs = 2000
    learningRate = 0.5e-4
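
    # Worked example of the note above (illustrative assumption: 10,000
    # negative images): each batch holds batchSize/2 = 50 negatives, so one
    # epoch completes after 10000 / 50 = 200 batches.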

    optimizer = tf.train.AdamOptimizer(learningRate).minimize(model.loss)
    saver = tf.train.Saver(tf.trainable_variables())
    bestTestAccuracySoFar = 0
    with tf.Session() as sess:
        print('Starting training')

        sess.run(tf.global_variables_initializer())
        if restoreFromCheckpoint:
            saver.restore(sess, checkpointFile)
        for epoch in range(numEpochs):

            begin = time.time()

            print "Starting epoch", epoch
            #Training
            train_accuracies = []
            while True:
                batch = data.train.next_batch(batchSize, flip=True)
                feed_dict = {x: batch[0], y: batch[1], model.keep_prob: 0.5}
                _, acc = sess.run([optimizer, model.accuracy],
                                  feed_dict=feed_dict)
                train_accuracies.append(acc)
                #If done with epoch
                if batch[2]:
                    break

            print "Finished training for epoch ", epoch
            print "Evaluating test accuracy..."

            batchNum = 0
            test_accuracies = []
            #Testing
            while True:
                batch = data.test.next_batch(batchSize)
                #We do not pass in an optimizer to the run function and we set the keep_prob to 1.0 here because we are only evaluating
                feed_dict = {x: batch[0], y: batch[1], model.keep_prob: 1.0}
                correct, acc = sess.run(
                    [model.correct_prediction, model.accuracy],
                    feed_dict=feed_dict)
                test_accuracies.append(acc)
                index = 0
                if writeResults:
                    for negorpos, img, isCorrect in zip(
                            batch[1], batch[0], correct):
                        #If negative
                        if negorpos[1] == 1:
                            if isCorrect:
                                cv2.imwrite(
                                    "Results/Negatives/Correct/" +
                                    str(batchNum) + "_" + str(index) + ".jpg",
                                    cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
                            else:
                                cv2.imwrite(
                                    "Results/Negatives/Incorrect/" +
                                    str(batchNum) + "_" + str(index) + ".jpg",
                                    cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
                        else:  #Else positive
                            if isCorrect:
                                cv2.imwrite(
                                    "Results/Positives/Correct/" +
                                    str(batchNum) + "_" + str(index) + ".jpg",
                                    cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
                            else:
                                cv2.imwrite(
                                    "Results/Positives/Incorrect/" +
                                    str(batchNum) + "_" + str(index) + ".jpg",
                                    cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
                        index += 1
                batchNum += 1
                #If done with epoch
                if batch[2]:
                    break

            train_acc_mean = np.mean(train_accuracies)
            test_acc_mean = np.mean(test_accuracies)

            print "Epoch=", epoch, ", time =", time.time(
            ) - begin, ", train accuracy=", train_acc_mean, "test accuracy=", test_acc_mean

            if test_acc_mean > bestTestAccuracySoFar:
                bestTestAccuracySoFar = test_acc_mean
                saver.save(
                    sess, "./Best_Checkpoints/model_with_test_accuracy_" +
                    str(test_acc_mean) + "_.ckpt")
                print("Saved checkpoint to Best_Checkpoints directory since this epoch had the best test accuracy so far")
            else:
                print("Not saving checkpoint to disk since its test accuracy was not the best so far")
        sess.close()
        #os._exit skips interpreter cleanup, forcing the process to end even if loader threads linger;
        #exit with status 0 so a successful run is not reported as a failure
        os._exit(0)
示例#32
0
import tensorflow as tf
from tensorflow.python.framework.ops import reset_default_graph
#import utils
import logging
import dataset
import os

sess = tf.InteractiveSession()
ds = dataset.read_data_sets('/home/nikhil/data/', one_hot=True)
print(ds.train.images.shape)
print(ds.train.labels.shape)
print(ds.test.images.shape)
print(ds.test.labels.shape)
x = tf.placeholder(tf.float32, shape=[None, 3584])
y_ = tf.placeholder(tf.float32, shape=[None, 2])


def weight_variable(shape):
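    # Small truncated-normal noise breaks symmetry between units and avoids zero gradients at the start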
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
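
# A minimal sketch (not in the original snippet) of how these helpers would
# compose into a first conv layer; the 56x64x1 reshape of the 3584-dim input
# is an assumption, as are the 5x5 kernel and 32 feature maps.
x_image = tf.reshape(x, [-1, 56, 64, 1])
w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)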

示例#33
0
from sklearn import svm
import numpy as np
import dataset

# Hyper Parameters
sample_amount = 200
da = np.zeros((10, 10))

# Data Feed
# 784 (reshape=True) | 28*28 (reshape=False)
image_reshape = False
data_dir = "/home/ziyi/code/data/"

for i in range(10):
    mnist0 = dataset.read_data_sets(data_dir,
                                    target_class=i,
                                    one_hot=False,
                                    reshape=image_reshape,
                                    sample_vol=sample_amount)
    for j in range(10):
        if i == j: continue
        # print(mnist1.train.images.shape, mnist1.train.num_examples)

        mnist1 = dataset.read_data_sets(data_dir,
                                        target_class=j,
                                        one_hot=False,
                                        reshape=image_reshape,
                                        sample_vol=sample_amount * 10)

        train_images = np.concatenate(
            [mnist0.train.images, mnist1.train.images], 0)
        train_labels = np.concatenate([
示例#34
0
def test(classifier,
         filename,
         ImagePatchWidth = 20,
         ImagePatchStep = 4,
         labelMode = 'PRO',
         label_mutiplier = 1.0,
         plot_show = 1,
         save_image = True):
    """ label image with trained deep neural-network
    """
    if(labelMode == 'NUM' and ImagePatchStep < ImagePatchWidth):
        ImagePatchStep = ImagePatchWidth
    
    # generate test data
    testDS = ds.read_data_sets(
        instanceSize = ImagePatchWidth,
        stride = ImagePatchStep,
        instanceMode = 'test',
        labelMode = labelMode,
        imageName = filename,
        label_mutiplier = label_mutiplier)

    # decide batch number and batch size according to memory requirement
    memory_limit = gv.MEM_LIM
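    # patch bytes = ImagePatchWidth**2 pixels * 3 channels * 4 bytes (float32),
    # with a further 3x safety margin; channel count and margin are assumptions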
    batch_size = int(np.floor(memory_limit / (ImagePatchWidth**2*3) / 4 / 3))
    batch_num  = int(np.ceil(
        np.true_divide(testDS.xlength * testDS.ylength, batch_size)))

    # labeling
    _y = testDS.labels                                 # correct labels
    y  = np.zeros((testDS.num_examples,1))             # label results
    PROGRESS = progress.progress(0, batch_num)
    for j in range(batch_num):
        PROGRESS.setCurrentIteration(j+1)
        PROGRESS.setInfo(prefix_info = 'Labeling ... ', suffix_info = filename)
        PROGRESS.printProgress()
        start = testDS.index_in_epoch
        if (start + batch_size > testDS.num_examples) :
            end = testDS.num_examples
        else:
            end = start + batch_size
        batch_images, _ = testDS.next_batch(end-start)        
        y[int(start):int(end)] = classifier.predict(
            batch_images, batch_size = 1024)
   
    # benchmark
    correctNumber =  np.array([
        np.sum(y == _y),                              # total correct
        np.sum(np.all([y == _y, _y == 0], axis = 0)), # correct negative
        np.sum(np.all([y == _y, _y >  0], axis = 0)), # correct positive
        np.sum(np.absolute(np.subtract(y, _y)))])     # total absolute error
    totalInstanceNumber = _y.size                     # number of instances
    
    # save image
    image_data = np.reshape(y, (testDS.ylength, testDS.xlength))
    if(gv.dp_image_save and save_image):       
        img_save = image_data - np.amin(image_data)
        img_save = img_save / np.amax(img_save)
        io.imsave(gv.__DIR__ + gv.dp__image_dir + filename, img_save)
  
    # show image
    if(plot_show == 1):
        fig, ax = plt.subplots(1,2)
        ax[0].set_title('Original Image')
        img = io.imread(gv.__DIR__ + gv.__TrainImageDir__ + filename)
        ax[0].imshow(img)
        ax[1].set_title('Labeling Result')
        ax[1].imshow(image_data)
        plt.show()
    
    return [np.sum(y), correctNumber, totalInstanceNumber]
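
# Hypothetical usage (classifier, filename, and argument values are assumptions):
#   label_sum, correct_counts, n_instances = test(clf, 'bubble_001.jpg',
#                                                 ImagePatchWidth=20, ImagePatchStep=4)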
示例#35
0
def doAlexNet(trainDir, valDir, trainCsv, valCsv):

   f1=open('log_%d' % (time.time()), 'w+')
   f1.write('AAAAA\n')
   f1.write("Start %s\n" % time.time())
   f1.flush()
   # Force CPU only mode
   with tf.device('/cpu:0'):
      # Creates a session with log_device_placement set to True.
      # sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
      sess = tf.Session()

      # Constants
      nClasses = 38
      imageSize = 227
      starter_learning_rate = 1e-5
      batchSize = 10
      dropOutValue = 1
      log('nClasses: %d, imageSize: %d, batchSize: %d, learningRate: %e, dropOut: %f\n'
                    % (nClasses, imageSize, batchSize, starter_learning_rate, dropOutValue))

      # The size of the images is 227x227
      x = tf.placeholder("float", shape=[None, imageSize, imageSize, 1], name="Input")
      # There are 38 classes (labels)
      y_ = tf.placeholder("float", shape=[None, nClasses], name = "Output")

      # FIRST SUBLAYER (96 features, 1 convolution)
      w_conv1 = weight_variable([11, 11, 1, 96], name="Weights_conv1")
      b_conv1 = bias_variable([96], name="b_conv1")


      # We then convolve x_image with the weight tensor,
      # add the bias, apply the ReLU function (repeat from step 1) and finally max pool.
      h_conv1 = activation(tf.nn.conv2d(x, w_conv1, strides=[1, 4, 4, 1], padding='VALID') + b_conv1)
      h_pool1 = max_pool_3x3(h_conv1, name="pool1")

      # SECOND SUBLAYER (256 features, 1 convolution)
      w_conv2 = weight_variable([5, 5, 96, 256], name="Weights_conv2")
      b_conv2 = bias_variable([256], name="b_conv2")

      h_conv2 = tf.nn.relu(conv2d(normalize(h_pool1), w_conv2)  + b_conv2)
      h_pool2 = max_pool_3x3(h_conv2, name="pool2")

      # THIRD SUBLAYER (384 features, 3 convolutions)
      w_conv31 = weight_variable([3, 3, 256, 384], name="Weights_conv31")
      b_conv31 = bias_variable([384], name="b_conv31")
      w_conv32 = weight_variable([3, 3, 384, 384], name="Weights_conv32")
      b_conv32 = bias_variable([384], name="b_conv32")
      w_conv33 = weight_variable([3, 3, 384, 256], name="Weights_conv33")
      b_conv33 = bias_variable([256], name="b_conv33")

      h_conv31 = activation(conv2d(normalize(h_pool2), w_conv31)  + b_conv31)
      h_conv32 = activation(conv2d(normalize(h_conv31), w_conv32) + b_conv32)
      h_conv33 = activation(conv2d(normalize(h_conv32), w_conv33) + b_conv33)
      h_pool3 = max_pool_3x3(h_conv33, name="pool3")
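      # Spatial trace (assuming 3x3/stride-2 max pooling): 227 -> (227-11)/4+1 = 55
      # -> pool 27 -> conv 27 -> pool 13 -> convs 13 -> pool 6, hence the 6x6x256 volume below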

      # DENSELY CONNECTED LAYER
      # Now that the image size has been reduced to 6x6,
      # we add a fully-connected layer with 4096 neurons to allow processing on the entire image.
      # We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight
      # matrix, add a bias, and apply a ReLU.

      ## Fully connected layers
      # FC:           [1x1x4096]      fc1 (with dropout)
      # FC:           [1x1x4096]      fc2 (with dropout)
      # FC:           [1x1x38]        fc3 (to output)

      # Fully connected layer 1 (4096 neurons)
      w_fc1 = weight_variable([6*6*256, 4096], name="Weights_fc1")
      b_fc1 = bias_variable([4096], name="biases_fc1")

      h_pool3_flat = tf.reshape(h_pool3, [-1, 6*6*256])
      h_fc1 = activation(tf.matmul(h_pool3_flat, w_fc1) + b_fc1)
      # Dropout of fc1 (keep probability is supplied at run time through keep_prob)
      keep_prob = tf.placeholder("float")
      h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

      # Fully connected layer 2
      w_fc2 = weight_variable([4096, 4096], name="Weights_fc2")
      b_fc2 = bias_variable([4096], name="biases_fc2")
      h_fc2 = activation(tf.matmul(h_fc1_drop, w_fc2) + b_fc2)
      h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)

      # Readout layer
      w_fc3 = weight_variable([4096, nClasses], name="Weights_fc3")
      b_fc3 = bias_variable([nClasses], name="biases_fc3")
      y_conv = tf.nn.softmax(tf.matmul(h_fc2_drop, w_fc3) + b_fc3)

      # Load the dataset
      datasets = dataset.read_data_sets(trainDir, valDir, trainCsv, valCsv)

      # Train and eval the model
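      # Clipping keeps log() away from zero, so a saturated softmax output cannot produce a NaN loss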
      cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_conv,1e-10,1.0)))

      # Exponentially decaying learning rate
      global_step = tf.Variable(0, trainable=False)
      learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100, 0.98, staircase=True)
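      # With staircase=True this is lr = starter_learning_rate * 0.98 ** (global_step // 100)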
      # Passing global_step to minimize() will increment it at each step.

      # Optimizer
      train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy, global_step=global_step)

      correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
      sess.run(tf.global_variables_initializer())

      saver = tf.train.Saver()

      # saver.restore(sess, 'my-model-batch1-10000')

      for i in range(1500):
         step_start = time.time()

         batch = datasets.train.get_sequential_batch(batchSize)

         train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob:dropOutValue}, session=sess)

         if i%25 == 0:
            train_accuracy, cross_entropyD,\
            yD, currentLearningRate \
            = sess.run([accuracy, cross_entropy, y_conv, learning_rate],
                        feed_dict={x: batch[0], y_: batch[1], keep_prob: 1})
            log("step: %d, training accuracy: %f, time: %d"%(i, train_accuracy, time.time() - step_start))
            log("train cross entropy: %f"%(cross_entropyD))
            log("learning rate = %.10f" % (currentLearningRate));
            log("y     = %s"%(str(np.argmax(yD, axis=1))))
            log("yReal = %s"%(str(np.argmax(batch[1], axis=1))))
            # log("validation accuracy: %g"%accuracy.eval(feed_dict={x:  validation[0], y_: validation[1], keep_prob: 1.0}))

         if i%200 == 0 and i != 0:
            saver.save(sess, 'alexnet-model-%d' % (time.time()), global_step=global_step)

         log('step: %d, time: %d\n' % (i, time.time() - step_start))
示例#36
0
def run_train():
    """Train CAPTCHA for a number of steps."""
    train_data = dataset.read_data_sets()

    with tf.Graph().as_default():
        images_placeholder, y_placeholder, z_placeholder = placeholder_inputs()

        d_logits_real, d_logits_fake = model.inference(images_placeholder,
                                                       z_placeholder,
                                                       y_placeholder)
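        # Fixed demo inputs: 100 samples of 100-dim uniform noise, rendered and saved every 100 steps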
        demo_noise = np.random.uniform(-1, 1, size=(100, 100))
        demo_label = get_demo_label()
        demo_img = model.generator(z_placeholder, y_placeholder, reuse=True)

        g_loss, d_loss = model.loss(d_logits_real, d_logits_fake)
        tf.summary.scalar('g_loss', g_loss)
        tf.summary.scalar('d_loss', d_loss)
        summary = tf.summary.merge_all()
        train_op = model.get_optimizer(g_loss, d_loss)
        saver = tf.train.Saver()
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        sess = tf.Session()
        summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
        sess.run(init_op)

        try:
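            # ~100 epochs over a 70,000-example training set (MNIST-sized; an assumption)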
            max_step = 100 * 70000 // batch_size
            for step in range(1, max_step):
                start_time = time.time()
                feed_dict = fill_feed_dict(train_data, images_placeholder,
                                           y_placeholder, z_placeholder)

                _, gloss_value, dloss_value = sess.run(
                    [train_op, g_loss, d_loss], feed_dict=feed_dict)

                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

                duration = time.time() - start_time
                if step % 10 == 0:
                    print(
                        '>> Step %d run_train: g_loss = %.2f d_loss = %.2f (%.3f sec)'
                        % (step, gloss_value, dloss_value, duration))
                    #-------------------------------

                if step % 100 == 0:

                    demo_result = sess.run(demo_img,
                                           feed_dict={
                                               z_placeholder: demo_noise,
                                               y_placeholder: demo_label
                                           })
                    save_images(demo_result, step)
                    print('>> %s Saving in %s' %
                          (datetime.now(), checkpoint_dir))
                    saver.save(sess, checkpoint_file, global_step=step)

        except KeyboardInterrupt:
            print('INTERRUPTED')

        finally:
            saver.save(sess, checkpoint_file, global_step=step)
            print('Model saved in file :%s' % checkpoint_dir)

        sess.close()