Code example #1
# assumes TF 1.x with: import tensorflow as tf; from tensorflow.contrib import layers, losses
def model_function(features, targets):
    targets = tf.one_hot(targets, 2, 1, 0)  # two perceptrons in output

    outputs = layers.fully_connected(inputs=features,
                                     num_outputs=2,
                                     activation_fn=tf.sigmoid)

    outputs_dict = {"labels": outputs}

    # Calculate loss using mean squared error
    loss = losses.mean_squared_error(outputs, targets)

    # Create training operation
    optimizer = layers.optimize_loss(
        loss=loss,
        # step is not an integer but a wrapper around it, just as Java has 'Integer' on top of 'int'
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=0.001,
        optimizer="SGD")

    # Why return 'loss' separately when it is already part of the optimizer op?
    #   evaluate() needs outputs_dict and loss  [not the optimizer, since it is not learning]
    #   fit() needs loss and optimizer          [not outputs_dict, since it is not predicting]
    #   predict() needs only outputs_dict
    # So 'loss' is returned separately for use by evaluate()
    return outputs_dict, loss, optimizer
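For context, a rough sketch of how a model function returning (outputs_dict, loss, optimizer) is typically driven by the TF 1.x tf.contrib.learn Estimator; the toy arrays X and Y below are hypothetical and not part of the original example:

# usage sketch (assumed tf.contrib.learn API, hypothetical toy data)
import numpy as np
from tensorflow.contrib import learn

X = np.random.rand(100, 2).astype(np.float32)   # hypothetical features
Y = np.random.randint(0, 2, size=100)           # hypothetical binary targets

estimator = learn.Estimator(model_fn=model_function)
estimator.fit(x=X, y=Y, steps=1000)             # uses loss + optimizer
print(estimator.evaluate(x=X, y=Y, steps=1))    # uses outputs_dict + loss
print(list(estimator.predict(x=X)))             # uses outputs_dict only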
Code example #2
def model_fn_example(dataset_tensor, evaluation, batch_size):
    input, output = dataset_tensor
    net_output = layers.fully_connected(input, 1, activation_fn=None)
    batch_error = losses.mean_squared_error(output, net_output)
    graph_data = {}
    global_step = training_util.get_or_create_global_step()

    # use different metrics depending on whether we are evaluating or training
    if evaluation:
        # accumulate the error for the result
        error_sum = tf.Variable(0.0, dtype=tf.float32, name='accumulated_error', trainable=False)
        error_sum = tf.assign_add(error_sum, batch_error)
        count = tf.Variable(0.0, dtype=tf.float32, name='data_samples', trainable=False)
        count = tf.assign_add(count, 1)
        error = error_sum / count
        graph_data['error'] = error
    else:
        # use moving averages for the error
        ema = tf.train.ExponentialMovingAverage(decay=0.9)
        update_op = ema.apply([batch_error])
        error = ema.average(batch_error)
        # add train operator
        sgd = tf.train.GradientDescentOptimizer(0.00001)
        train_op = sgd.minimize(batch_error, global_step)
        graph_data['error'] = error
        graph_data['update_op'] = update_op
        graph_data['train_op'] = train_op

    # add error to summary
    tf.summary.scalar('mse_error', error)
    return graph_data
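As an aside, the two branches above track the error differently: evaluation keeps a plain running mean over all batches, while training smooths the batch error with an exponential moving average. A small plain-numpy illustration of the difference (decay 0.9, made-up batch errors):

import numpy as np

batch_errors = [4.0, 2.0, 1.0]                 # made-up per-batch errors

running_mean = np.mean(batch_errors)           # evaluation branch: (4 + 2 + 1) / 3 = 2.33

ema = batch_errors[0]                          # training branch: EMA update, roughly
for e in batch_errors[1:]:                     # shadow = decay * shadow + (1 - decay) * value
    ema = 0.9 * ema + 0.1 * e
print(running_mean, ema)                       # 2.33 vs 3.52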
Code example #3
File: predictron.py  Project: b-kartal/predictron
 def build_loss(self):
   with tf.variable_scope('loss'):
     # Loss Eqn (5)
     # [batch_size, 1, maze_size]
     self.targets_tiled = tf.expand_dims(self.targets, 1)
     # [batch_size, K + 1, maze_size]
     self.targets_tiled = tf.tile(self.targets_tiled, [1, self.max_depth + 1, 1])
     self.loss_preturns = losses.mean_squared_error(self.g_preturns, self.targets_tiled, scope='preturns')
     losses.add_loss(self.loss_preturns)
     tf.summary.scalar('loss_preturns', self.loss_preturns)
     # Loss Eqn (7)
     self.loss_lambda_preturns = losses.mean_squared_error(
       self.g_lambda_preturns, self.targets, scope='lambda_preturns')
     losses.add_loss(self.loss_lambda_preturns)
     tf.summary.scalar('loss_lambda_preturns', self.loss_lambda_preturns)
     self.total_loss = losses.get_total_loss(name='total_loss')
Code example #4
def model_function(features, targets, mode):
    # no one-hot encoding needed since the targets are already in one-hot format

    # sigmoid will also work, although the outputs are harder to interpret as probabilities;
    # either way, the output with the maximum value corresponds to the predicted 'class'
    outputs = layers.fully_connected(
        inputs=features,
        num_outputs=10,  # 10 perceptrons for 10 numbers (0 to 9)
        activation_fn=tf.sigmoid)

    # Calculate loss using mean squared error
    loss = losses.mean_squared_error(outputs, targets)

    optimizer = layers.optimize_loss(
        loss=loss,
        # step is not an integer but a wrapper around it, just as Java has 'Integer' on top of 'int'
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=0.001,
        optimizer="SGD")

    # Return fractional values corresponding to the sigmoid perceptron outputs
    # Class of output (i.e., predicted number) corresponds to the perceptron returning the highest fractional value
    # Returning both fractional values and corresponding labels
    # The 2nd argument of '1' to argmax() takes the max across each row (axis=1), which is what we want here; use '0' for column-wise.
    return {'probs': outputs, 'labels': tf.argmax(outputs, 1)}, loss, optimizer
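A quick sanity check of the argmax axis convention used above (plain numpy, illustrative only):

import numpy as np

scores = np.array([[0.1, 0.9, 0.3],
                   [0.8, 0.2, 0.1]])
print(np.argmax(scores, axis=1))  # [1 0] -> max within each row, i.e. the predicted class per sample
print(np.argmax(scores, axis=0))  # [1 0 0] -> max within each column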
Code example #5
File: predictron.py  Project: wsjeon/predictron
 def build_loss(self):
     with tf.variable_scope('loss'):
         # Loss Eqn (5)
         # [batch_size, 1, maze_size]
         self.targets_tiled = tf.expand_dims(self.targets, 1)
         # [batch_size, K + 1, maze_size]
         self.targets_tiled = tf.tile(self.targets_tiled,
                                      [1, self.max_depth + 1, 1])
         self.loss_preturns = losses.mean_squared_error(self.g_preturns,
                                                        self.targets_tiled,
                                                        scope='preturns')
         losses.add_loss(self.loss_preturns)
         tf.summary.scalar('loss_preturns', self.loss_preturns)
         # Loss Eqn (7)
         self.loss_lambda_preturns = losses.mean_squared_error(
             self.g_lambda_preturns, self.targets, scope='lambda_preturns')
         losses.add_loss(self.loss_lambda_preturns)
         tf.summary.scalar('loss_lambda_preturns',
                           self.loss_lambda_preturns)
         self.total_loss = losses.get_total_loss(name='total_loss')
Code example #6
File: DNNAgent.py  Project: ahmadelsallab/TwoSigma
 def __init__(self, N_FEATURES, LEARNING_RATE):
     '''
     Constructor
     '''
     self.x = tf.placeholder(tf.float32, shape=(None, N_FEATURES))
     y = tf.placeholder(tf.float32, shape=(None,1))
     self.p = tf.placeholder(tf.float32)
     self.logits = layers.fully_connected(self.x, 56, activation_fn=tf.nn.relu)
     self.logits = layers.dropout(self.logits, keep_prob=self.p)
     self.logits = layers.fully_connected(self.logits, 56, activation_fn=tf.nn.relu)  # second hidden layer stacked on the first
     self.logits = layers.dropout(self.logits, keep_prob=self.p)
     y_ = layers.fully_connected(self.logits, 1)
     self.loss = losses.mean_squared_error(y, y_)
     
     # Objective
     # loss = tf.reduce_mean(tf.reduce_sum(tf.square(y-y_)) / tf.square(y_ - tf.reduce_mean(y_))) # Equivalent to minimize R2
     
     self.train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(self.loss)        
Code example #8
def model_function(features, targets, mode):

    # convert targets to a one-hot vector representation
    targets = tf.one_hot(targets, 2, 1, 0)

    # Configure the single layer perceptron model
    outputs = layers.fully_connected(inputs=features,
                                     num_outputs=2,
                                     activation_fn=tf.sigmoid)

    # Calculate loss using mean squared error
    loss = losses.mean_squared_error(outputs, targets)

    # Create an optimizer for minimizing the loss function
    optimizer = layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=0.001,
        optimizer="SGD")

    return {'labels': outputs}, loss, optimizer
Code example #9
def train():
    resolution = [128, 128]
    batch_size = 2
    # Create data inputs
    training_dataset = random_images_from_spectograms('data/spectograms',
                                                      resolution=resolution,
                                                      batch_size=batch_size)

    iterator = tf.data.Iterator.from_structure(training_dataset.output_types,
                                               training_dataset.output_shapes)
    sampled_images = iterator.get_next()

    training_init_op = iterator.make_initializer(training_dataset)
    # validation_init_op = iterator.make_initializer(validation_dataset)

    # Create tensorflow graph
    autoencoded_images = conv_ae_2d(sampled_images, encoder_setup,
                                    decoder_setup)
    loss = mean_squared_error(labels=sampled_images,
                              predictions=autoencoded_images)
    optimizer = GradientDescentOptimizer(learning_rate=1e-3)
    train_op = optimizer.minimize(loss)

    # Run the training epochs, traversing the training dataset each time.
    # (The validation dataset is not wired in yet; see validation_init_op above.)
    with tf.Session() as sess:
        # initialize variables once, before training, so weights are not reset every epoch
        sess.run(tf.global_variables_initializer())
        epochs = 100
        for epoch in range(epochs):
            # Training loop
            sess.run(training_init_op)
            with tqdm.trange(10) as t:
                for i in t:
                    # Description will be displayed on the left
                    t.set_description('GEN %i' % i)
                    # fetch into a separate name so the 'loss' tensor is not overwritten
                    _, loss_value = sess.run([train_op, loss])
                    t.set_postfix(loss=loss_value)
            print("epoch {}".format(epoch))
Code example #10
File: train_twins.py  Project: JZDSS/SPCup
def main(_):
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
    num_gpus = len(FLAGS.gpu.split(','))
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    if not tf.gfile.Exists(FLAGS.data_dir):
        raise RuntimeError('data directory does not exist!')

    if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)

    if not tf.gfile.Exists(FLAGS.ckpt_dir):
        tf.gfile.MakeDirs(FLAGS.ckpt_dir)
    train_example_batch, train_label_batch = input_pipeline(
        tf.train.match_filenames_once(os.path.join(FLAGS.data_dir, 'train', '*.tfrecords')), FLAGS.batch_size, FLAGS.patch_size)
    valid_example_batch, valid_label_batch = input_pipeline(
        tf.train.match_filenames_once(os.path.join(FLAGS.data_dir, 'valid', '*.tfrecords')), FLAGS.batch_size, FLAGS.patch_size)
    f = open(FLAGS.out_file, 'w')
    if not f:
        raise RuntimeError('OUTPUT FILE OPEN ERROR!!!!!!')
    with tf.name_scope('input'):
        x1 = tf.placeholder(tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, 3], 'x1')
        x2 = tf.placeholder(tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, 3], 'x2')
        tf.summary.image('show', x1, 1)
        tf.summary.image('show', x2, 1)

    with tf.name_scope('label'):
        y_1 = tf.placeholder(tf.int64, [None, 1], 'y1')
        y_2 = tf.placeholder(tf.int64, [None, 1], 'y2')
        y_ = tf.cast(tf.equal(y_1, y_2), tf.int64)

    is_training = tf.placeholder(tf.bool)

    y = twins.build_net(x1, x2, FLAGS.blocks, is_training)
    y = tf.reshape(tf.nn.sigmoid(y), [-1])
    weights = tf.reshape(tf.cast(y_, tf.float32) * 10, [-1])
    y_ = tf.reshape(y_, [-1])
    with tf.name_scope('scores'):
        # mean_squared_error registers its value in the losses collection,
        # and get_total_loss() below sums it together with any regularization losses
        loss.mean_squared_error(y, y_, weights)
        total_loss = tf.contrib.losses.get_total_loss(add_regularization_losses=True, name='total_loss')
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.cast(tf.round(y), tf.int64), y_)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('loss', total_loss)
        tf.summary.scalar('accuracy', accuracy)

    with tf.name_scope('train'):
        global_step = tf.Variable(FLAGS.start_step, name="global_step")
        # learning_rate = tf.train.piecewise_constant(global_step, [32000, 64000, 108000, ], [0.01, 0.001, 0.0001, 0.00001])
        learning_rate = tf.train.exponential_decay(0.1, global_step, 32000, 0.1)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_step = tf.train.MomentumOptimizer(learning_rate, momentum=FLAGS.momentum).minimize(total_loss,
                                                                                                     global_step=global_step)
    tf.summary.scalar('lr', learning_rate)

    merged = tf.summary.merge_all()

    with tf.name_scope("saver"):
        saver = tf.train.Saver(name="saver")

    with tf.Session(config=config) as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        if tf.gfile.Exists(os.path.join(FLAGS.ckpt_dir, 'checkpoint')):
            saver.restore(sess, os.path.join(FLAGS.ckpt_dir, FLAGS.model_name))
        else:
            sess.run(tf.global_variables_initializer())

        train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
        test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test', sess.graph)
        train_writer.flush()
        test_writer.flush()

        def feed_dict(train, on_training):
            def get_batch(data, labels):
                d, l = sess.run([data, labels])
                d = d.astype(np.float32)
                l = l.astype(np.int64)
                return d, l

            if train:
                x1s, y1s = get_batch(train_example_batch, train_label_batch)
                x2s, y2s = get_batch(train_example_batch, train_label_batch)
            else:
                x1s, y1s = get_batch(valid_example_batch, valid_label_batch)
                x2s, y2s = get_batch(valid_example_batch, valid_label_batch)
            return {x1: x1s, x2: x2s, y_1: y1s, y_2: y2s, is_training: on_training}

        for i in range(FLAGS.start_step, FLAGS.max_steps + 1):
            feed = feed_dict(True, True)
            sess.run(train_step, feed_dict=feed)
            if i % 100 == 0 and i != 0:  # Record summaries and test-set accuracy
                loss0, acc0, summary = sess.run([total_loss, accuracy, merged], feed_dict=feed_dict(False, False))
                test_writer.add_summary(summary, i)
                loss1, acc1, summary = sess.run([total_loss, accuracy, merged], feed_dict=feed_dict(True, False))
                train_writer.add_summary(summary, i)
                print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=f)
                print('step %d: train_acc=%f, train_loss=%f; test_acc=%f, test_loss=%f' % (i, acc1, loss1, acc0, loss0),
                      file=f)
                saver.save(sess, os.path.join(FLAGS.ckpt_dir, FLAGS.model_name))
                f.flush()

        coord.request_stop()
        coord.join(threads)

    train_writer.close()
    test_writer.close()
    f.close()
Code example #11
# softmax output. Gives global meaning to both the perceptrons combined
# output is [0.26,0.73], [0.26,0.73].
# [e^6/(e^6+e^7), e^7/(e^6+e^7)] = [1/(1+e), 1/(1+e^-1)] = [0.26, 0.73]
# [e^12/(e^12+e^13), e^13/(e^12+e^13)] = [1/(1+e), 1/(1+e^-1)] = [0.26, 0.73]
# Note how the result is very different from that obtained with sigmoid activation function.
nnout6 = layers.fully_connected(
    inputs=features,
    weights_initializer=tf.constant_initializer([[1.0, 1.0], [2.0, 2.0]]),
    biases_initializer=tf.constant_initializer([1.0, 2.0]),
    num_outputs=2,
    activation_fn=tf.nn.softmax)
session = tf.Session()
session.run(tf.initialize_all_variables())
session.run(nnout6)

outputs = tf.constant([0.0, 1.0, 0.0, 1.0])  # float constants so log_loss (tf.log) works
targets = tf.constant([1.0, 1.0, 1.0, 0.0])
sq_loss1 = losses.mean_squared_error(outputs, targets)
log_loss1 = losses.log_loss(outputs, targets)

outputs2 = tf.constant([[100.0, -100.0, -100.0], [-100.0, 100.0, -100.0],
                        [-100.0, -100.0, 100.0]])
targets2 = tf.constant([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
sq_loss2 = losses.mean_squared_error(outputs2, targets2)

session = tf.Session()
session.run(tf.initialize_all_variables())
session.run(sq_loss1)  # 0.75 = [(0-1)^2 + (1-1)^2 + (0-1)^2 + (1-0)^2] / 4
session.run(sq_loss2)  # 10067 = (6*100^2 + 3*101^2)/9
session.run(log_loss1)  # mean of -[t_i*log(p_i) + (1-t_i)*log(1-p_i)]
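A plain-numpy check of the arithmetic claimed in the comments above (illustrative only):

import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))
    return e / e.sum()

print(softmax([6.0, 7.0]))     # [0.2689 0.7311] ~ [0.26, 0.73]
print(softmax([12.0, 13.0]))   # identical: softmax depends only on differences between logits

print(np.mean((np.array([0., 1., 0., 1.]) - np.array([1., 1., 1., 0.])) ** 2))   # 0.75
preds = np.array([[100., -100., -100.], [-100., 100., -100.], [-100., -100., 100.]])
labels = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
print(np.mean((preds - labels) ** 2))                                            # 10067.0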
Code example #12
# Get first observation
observation = env.reset()

N_FEATURES = 108
LEARNING_RATE = 0.001

x = tf.placeholder(tf.float32, shape=(None, N_FEATURES))
y = tf.placeholder(tf.float32, shape=(None, 1))
p = tf.placeholder(tf.float32)
logits = layers.fully_connected(x, 56, activation_fn=tf.nn.relu)
logits = layers.dropout(logits, keep_prob=p)
logits = layers.fully_connected(logits, 56, activation_fn=tf.nn.relu)  # second hidden layer stacked on the first
logits = layers.dropout(logits, keep_prob=p)
y_ = layers.fully_connected(logits, 1)

loss = losses.mean_squared_error(y, y_)

# loss = tf.reduce_mean(tf.reduce_sum(tf.square(y-y_)) / tf.square(y_ - tf.reduce_mean(y_))) # Equivalent to minimize R2

train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

from sklearn.cross_validation import train_test_split
traindf, testdf = train_test_split(observation.train.drop(
    axis=1, labels=["id", "timestamp"]).dropna(),
                                   train_size=0.8,
                                   test_size=0.2)

Y_train = traindf["y"]
X_train = traindf.drop(axis=1, labels=["y"])

Y_test = testdf["y"]
Code example #13
import tensorflow as tf
import numpy as np
import tensorflow.contrib.losses as losses
import tensorflow.contrib.metrics as metrics
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])

# note: despite the variable name, this is a mean-squared-error loss, not cross-entropy
cross_entropy = losses.mean_squared_error(y, y_)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()

sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
saver.restore(
    sess,
    "/home/hardik/Desktop/MTech_Project/Scripts/Python/Brain_Research_Python/MNIST_data/myModel.ckpt"
)

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(
    sess.run(accuracy, feed_dict={
        x: mnist.test.images,