def eval():
    keep_prob = tf.placeholder(tf.float32)
    images, labels = input.get_data('eval', FLAGS.batch_size)
    hypothesis, cross_entropy, train_step = model.make_network(images, labels, keep_prob)

    with tf.Session() as sess:
        saver = tf.train.Saver()

        if tf.gfile.Exists(FLAGS.checkpoint_dir + '/model.ckpt'):
            saver.restore(sess, FLAGS.checkpoint_dir + '/model.ckpt')
        else:
            print 'Cannot find checkpoint file: ' + FLAGS.checkpoint_dir + '/model.ckpt'
            return

        delta = datetime.timedelta()
        max_steps = 10
        true_count = 0.
        total_sample_count = max_steps * FLAGS.batch_size

        # in_top_k marks each example True when its label is the top-1 prediction.
        top_k_op = tf.nn.in_top_k(hypothesis, labels, 1)
        tf.train.start_queue_runners(sess=sess)

        # Time max_steps evaluation batches with dropout disabled and count correct predictions.
        for i in range(0, max_steps):
            start = datetime.datetime.now()
            predictions = sess.run(top_k_op, feed_dict={keep_prob: 1.0})
            true_count += np.sum(predictions)
            delta += datetime.datetime.now() - start

    print 'total sample count: %d' % total_sample_count
    print 'precision @ 1: %f' % (true_count / total_sample_count)
    print 'evaluation time: %f seconds' % ((delta.seconds + delta.microseconds / 1E6) / max_steps)
Example 2
def test_input_fn():
    # Get LFW dataset
    ((train_data, train_size),
     (test_data, test_size)), label_lookup = get_data(FLAGS.data_dir)

    test_data = test_data.batch(FLAGS.batch_size)

    # A one-shot iterator yields the next batch of examples and labels as tensors.
    test_it = test_data.make_one_shot_iterator()
    test_d, test_l = test_it.get_next()

    return test_d, test_l
Example 3
def main():
    live_cell = '0'
    dead_cell = '.'

    sleep_time = 1
    generations = 10

    m = get_data()

    # main loop
    for ii in range(generations):
        clear_screen()
        print_matrix(m)
        print "--- generation %s" % (ii + 1)
        m = transform_matrix(m, live_cell, dead_cell)
        time.sleep(sleep_time)

    print "Application terminated successfully."
Example 5
def train():
    keep_prob = tf.placeholder(tf.float32)
    images, labels = input.get_data('train', FLAGS.batch_size)
    hypothesis, cross_entropy, train_step = model.make_network(
        images, labels, keep_prob)

    with tf.Session() as sess:
        saver = tf.train.Saver()

        if tf.gfile.Exists(FLAGS.checkpoint_dir + '/model.ckpt'):
            saver.restore(sess, FLAGS.checkpoint_dir + '/model.ckpt')
        else:
            init = tf.initialize_all_variables()
            sess.run(init)

        tf.train.start_queue_runners(sess=sess)

        # Train with dropout (keep_prob=0.7) and log the loss with dropout disabled.
        for step in range(FLAGS.max_steps):
            sess.run(train_step, feed_dict={keep_prob: 0.7})
            print step, sess.run(cross_entropy, feed_dict={keep_prob: 1.0})

            if step % 100 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, FLAGS.checkpoint_dir + '/model.ckpt')
Example 6
import tensorflow as tf
from resnet import resnet
from cfg import cfg
from tqdm import tqdm
from input import get_data

# One graph for training and a separate non-training graph for evaluation.
model = resnet(*get_data())
loss = model.get_loss()

test_model = resnet(*get_data('test'), False)
accuracy = test_model.get_accuracy()

global_step = tf.Variable(0, trainable=False, name='global_step')
# Run any collected UPDATE_OPS (e.g. batch-norm moving averages) before each optimizer step.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    train_step = tf.train.MomentumOptimizer(0.001, 0.9).minimize(loss, global_step=global_step)
saver = tf.train.Saver(max_to_keep=1)
saver_max = tf.train.Saver(max_to_keep=1)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    acc_max = 0
    saver.restore(sess, 'ckpt_best/0.9299')
    while True:
        # Step through 50,000 training samples, cfg.batch_size at a time, then checkpoint.
        for i in tqdm(range(0, 50000, cfg.batch_size), ncols=70):
            _, step = sess.run([train_step, global_step])
        saver.save(sess, cfg.dir_save + 'model', global_step=step)
        # Accumulate accuracy over 100 evaluation batches and normalize by 10,000 samples.
        acc = 0
        for i in range(0, 10000, 100):
            acc += accuracy.eval()
        acc = acc / 10000
        if acc >= acc_max:
            acc_max = acc
Example 7
                    do_eval(sess, eval_correct, images_pl, labels_pl, data.train)

                    print('Validation Data Eval:')
                    do_eval(sess, eval_correct, images_pl, labels_pl, data.validation)

            if step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
                print('Test Data Eval:')
                do_eval(sess, eval_correct, images_pl, labels_pl, data.test)

            # Save the model checkpoint periodically.
            if step % 1000 == 0 or step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
                checkpoint_path = CHECKPOINT_DIR
                saver.save(sess, checkpoint_path, global_step=step)


start = time.time()
if 'jul' in SUMMARY_DIR or 'JUL' in SUMMARY_DIR:
    print("You will accidently delete imp data.")
    sys.exit(0)

if tf.gfile.Exists(SUMMARY_DIR):
    tf.gfile.DeleteRecursively(SUMMARY_DIR)
input.maybe_download_and_extract()
data = input.get_data(num_training=DS_SIZE, num_validation=1280)

run_training(data)
end = time.time()
total = (end-start)/3600
print("Total time:", total, "hrs")
print("Total time:", (end-start), "secs")
Example 8
def load_data(self):
    self.m = get_data()  # list of strings
    self.ml = ''.join(self.m)  # string
    self.generation = 0
Example 9
args = parser.parse_args()

# create refs
file = args.file
model = args.model
xcol = args.xcolumn
ycol = args.ycolumn
dfmt = args.dateformat
parameters = args.parameters
xdata = args.xdata
ydata = args.ydata

# minimum of 4 parameters
if parameters < 4:
    parameters = 4

# log info
print("Running python regression now using file %s and model y~%s" %
      (file, model))

# unpack data from file
x, y = input.get_data(file, dfmt, xcol, ycol, xdata, ydata)

# perform regression
result = rlib.regress_data(parameters, x, y, model)[0]

# print all our info
output.print_info(model, result)

# plot diagrams
output.plot(x, y, result, dfmt)
Example 10
            _, loss_val = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time

            assert not np.isnan(loss_val), 'Model diverged with loss = NaN'

            if step % 10 == 0 or step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_val, duration))
                if step > 0:
                    summary_str = sess.run(summary_op, feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    summary_writer.flush()

            if step % 100 == 0 or step == N_EPOCH * (DS_SIZE //
                                                     BATCH_SIZE) - 1:
                save_path = saver.save(sess, "model.ckpt")
                print("Model saved in file: %s" % save_path)
                print('Training Data Eval:')
                do_eval(sess, eval_correct, images_pl, labels_pl, data.train)
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, images_pl, labels_pl,
                        data.validation)


if tf.gfile.Exists("summary"):
    tf.gfile.DeleteRecursively("summary")
# input.maybe_download_and_extract()
data = input.get_data()

run_training(data)
Example 11
            # if step % 100 == 0 or step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
            #     print('Training Data Eval:')
            #     do_eval(sess, eval_correct, images_pl, labels_pl, data.train)
            #
            #     print('Validation Data Eval:')
            #     do_eval(sess, eval_correct, images_pl, labels_pl, data.validation)

            if step == N_EPOCH * (DS_SIZE // BATCH_SIZE) - 1:
                print('Test Data Eval:')
                do_eval(sess, eval_correct, images_pl, labels_pl, data.test)

            # Save the model checkpoint periodically.
            if step % 500 == 0 or step == N_EPOCH * (DS_SIZE //
                                                     BATCH_SIZE) - 1:
                checkpoint_path = CHECKPOINT_DIR
                saver.save(sess, checkpoint_path, global_step=step)


start = time.time()

if tf.gfile.Exists(SUMMARY_DIR):
    tf.gfile.DeleteRecursively(SUMMARY_DIR)
# input.maybe_download_and_extract()
data = input.get_data(num_training=DS_SIZE, num_validation=VALID_SIZE)

run_training(data)
end = time.time()
# total = (end-start)/3600
# print("Total time:", total, "hrs")
print("Total time:", (end - start), "secs")
Example 12
    with tf.variable_scope('class_loss') as scope:
        classes_diff = pred_classes - real_classes
        # print 'classes_diff shape ', classes_diff
        class_loss = tf.mul(objects_in_grids, tf.reduce_sum(tf.square(classes_diff)))
        # print 'class_loss shape : ', class_loss.get_shape()
    with tf.variable_scope('total_loss') as scope:
        total_loss = coord_loss + obj_loss + noobj_loss + class_loss
        # print 'total_loss shape : ', total_loss.get_shape()
        loss = tf.reduce_mean(tf.reduce_sum(total_loss, reduction_indices=[1, 2, 3]), reduction_indices=0)
    tf.scalar_summary('loss',loss)

learning_rate = tf.placeholder(tf.float32)
train_step = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss)


test_x, test_y = input.get_data()
#def train_network(net):
sess = tf.Session()
mWriter = tf.train.SummaryWriter('log', sess.graph)
mop = tf.merge_all_summaries()
sess.run(tf.initialize_all_variables())
for epoch in range(135):
    #lr = get_learning_rate(epoch)
    #print 'lr = ',lr
    #weights_file = 'model.ckpt'
    #print 'weights_file = ', weights_file
    #print tf.train_variables()
    for i in range(16):
        index = i * 2
        xx = test_x[i:i+1]
        yy = test_y[i:i + 1]
Example 13
'''
box1 : ? * 7 * 7 * 4
box2 : ? * 7 * 7 * 4
real_box : ? * 4
return :
     ? * 7 * 7 * 2
'''


def cal_ious(box1, box2, real_box):
    IOUS_1 = tf.reshape(cal_iou(box1, real_box), [-1, 7, 7, 1])
    IOUS_2 = tf.reshape(cal_iou(box2, real_box), [-1, 7, 7, 1])
    res = tf.concat(3, [IOUS_1, IOUS_2])
    print 'IOUS_SHAPE : ', res.get_shape()
    return res


if __name__ == '__main__':
    yolo_tiny = YOLO_TF()
    yolo_tiny.build_networks()
    yolo_tiny.init_network()
    yolo_tiny.set_data(input.get_data())
    yolo_tiny.restore_weights(
        "/home/starsea/tensorflow/yolo/weights/yolo-tiny-epoch-6.ckpt")
    '''while True:
        path = raw_input("Enter image path: ");
        print "predicting ", path
        yolo_tiny.predict(path)'''
    #yolo_tiny.predict('/home/starsea/data/VOC2007/JPEGImages/000001.jpg')
    yolo_tiny.train()
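
The docstring above fixes the shape contract for cal_ious: two candidate boxes for each cell of a 7x7 grid, each scored against the same ground-truth box, with the two IoU maps stacked along the last axis. A minimal NumPy sketch of that contract, using a hypothetical zero-valued stand-in (dummy_iou) rather than the real cal_iou computation, is:

import numpy as np

def dummy_iou(boxes, real_box):
    # Hypothetical stand-in for cal_iou: returns a (batch, 7, 7) map of zeros.
    return np.zeros((boxes.shape[0], 7, 7), dtype=np.float32)

batch = 2
box1 = np.zeros((batch, 7, 7, 4), dtype=np.float32)  # first predicted box per cell
box2 = np.zeros((batch, 7, 7, 4), dtype=np.float32)  # second predicted box per cell
real_box = np.zeros((batch, 4), dtype=np.float32)    # one ground-truth box per image

ious_1 = dummy_iou(box1, real_box).reshape(-1, 7, 7, 1)
ious_2 = dummy_iou(box2, real_box).reshape(-1, 7, 7, 1)
res = np.concatenate([ious_1, ious_2], axis=3)  # same axis as tf.concat(3, ...) above
print(res.shape)  # (2, 7, 7, 2)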
Example 14
parser.add_argument('--num_epochs',
                    type=int,
                    default=1,
                    help='Number of training epochs between testing.')

parser.add_argument('--num_runs',
                    type=int,
                    default=10,
                    help='Number of train/test cycles.')

FLAGS = parser.parse_args()

# Get LFW dataset
((train_data, train_size),
 (test_data, test_size)), label_lookup = get_data(FLAGS.data_dir)

# PARAMETERS
NUM_LABELS = len(label_lookup)
SIZE_INPUT = 64


def get_weight(name, shape, stddev_):
    return tf.get_variable(name,
                           shape,
                           initializer=tf.truncated_normal_initializer(
                               stddev=stddev_, dtype=tf.float32),
                           dtype=tf.float32)


def get_bias(name, shape, initializer_):
Example 15
import fileinput
from input import get_data
import sys

data = get_data(sys.argv[1])
input = fileinput.input(sys.argv[2])
time = int(input[0])
t = []
for line in input:
    line_numbers = [int(jobs) for jobs in line.split()]
    t.append(line_numbers)
workstations = len(t[0])
print("workstations number = ", workstations)

times = []
for ws in range(workstations):
    for item in t:
        times.append(item[ws])
    now = 0
    done_time = []
    print("WORKSTATION ", ws)
    for i in range(len(times)):
        if now not in times:
            print("F**K IT in", i)
            # sys.exit()
        else:
            print("time is ", now)
            running = times.index(now)
            print("running job ", running)
            x = data.matrix[running][ws + 1]
            now = x + now