    def __init__(self):
        self.accum_crop = 0
        self.accum_process = 0
        self.s = tf.placeholder(dtype=tf.float32, shape=[None, 16, 16, 3])
        self.m = tf.placeholder(dtype=tf.float32, shape=[None, 16, 16, 3])
        self.b = tf.placeholder(dtype=tf.float32, shape=[None, 16, 16, 3])
        self.cls_img = tf.placeholder(dtype=tf.float32,
                                      shape=[None, 32, 32, 3])
        self.det_gt = tf.placeholder(dtype=tf.float32, shape=[None, 2])
        self.cls_gt = tf.placeholder(dtype=tf.float32, shape=[None, 4])
        # referenced below but missing from the snippet; assumed placeholders
        self.keep_prob = tf.placeholder(dtype=tf.float32)
        self.is_training = tf.placeholder(dtype=tf.bool)

        self.det_out, self.det_soft = cnn.deepConcatNet(
            self.s, self.m, self.b, 2, self.keep_prob, self.is_training)
        self.cls_out, self.cls_soft = cnn.deepClassNet(self.cls_img, 4,
                                                       self.keep_prob,
                                                       self.is_training)

        self.det_train = cnn.trainStep(self.det_out, self.det_gt)
        self.det_acc = cnn.accuracy(self.det_out, self.det_gt)
        self.cls_train = cnn.trainStep(self.cls_out, self.cls_gt)
        self.cls_acc = cnn.accuracy(self.cls_out, self.cls_gt)

        self.img = tf.placeholder(dtype=tf.float32, shape=[32, 32, 3])

        self.init = tf.global_variables_initializer()
        self.saver = tf.train.Saver()
        self.restored = False
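Every example here calls an accuracy(logits, labels) helper from its model module. A minimal sketch of such an op in TensorFlow 1.x, assuming one-hot labels; the body below is an illustration, not the actual cnn/nn implementation:

import tensorflow as tf

def accuracy(logits, labels):
    # predicted class = argmax over logits; labels are assumed one-hot,
    # so argmax recovers the true class index
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    # fraction of correct predictions in the batch, as a scalar in [0, 1]
    return tf.reduce_mean(tf.cast(correct, tf.float32))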
Example #2
    csvlist[0].append("loss")

    with tf.Graph().as_default():
        # image tensor
        images_placeholder = tf.placeholder(tf.float32,
                                            shape=(None, nn.IMAGE_PIXELS))
        # label tensor
        labels_placeholder = tf.placeholder(tf.float32,
                                            shape=(None, nn.NUM_CLASSES))
        # dropout keep-probability tensor
        keep_prob = tf.placeholder(tf.float32)

        logits = nn.inference(images_placeholder, keep_prob)
        loss_value = nn.loss(logits, labels_placeholder)
        train_op = nn.training(loss_value, FLAGS.learning_rate)
        acc = nn.accuracy(logits, labels_placeholder)

        saver = tf.train.Saver()
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        # train
        for step in range(FLAGS.max_steps):
            for i in range(len(train_image) // FLAGS.batch_size):
                batch = FLAGS.batch_size * i
                sess.run(train_op,
                         feed_dict={
                             images_placeholder:
                             train_image[batch:batch + FLAGS.batch_size],
                             # the source is truncated here; train_label and
                             # the 0.5 keep probability are assumptions
                             labels_placeholder:
                             train_label[batch:batch + FLAGS.batch_size],
                             keep_prob: 0.5})
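After the inner batch loop, the acc op can be evaluated once per step. A minimal sketch, assuming a train_label list parallel to train_image and keep_prob raised to 1.0 to disable dropout at evaluation time:

            train_accuracy = sess.run(acc,
                                      feed_dict={
                                          images_placeholder: train_image,
                                          labels_placeholder: train_label,
                                          keep_prob: 1.0})
            print("step %d, training accuracy %g" % (step, train_accuracy))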
Example #3
        ch_size=CH_SIZE,
        shuffle=True,
        distorted=True)
"""

#output=mynn.inference2(images,keep_prob,IMAGE_SIZE,CH_SIZE,NUM_CLASS)
output = mynn.inference(images, keep_prob, IMAGE_SIZE, CH_SIZE, NUM_CLASS)
validate = mynn.inference(v_images,
                          keep_prob,
                          IMAGE_SIZE,
                          CH_SIZE,
                          NUM_CLASS,
                          validate=True)
loss = mynn.loss(output, labels)
train_op = mynn.training(loss)
acc = mynn.accuracy(validate, v_labels)

with tf.Session() as sess:
    saver = tf.train.Saver(max_to_keep=0)
    sess.run(tf.global_variables_initializer())
    # get_checkpoint_state takes the checkpoint directory, not the session
    ckpt = tf.train.get_checkpoint_state('/output/')
    print(ckpt)
    saver.restore(sess, '/output/model.ckpt-%s' % (94000))
    # write the graph with a summary FileWriter
    tf.train.start_queue_runners(sess)
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(LOGDIR, graph=sess.graph)
    for step in range(MAX_STEPS):
        start_time = time.time()
        _, loss_result = sess.run([train_op, loss],
                                  feed_dict={keep_prob: 0.98})
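Checkpoints like the model.ckpt-94000 file restored above would come from periodic saver.save calls inside this loop. A sketch; the 1000-step cadence and the summary flush are assumptions:

        if step % 1000 == 0:
            # writes /output/model.ckpt-<step>, the naming restore() expects
            saver.save(sess, '/output/model.ckpt', global_step=step)
            summary_str = sess.run(summary_op, feed_dict={keep_prob: 1.0})
            summary_writer.add_summary(summary_str, step)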
Example #4
def train():
  with tf.Graph().as_default():
    
    log('===== START TRAIN RUN: ' + str(datetime.now()) + '=====')
    
    global_step = tf.Variable(0, trainable=False)
    
    # get examples and labels
    examples, labels = cnn.inputs(data_type='train')

    # build graph to compute logits
    logits = cnn.inference(examples)

    # compute loss
    loss, losses_collection = cnn.loss(logits, labels)
    accuracy = cnn.accuracy(logits, labels)

    # train model with one batch of examples
    train_op = cnn.train(loss, global_step)

    # create saver
    saver = tf.train.Saver(tf.global_variables())
  
    # build summary and init op
    summary_op = tf.summary.merge_all()
    init_op = tf.global_variables_initializer()

    # start session
    # sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
    sess = tf.Session()
    sess.run(init_op)
    
    # start queue runners
    tf.train.start_queue_runners(sess=sess)

    # set up summary writers
    train_writer = tf.summary.FileWriter(config.train_dir, sess.graph)
    
    for step in range(config.max_steps):
      
      start_time = time.time()
      # fetch the per-component losses in the same run as the train step so the
      # breakdown matches the batch that produced loss_value (separate
      # sess.run calls would dequeue fresh batches from the queue runners)
      fetches = [summary_op, loss, accuracy, train_op] + list(losses_collection)
      results = sess.run(fetches)
      summary, loss_value, accuracy_value = results[0], results[1], results[2]
      loss_breakdown = [(str(l.op.name), v)
                        for l, v in zip(losses_collection, results[4:])]
        
      duration = time.time() - start_time

      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      if step % config.summary_every_n_steps == 0: # summaries
        
        examples_per_sec = config.batch_size / duration
        sec_per_batch = float(duration)
        
        train_writer.add_summary(summary, step)

        log_str_1 = ('%s: step %d, loss = %.3f '
                     '(%.2f examples/sec; %.3f sec/batch), accuracy %.3f ') % (
                         datetime.now(), step, loss_value,
                         examples_per_sec, sec_per_batch, accuracy_value)
        log_str_1 += str(loss_breakdown)  # append the per-loss breakdown
        log(log_str_1)

        log("memory usage: {} Mb".format(float(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)/1000000.0))
        

      if (step % config.ckpt_every_n_steps == 0) and (step>0): # save weights to file & validate
        checkpoint_path = os.path.join(config.checkpoint_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
        log("Checkpoint saved at step %d" % step)