Example #1
def main(_):
    # Create the input placeholder
    x_image = tf.placeholder(tf.float32, [batch_size, input_res, input_res, input_channels])

    # Define loss and optimizer
    y_image_ = tf.placeholder(tf.float32, [batch_size, input_res, input_res, 1])

    y_image, mode_training = model.make_unet(x_image=x_image)

    total_loss = loss.cross_entropy(y_image, y_image_)

    # Dataset
    test_dataset_filename = os.path.join(TFRECORDS_DIR, "test.tfrecord")
    test_images, test_polygons, test_raster_polygons = dataset.read_and_decode(test_dataset_filename, input_res,
                                                                               output_vertex_count, batch_size,
                                                                               INPUT_DYNAMIC_RANGE,
                                                                               augment_dataset=False)

    # Saver
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Restore checkpoint if one exists
        checkpoint = tf.train.get_checkpoint_state(CHECKPOINTS_DIR)
        if checkpoint and checkpoint.model_checkpoint_path:  # First check if the whole model has a checkpoint
            print("Restoring {} checkpoint {}".format(model_name, checkpoint.model_checkpoint_path))
            saver.restore(sess, checkpoint.model_checkpoint_path)
        else:
            print("No checkpoint was found, exiting...")
            exit()

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
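        # (the queue runners just started fill the TFRecord input pipeline in
        # background threads; without them, sess.run on the dataset tensors hangs)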

        test_image_batch, test_polygon_batch, test_raster_polygon_batch = sess.run([test_images, test_polygons, test_raster_polygons])
        test_loss, test_y_image_batch = sess.run(
            [total_loss, y_image],
            feed_dict={
                x_image: test_image_batch,
                y_image_: test_raster_polygon_batch,
                mode_training: False  # run the network in inference mode at test time
            })

        print("Test loss= {}".format(test_loss))

        # Threshold output
        test_raster_polygon_batch = test_raster_polygon_batch > 0.5
        test_y_image_batch = test_y_image_batch > 0.5

        # Polygonize
        print("Polygonizing...")
        y_coord_batch_list = []
        for test_raster_polygon, test_y_image in zip(test_raster_polygon_batch, test_y_image_batch):
            test_raster_polygon = test_raster_polygon[:, :, 0]
            test_y_image = test_y_image[:, :, 0]

            # Select only one blob
            seed = np.logical_and(test_raster_polygon, test_y_image)
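            # Morphological reconstruction by dilation grows the seed within the
            # mask, so only predicted blobs overlapping the ground truth survive.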
            test_y_image = skimage.morphology.reconstruction(seed, test_y_image, method='dilation')

            # Vectorize
            test_y_coords = polygon_utils.raster_to_polygon(test_y_image, output_vertex_count)
            y_coord_batch_list.append(test_y_coords)
        y_coord_batch = np.array(y_coord_batch_list)

        # Normalize
        y_coord_batch = y_coord_batch / input_res

        if not os.path.exists(SAVE_DIR):
            os.makedirs(SAVE_DIR)
        save_results(test_image_batch, test_polygon_batch, y_coord_batch, SAVE_DIR)

        coord.request_stop()
        coord.join(threads)
Example #2
def main(_):
    # Create the input placeholder
    x_image = tf.placeholder(
        tf.float32, [batch_size, input_res, input_res, input_channels])

    # Define loss and optimizer
    y_image_ = tf.placeholder(tf.float32,
                              [batch_size, input_res, input_res, 1])

    y_image, mode_training = model.make_unet(x_image=x_image)

    # Build the objective loss part of the graph
    total_loss = loss.cross_entropy(y_image, y_image_)
    tf.summary.scalar('total_loss', total_loss)

    global_step = tf.Variable(0,
                              dtype=tf.int32,
                              trainable=False,
                              name='global_step')
    learning_rate = tf.train.piecewise_constant(
        global_step, LEARNING_RATE_PARAMS["boundaries"],
        LEARNING_RATE_PARAMS["values"])

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(
            total_loss, global_step=global_step)

    # Summaries
    merged_summaries = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(os.path.join(LOGS_DIR, "train"),
                                         tf.get_default_graph())
    val_writer = tf.summary.FileWriter(os.path.join(LOGS_DIR, "val"),
                                       tf.get_default_graph())

    # Dataset
    train_dataset_filename = os.path.join(TFRECORDS_DIR, "train.tfrecord")
    train_images, train_polygons, train_raster_polygons = dataset.read_and_decode(
        train_dataset_filename, input_res, output_vertex_count, batch_size,
        INPUT_DYNAMIC_RANGE)
    val_dataset_filename = os.path.join(TFRECORDS_DIR, "val.tfrecord")
    val_images, val_polygons, val_raster_polygons = dataset.read_and_decode(
        val_dataset_filename,
        input_res,
        output_vertex_count,
        batch_size,
        INPUT_DYNAMIC_RANGE,
        augment_dataset=False)

    # Savers
    saver = tf.train.Saver()

    # The op for initializing the variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)

        # Restore checkpoint if one exists
        checkpoint = tf.train.get_checkpoint_state(CHECKPOINTS_DIR)
        if checkpoint and checkpoint.model_checkpoint_path:  # First check if the whole model has a checkpoint
            print("Restoring {} checkpoint {}".format(
                model_name, checkpoint.model_checkpoint_path))
            saver.restore(sess, checkpoint.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        init_plots()

        print("Model has {} trainable variables".format(
            tf_utils.count_number_trainable_params()))

        i = tf.train.global_step(sess, global_step)
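        # i is read back from the graph's global_step variable, so training
        # resumes from the last checkpointed iteration after a restore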
        while i <= max_iter:
            train_image_batch, train_polygon_batch, train_raster_polygon_batch = sess.run(
                [train_images, train_polygons, train_raster_polygons])
            if i % train_loss_accuracy_steps == 0:
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                train_summary, _, train_loss, train_y_image = sess.run(
                    [merged_summaries, train_step, total_loss, y_image],
                    feed_dict={
                        x_image: train_image_batch,
                        y_image_: train_raster_polygon_batch,
                        mode_training: True
                    },
                    options=run_options,
                    run_metadata=run_metadata)
                train_writer.add_summary(train_summary, i)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
                print('step %d, training loss = %g' % (i, train_loss))
                plot_results(1, train_image_batch, train_polygon_batch,
                             train_y_image)
            else:
                _ = sess.run(
                    [train_step],
                    feed_dict={
                        x_image: train_image_batch,
                        y_image_: train_raster_polygon_batch,
                        mode_training: True
                    })

            # Measure validation loss and accuracy
            if i % val_loss_accuracy_steps == 1:
                val_image_batch, val_polygon_batch, val_raster_polygon_batch = sess.run(
                    [val_images, val_polygons, val_raster_polygons])
                val_summary, val_loss, val_y_image = sess.run(
                    [merged_summaries, total_loss, y_image],
                    feed_dict={
                        x_image: val_image_batch,
                        y_image_: val_raster_polygon_batch,
                        mode_training: False  # evaluate in inference mode
                    })
                val_writer.add_summary(val_summary, i)

                print('step %d, validation loss = %g' % (i, val_loss))
                plot_results(2, val_image_batch, val_polygon_batch,
                             val_y_image)

            # Save checkpoint
            if i % checkpoint_steps == (checkpoint_steps - 1):
                saver.save(sess,
                           os.path.join(CHECKPOINTS_DIR, model_name),
                           global_step=global_step)

            i = tf.train.global_step(sess, global_step)

        coord.request_stop()
        coord.join(threads)

        train_writer.close()
        val_writer.close()
Example #3
J = 3
x = tf.placeholder("float", [None,None,None,3])
y = tf.placeholder("float", [None,None,None,3])
reconstruction = Net.RecNet(x,J)
#reconstruction = OtherNet.OtherNet(x)
print("模型完成")
# COST
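# (mean squared error between the network reconstruction and the clean target)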
cost = tf.reduce_mean(tf.square(reconstruction-y))
# OPTIMIZER
optm = tf.train.AdamOptimizer(0.01).minimize(cost)

epochs = 10
batch_size = 100
n_example = 14000
disp_step = 10
label, noise = dataset.read_and_decode(["data.tfrecords"])
label_batch, noise_batch = tf.train.shuffle_batch([label, noise],
                                                  batch_size=batch_size,
                                                  capacity=n_example,
                                                  min_after_dequeue=1000)
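# capacity bounds the queue size; min_after_dequeue is how many elements stay
# queued after each dequeue and controls how well batches are shuffled.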

init_op = tf.global_variables_initializer()

testdatapath='data/test/'
outputpath='tmp/test/'

savedir = "tmp/"
saver   = tf.train.Saver(max_to_keep=1)
with tf.Session() as sess:  # start a session
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
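
The snippet stops right after starting the queue runners. A minimal sketch of a continuation, assuming the cost, optm, saver, and batch tensors defined above:

    for epoch in range(epochs):
        for _ in range(n_example // batch_size):
            # Pull a shuffled batch and run one optimization step on it
            labels, noises = sess.run([label_batch, noise_batch])
            _, c = sess.run([optm, cost], feed_dict={x: noises, y: labels})
        if epoch % disp_step == 0:
            print("epoch %d, cost = %g" % (epoch, c))
            saver.save(sess, savedir + "model.ckpt", global_step=epoch)

    coord.request_stop()
    coord.join(threads)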
Example #4
def train():

    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)

        # This is the model section
        #images, labels = cifar10.distorted_inputs()
        train_path = "C:/Oops!yhy/Learning/Graduation_Project/inception-v3/inception-v3/mytrain.tfrecords2"
        #   data = dataset.create_record(cwd, classes, train_path, 2)  # read it directly from the model instead
        img, label = dataset.read_and_decode(train_path)

        #inception_tensor = graph.get_tensor_by_name('mixed_10/join:0')  # fetch this layer's output
        inception_tensor, img_tensor = readModel.train_with_inception()

        #inception_ans = sess.run([inception_tensor, {'DecodeJpeg/contents:0':img}])
        #inception_ans = tf.Variable(tf.random_normal([1, 8, 8, 2048]),name='inception_ans')
        #inception_ans = tf.placeholder(tf.float32,shape=(1, 8, 8, 2048),name='inception_ans')
        inception = tf.reshape(inception_tensor, [8, 8, 2048])

        print("step(之前)!___训练成一个batch!", inception.shape)  # batch_size

        # Build the training batch
        min_fraction_of_examples_in_queue = 0.4
        min_queue_examples = int(dataset.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                                 min_fraction_of_examples_in_queue)
        num_preprocess_threads = 16
        images, label_batch = tf.train.shuffle_batch(
            [inception, label],  # this creates a queue
            batch_size=FLAGS.batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * FLAGS.batch_size,
            min_after_dequeue=min_queue_examples)

        labels = tf.reshape(label_batch, [FLAGS.batch_size])
        tf.summary.image('images', images)
        print("step1!___", images.shape, labels.shape)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = upLevel.inference(images)
        print("step3!___", logits)  # batch_size

        # Calculate loss.
        loss = upLevel.loss(logits, labels, label_batch)
        print("step4!___", loss)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = upLevel.train(loss, global_step)
        print("step5!___")

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())
        print("step6!___")

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        #sess = tf.Session(config=tf.ConfigProto(
        #    log_device_placement=FLAGS.log_device_placement))

        #with tf.Session() as sess:
        sess = tf.Session()
        sess.run(init)
        #inception_ans = sess.run([inception_tensor, {'DecodeJpeg/contents:0':img}])
        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(train_dir, sess.graph)

        for step in range(FLAGS.max_steps):
            start_time = time.time()
            # Feed the raw encoded JPEG bytes into the pre-trained inception
            # graph ('DecodeJpeg/contents:0' expects an encoded image string)
            val = sess.run(inception_tensor,
                           feed_dict={'DecodeJpeg/contents:0': img})
            print("Does this work?", val)
            _, loss_value = sess.run([train_op, loss])
            print("loss =", loss_value)

            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Example #5
def main(_):
    # Create the input placeholder
    x_image = tf.placeholder(
        tf.float32, [batch_size, input_res, input_res, input_channels])

    # Define loss and optimizer
    y_coords_ = tf.placeholder(tf.float32,
                               [batch_size, output_vertex_count, 2])

    y_coords, keep_prob = model.feature_extractor_polygon_regressor(
        x_image=x_image,
        feature_extractor_name=FEATURE_EXTRACTOR_PARAMS["name"],
        encoding_length=ENCODING_LENGTH,
        output_vertex_count=output_vertex_count)

    # Build the objective loss function as well as the accuracy parts of the graph
    _, accuracy, accuracy2, accuracy3 = loss.loss_and_accuracy(
        y_coords_, y_coords, batch_size, correct_dist_threshold)

    # Dataset
    test_dataset_filename = os.path.join(TFRECORDS_DIR, "test.tfrecord")
    test_images, test_polygons = dataset.read_and_decode(test_dataset_filename,
                                                         input_res,
                                                         output_vertex_count,
                                                         batch_size,
                                                         INPUT_DYNAMIC_RANGE,
                                                         augment_dataset=False)

    # Saver
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Restore checkpoint if one exists
        checkpoint = tf.train.get_checkpoint_state(CHECKPOINTS_DIR)
        if checkpoint and checkpoint.model_checkpoint_path:  # First check if the whole model has a checkpoint
            print("Restoring {} checkpoint {}".format(
                model_name, checkpoint.model_checkpoint_path))
            saver.restore(sess, checkpoint.model_checkpoint_path)
        else:
            print("No checkpoint was found, exiting...")
            exit()

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        test_image_batch, test_polygon_batch = sess.run(
            [test_images, test_polygons])
        test_accuracy, test_accuracy2, test_accuracy3, test_y_coords = sess.run(
            [accuracy, accuracy2, accuracy3, y_coords],
            feed_dict={
                x_image: test_image_batch,
                y_coords_: test_polygon_batch,
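                # a keep_prob of 1.0 disables dropout at inference time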
                keep_prob: 1.0
            })

        print('Test accuracy = %g accuracy2 = %g accuracy3 = %g' %
              (test_accuracy, test_accuracy2, test_accuracy3))

        if not os.path.exists(SAVE_DIR):
            os.makedirs(SAVE_DIR)
        save_results(test_image_batch, test_polygon_batch, test_y_coords,
                     SAVE_DIR)

        coord.request_stop()
        coord.join(threads)
Example #6
def main(_):
    # Create the input placeholder
    x_image = tf.placeholder(
        tf.float32, [batch_size, input_res, input_res, input_channels])

    # Define loss and optimizer
    y_coords_ = tf.placeholder(tf.float32,
                               [batch_size, output_vertex_count, 2])

    # Build the graph for the deep net
    # y_coords, keep_prob = model.polygon_regressor(x_image=x_image, input_res=input_res, input_channels=input_channels,
    #                                               encoding_length=encoding_length,
    #                                               output_vertex_count=output_vertex_count, weight_decay=weight_decay)

    y_coords, keep_prob = model.feature_extractor_polygon_regressor(
        x_image=x_image,
        feature_extractor_name=FEATURE_EXTRACTOR_PARAMS["name"],
        encoding_length=ENCODING_LENGTH,
        output_vertex_count=output_vertex_count,
        weight_decay=weight_decay)

    # Build the objective loss function as well as the accuracy parts of the graph
    objective_loss, accuracy, accuracy2, accuracy3 = loss.loss_and_accuracy(
        y_coords_, y_coords, batch_size, correct_dist_threshold)
    tf.add_to_collection('losses', objective_loss)

    # Add all losses (objective loss + weight decay loss for now)
    total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    tf.summary.scalar('total_loss', total_loss)

    global_step = tf.Variable(0,
                              dtype=tf.int32,
                              trainable=False,
                              name='global_step')
    learning_rate = tf.train.piecewise_constant(
        global_step, LEARNING_RATE_PARAMS["boundaries"],
        LEARNING_RATE_PARAMS["values"])
    feature_extractor_learning_rate = tf.train.piecewise_constant(
        global_step, FEATURE_EXTRACTOR_LEARNING_RATE_PARAMS["boundaries"],
        FEATURE_EXTRACTOR_LEARNING_RATE_PARAMS["values"])
    decoder_learning_rate = tf.train.piecewise_constant(
        global_step, DECODER_LEARNING_RATE_PARAMS["boundaries"],
        DECODER_LEARNING_RATE_PARAMS["values"])

    # Choose the ensemble of variables to train
    trainable_variables = tf.trainable_variables()
    feature_extractor_variables = []
    decoder_variables = []
    other_variables = []
    for var in trainable_variables:
        if var.name.startswith(FEATURE_EXTRACTOR_PARAMS["name"]):
            feature_extractor_variables.append(var)
        elif var.name.startswith(DECODER_PARAMS["name"]):
            decoder_variables.append(var)
        else:
            other_variables.append(var)

    with tf.name_scope('adam_optimizer'):
        # Use three separate optimizers so the three sub-graphs can be trained with different learning rates
        other_train_op = tf.train.AdamOptimizer(learning_rate)
        feature_extractor_train_op = tf.train.AdamOptimizer(
            feature_extractor_learning_rate)
        decoder_train_op = tf.train.AdamOptimizer(decoder_learning_rate)
        grads = tf.gradients(
            total_loss,
            other_variables + feature_extractor_variables + decoder_variables)
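        # tf.gradients returns gradients in the same order as the variable list
        # ([other..., feature_extractor..., decoder...]), so slice accordingly.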
        feature_extractor_variable_count = len(feature_extractor_variables)
        other_variable_count = len(other_variables)
        other_grads = grads[:other_variable_count]
        feature_extractor_grads = grads[
            other_variable_count:other_variable_count +
            feature_extractor_variable_count]
        decoder_grads = grads[other_variable_count +
                              feature_extractor_variable_count:]
        other_train_step = other_train_op.apply_gradients(
            zip(other_grads, other_variables))
        feature_extractor_train_step = feature_extractor_train_op.apply_gradients(
            zip(feature_extractor_grads, feature_extractor_variables))
        decoder_train_step = decoder_train_op.apply_gradients(
            zip(decoder_grads, decoder_variables))
        train_step = tf.group(other_train_step,
                              feature_extractor_train_step, decoder_train_step,
                              tf.assign_add(global_step, 1))

    # Summaries
    merged_summaries = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(os.path.join(LOGS_DIR, "train"),
                                         tf.get_default_graph())
    val_writer = tf.summary.FileWriter(os.path.join(LOGS_DIR, "val"),
                                       tf.get_default_graph())

    # Dataset
    train_dataset_filename = os.path.join(TFRECORDS_DIR, "train.tfrecord")
    train_images, train_polygons = dataset.read_and_decode(
        train_dataset_filename, input_res, output_vertex_count, batch_size,
        INPUT_DYNAMIC_RANGE)
    val_dataset_filename = os.path.join(TFRECORDS_DIR, "val.tfrecord")
    val_images, val_polygons = dataset.read_and_decode(val_dataset_filename,
                                                       input_res,
                                                       output_vertex_count,
                                                       batch_size,
                                                       INPUT_DYNAMIC_RANGE,
                                                       augment_dataset=False)

    # Savers
    feature_extractor_saver = tf.train.Saver(feature_extractor_variables)
    decoder_saver = tf.train.Saver(decoder_variables)
    saver = tf.train.Saver()

    # The op for initializing the variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)

        # Restore checkpoint if one exists
        checkpoint = tf.train.get_checkpoint_state(CHECKPOINTS_DIR)
        if checkpoint and checkpoint.model_checkpoint_path:  # First check if the whole model has a checkpoint
            print("Restoring {} checkpoint {}".format(
                model_name, checkpoint.model_checkpoint_path))
            saver.restore(sess, checkpoint.model_checkpoint_path)
        else:
            # Else load pre-trained parts of the model as initialisation
            if FEATURE_EXTRACTOR_PARAMS["use_pretrained_weights"]:
                print(
                    "Initializing feature extractor variables from checkpoint {}"
                    .format(FEATURE_EXTRACTOR_PARAMS["checkpoint_filepath"]))
                feature_extractor_saver.restore(
                    sess, FEATURE_EXTRACTOR_PARAMS["checkpoint_filepath"])
            if DECODER_PARAMS["use_pretrained_weights"]:
                print(
                    "Initializing Decoder variables from directory {}".format(
                        DECODER_PARAMS["checkpoint_dir"]))
                checkpoint = tf.train.get_checkpoint_state(
                    DECODER_PARAMS["checkpoint_dir"])
                if checkpoint and checkpoint.model_checkpoint_path:  # First check if the whole model has a checkpoint
                    print("Restoring checkpoint {}".format(
                        checkpoint.model_checkpoint_path))
                    decoder_saver.restore(sess,
                                          checkpoint.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        init_plots()

        print("Model has {} trainable variables".format(
            tf_utils.count_number_trainable_params(
                trainable_variables=trainable_variables)))

        i = tf.train.global_step(sess, global_step)
        while i <= max_iter:
            train_image_batch, train_polygon_batch = sess.run(
                [train_images, train_polygons])
            if i % train_loss_accuracy_steps == 0:
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                train_summary, _, train_loss, train_accuracy, train_accuracy2, train_accuracy3, train_y_coords = sess.run(
                    [
                        merged_summaries, train_step, total_loss, accuracy,
                        accuracy2, accuracy3, y_coords
                    ],
                    feed_dict={
                        x_image: train_image_batch,
                        y_coords_: train_polygon_batch,
                        keep_prob: dropout_keep_prob
                    },
                    options=run_options,
                    run_metadata=run_metadata)
                train_writer.add_summary(train_summary, i)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
                print(
                    'step %d, training loss = %g, accuracy = %g accuracy2 = %g accuracy3 = %g'
                    % (i, train_loss, train_accuracy, train_accuracy2,
                       train_accuracy3))
                plot_results(1, train_image_batch, train_polygon_batch,
                             train_y_coords)
            else:
                _ = sess.run(
                    [train_step],
                    feed_dict={
                        x_image: train_image_batch,
                        y_coords_: train_polygon_batch,
                        keep_prob: dropout_keep_prob
                    })

            # Measure validation loss and accuracy
            if i % val_loss_accuracy_steps == 1:
                val_image_batch, val_polygon_batch = sess.run(
                    [val_images, val_polygons])
                val_summary, val_loss, val_accuracy, val_accuracy2, val_accuracy3, val_y_coords = sess.run(
                    [
                        merged_summaries, total_loss, accuracy, accuracy2,
                        accuracy3, y_coords
                    ],
                    feed_dict={
                        x_image: val_image_batch,
                        y_coords_: val_polygon_batch,
                        keep_prob: 1.0
                    })
                val_writer.add_summary(val_summary, i)

                print(
                    'step %d, validation loss = %g, accuracy = %g, accuracy2 = %g, accuracy3 = %g'
                    %
                    (i, val_loss, val_accuracy, val_accuracy2, val_accuracy3))
                plot_results(2, val_image_batch, val_polygon_batch,
                             val_y_coords)

            # Save checkpoint
            if i % checkpoint_steps == (checkpoint_steps - 1):
                saver.save(sess,
                           os.path.join(CHECKPOINTS_DIR, model_name),
                           global_step=global_step)

            i = tf.train.global_step(sess, global_step)

        coord.request_stop()
        coord.join(threads)

        train_writer.close()
        val_writer.close()
Example #7
def main(_):
    # Create the model
    x_image = tf.placeholder(tf.float32, [batch_size, input_res, input_res, 1])

    # Define loss and optimizer
    y_coords_ = tf.placeholder(tf.float32,
                               [batch_size, output_vertex_count, 2])

    # Build the graph for the deep net
    y_coords, keep_prob = polygon_encoder_decoder(
        x_image=x_image,
        input_res=input_res,
        encoding_length=encoding_length,
        output_vertex_count=output_vertex_count,
        weight_decay=weight_decay)

    # Build the objective loss function as well as the accuracy parts of the graph
    objective_loss, accuracy = loss.loss_and_accuracy(y_coords_, y_coords,
                                                      batch_size,
                                                      correct_dist_threshold)
    tf.add_to_collection('losses', objective_loss)

    # Add all losses (objective loss + weight decay loss for now)
    total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    tf.summary.scalar('total_loss', total_loss)

    global_step = tf.Variable(0,
                              dtype=tf.int32,
                              trainable=False,
                              name='global_step')

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(
            total_loss, global_step=global_step)

    # Summaries
    merged_summaries = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(os.path.join(LOGS_DIR, "train"),
                                         tf.get_default_graph())
    val_writer = tf.summary.FileWriter(os.path.join(LOGS_DIR, "val"),
                                       tf.get_default_graph())

    # Dataset
    train_dataset_filename = os.path.join(DATASET_DIR, "train.tfrecord")
    train_images, train_polygons = dataset.read_and_decode(
        train_dataset_filename, input_res, output_vertex_count, batch_size,
        INPUT_DYNAMIC_RANGE)
    val_dataset_filename = os.path.join(DATASET_DIR, "val.tfrecord")
    val_images, val_polygons = dataset.read_and_decode(val_dataset_filename,
                                                       input_res,
                                                       output_vertex_count,
                                                       batch_size,
                                                       INPUT_DYNAMIC_RANGE)

    # Savers
    saver = tf.train.Saver()

    # The op for initializing the variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)

        # Restore checkpoint if one exists
        checkpoint = tf.train.get_checkpoint_state(CHECKPOINTS_DIR)
        if checkpoint and checkpoint.model_checkpoint_path:  # First check if the whole model has a checkpoint
            print("Restoring {} checkpoint {}".format(
                MODEL_NAME, checkpoint.model_checkpoint_path))
            saver.restore(sess, checkpoint.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        # init_plots()

        print("Model has {} trainable variables".format(
            tf_utils.count_number_trainable_params()))

        i = tf.train.global_step(sess, global_step)
        while i <= max_iter:
            train_image_batch, train_polygon_batch = sess.run(
                [train_images, train_polygons])
            if i % train_loss_accuracy_steps == 0:
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                train_summary, _, train_loss, train_accuracy, train_y_coords = sess.run(
                    [
                        merged_summaries, train_step, total_loss, accuracy,
                        y_coords
                    ],
                    feed_dict={
                        x_image: train_image_batch,
                        y_coords_: train_polygon_batch,
                        keep_prob: dropout_keep_prob
                    },
                    options=run_options,
                    run_metadata=run_metadata)
                train_writer.add_summary(train_summary, i)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
                print('step %d, training loss = %g, accuracy = %g' %
                      (i, train_loss, train_accuracy))
                # plot_results(1, train_image_batch, train_polygon_batch, train_y_coords)
            else:
                _ = sess.run(
                    [train_step],
                    feed_dict={
                        x_image: train_image_batch,
                        y_coords_: train_polygon_batch,
                        keep_prob: dropout_keep_prob
                    })

            # Measure validation loss and accuracy
            if i % val_loss_accuracy_steps == 1:
                val_image_batch, val_polygon_batch = sess.run(
                    [val_images, val_polygons])
                val_summary, val_loss, val_accuracy, val_y_coords = sess.run(
                    [merged_summaries, total_loss, accuracy, y_coords],
                    feed_dict={
                        x_image: val_image_batch,
                        y_coords_: val_polygon_batch,
                        keep_prob: 1.0
                    })
                val_writer.add_summary(val_summary, i)

                print('step %d, validation loss = %g, accuracy = %g' %
                      (i, val_loss, val_accuracy))
                # plot_results(2, val_image_batch, val_polygon_batch, val_y_coords)

            # Save checkpoint
            if i % checkpoint_steps == (checkpoint_steps - 1):
                saver.save(sess,
                           os.path.join(CHECKPOINTS_DIR, MODEL_NAME),
                           global_step=global_step)

            i = tf.train.global_step(sess, global_step)

        coord.request_stop()
        coord.join(threads)

        train_writer.close()
        val_writer.close()