Example #1
def run_evaluate(opt_values):
    """
    Instantiates the requested evaluation and architecture
    implementations and runs the evaluation.
    """
    eval_name = opt_values['evaluate_name']
    evaluate_imp = utils.get_implementation(evaluate.Evaluate, eval_name)
    arch_name = opt_values['architecture_name']
    architecture_imp = utils.get_implementation(architecture.Architecture, arch_name)
    evaluate_imp.eval(opt_values, architecture_imp)
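Each of these examples resolves implementations through utils.get_implementation, whose body is not shown here. A minimal sketch of what such a lookup could do, assuming it searches the subclasses of the given base class for one whose class name matches and instantiates it (the real helper may register implementations differently):

def get_implementation(base_class, name):
    # Hypothetical sketch: scan the direct subclasses of base_class
    # and instantiate the one whose class name matches `name`.
    for subclass in base_class.__subclasses__():
        if subclass.__name__ == name:
            return subclass()
    raise ValueError("No implementation named %r for base class %s"
                     % (name, base_class.__name__))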
Example #2
def log(opt_values, execution_dir):  # maybe in another module?
    """
    Stores a .json logfile in execution_dir/Logs,
    based on the execution options opt_values.

    Args:
        opt_values: dictionary with the command line arguments of main.py.
        execution_dir: the directory of the execution being logged.

    Returns:
        Nothing.
    """
    # Get architecture, dataset and loss name
    architecture_name = opt_values['architecture_name']
    dataset_name = opt_values['dataset_name']
    loss_name = opt_values['loss_name']
    optimizer_name = opt_values['optimizer_name']
    # Get implementations
    architecture_imp = utils.get_implementation(architecture.Architecture,
                                                architecture_name)
    dataset_imp = utils.get_implementation(dataset.Dataset, dataset_name)
    loss_imp = utils.get_implementation(loss.Loss, loss_name)
    optimizer_imp = utils.get_implementation(optimizer.Optimizer,
                                             optimizer_name)

    today = time.strftime("%Y-%m-%d_%H:%M")
    log_name = dataset_name + "_" + architecture_name + "_" + loss_name + "_" + \
               opt_values['execution_mode'] + "_" + today + ".json"
    json_data = {}
    if hasattr(architecture_imp, 'config_dict'):
        json_data["architecture"] = architecture_imp.config_dict
    if hasattr(loss_imp, 'config_dict'):
        json_data["loss"] = loss_imp.config_dict
    if hasattr(dataset_imp, 'config_dict'):
        json_data["dataset"] = dataset_imp.config_dict
    if hasattr(optimizer_imp, 'config_dict'):
        json_data["optimizer"] = optimizer_imp.config_dict
    json_data["execution_mode"] = opt_values['execution_mode']
    json_data["architecture_name"] = architecture_name
    json_data["dataset_name"] = dataset_name
    json_data["loss_name"] = loss_name
    json_data["optimizer_name"] = optimizer_name

    log_dir = os.path.join(execution_dir, "Logs")
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    log_path = os.path.join(log_dir, log_name)
    with open(log_path, 'w') as outfile:
        json.dump(json_data, outfile)
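A usage sketch for log; the option keys match the ones read above, but the concrete values and the execution directory are illustrative, not taken from the project:

opt_values = {
    'architecture_name': 'UNet',
    'dataset_name': 'Cityscapes',
    'loss_name': 'CrossEntropy',
    'optimizer_name': 'Adam',
    'execution_mode': 'train',
}
log(opt_values, 'Executions/final_tests/example_run')
# Writes Executions/final_tests/example_run/Logs/
# Cityscapes_UNet_CrossEntropy_train_<timestamp>.json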
Example #3
def run_dataset_manager(opt_values):
    dm_name = opt_values['dataset_manager_name']
    dataset_manager_imp = utils.get_implementation(
        dataset_manager.DatasetManager, dm_name)
    dataset_manager_imp.convert_data()
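The only contract run_dataset_manager relies on is a no-argument convert_data() method. A hypothetical implementation, assuming DatasetManager subclasses are picked up by class name through utils.get_implementation:

class CsvToTfrecordManager(dataset_manager.DatasetManager):
    # Illustrative subclass; the real managers and their conversion
    # logic live elsewhere in the project.
    def convert_data(self):
        print("Converting raw data to the training format...")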
Example #4
def run_training(opt_values):
    """
    Runs the training.

    This function is responsible for instantiating the dataset,
    architecture, loss and optimizer implementations.

    Args:
        opt_values: dictionary containing parameters as keys and arguments as values.

    """
    # Get architecture, dataset and loss name
    arch_name = opt_values['architecture_name']
    dataset_name = opt_values['dataset_name']
    loss_name = opt_values['loss_name']
    optimizer_name = opt_values['optimizer_name']

    time_str = time.strftime("%Y-%m-%d_%H:%M")

    if opt_values["execution_mode"] == "train":
        execution_dir = "Executions/final_tests/" + dataset_name + "/" + arch_name + "/" + loss_name +\
                    "/" + time_str
        os.makedirs(execution_dir)
    elif opt_values["execution_mode"] == "restore":
        execution_dir = opt_values["execution_path"]
    else:
        raise ValueError("Unknown execution_mode: %s" %
                         opt_values["execution_mode"])
    log(opt_values, execution_dir)
    # Create summary
    model_dir = os.path.join(execution_dir, "Model")
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    summary_dir = os.path.join(execution_dir, "Summary")
    if not os.path.isdir(summary_dir):
        os.makedirs(summary_dir)

    # Get implementations
    architecture_imp = utils.get_implementation(architecture.Architecture,
                                                arch_name)
    dataset_imp = utils.get_implementation(dataset.Dataset, dataset_name)
    loss_imp = utils.get_implementation(loss.Loss, loss_name)
    optimizer_imp = utils.get_implementation(optimizer.Optimizer,
                                             optimizer_name)

    # Tell TensorFlow that the model will be built into the default Graph.
    graph = tf.Graph()
    with graph.as_default():
        # Create a session for running operations in the Graph.
        sess = tf.Session()
        # If restoring, load the global step so the dataset resumes
        # from the same point.
        execution_mode = opt_values["execution_mode"]
        if execution_mode == "train":
            initial_step = 0
        else:
            initial_step = tf.train.load_variable(model_dir, "global_step")
        # Input and target output pairs.
        architecture_input, target_output = dataset_imp.next_batch_train(
            initial_step, sess)

        with tf.variable_scope("model"):
            with tf.variable_scope("architecture"):
                architecture_output = architecture_imp.prediction(
                    architecture_input, training=True)
            loss_op = loss_imp.evaluate(architecture_input,
                                        architecture_output, target_output)
        train_op, global_step = training(loss_op, optimizer_imp)

        if loss_imp.trainable():
            loss_tr = loss_imp.train(optimizer_imp)
        # Merge all train summaries and write
        merged = tf.summary.merge_all()
        # Test
        architecture_input_test, target_output_test, init = dataset_imp.next_batch_test()

        with tf.variable_scope("model", reuse=True):
            with tf.variable_scope("architecture", reuse=True):
                architecture_output_test = architecture_imp.prediction(
                    architecture_input_test, training=False)  # TODO: false?
            loss_op_test = loss_imp.evaluate(architecture_input_test,
                                             architecture_output_test,
                                             target_output_test)
        tf_test_loss = tf.placeholder(tf.float32,
                                      shape=(),
                                      name="tf_test_loss")
        test_loss = tf.summary.scalar('test_loss', tf_test_loss)

        train_summary_dir = os.path.join(summary_dir, "Train")
        test_summary_dir = os.path.join(summary_dir, "Test")
        train_writer = tf.summary.FileWriter(train_summary_dir)
        test_writer = tf.summary.FileWriter(test_summary_dir)
        # The op for initializing the variables.
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()
        # Initialize the variables (the trained variables and the
        # epoch counter).
        sess.run(init_op)
        sess.run(init)

        if execution_mode == "restore":
            # Restore variables from disk.
            model_file_path = os.path.join(model_dir, "model.ckpt")
            saver.restore(sess, model_file_path)
            print("Model restored.")

        tensorboard_command = get_tensorboard_command(train_summary_dir,
                                                      test_summary_dir)
        print("To run TensorBoard, execute the following command in the terminal:")
        print(tensorboard_command)
        step = sess.run(global_step)

        try:
            while True:
                start_time = time.time()
                sess.run(init)
                # Run one step of the model.  The return values are
                # the activations from the `train_op` (which is
                # discarded) and the `loss` op.  To inspect the values
                # of your ops or variables, you may include them in
                # the list passed to sess.run() and the value tensors
                # will be returned in the tuple from the call.

                # Warning: Calling the sess.run will advance the dataset
                # to the next batch.
                if step % architecture_imp.get_summary_writing_period() == 0:
                    # Train Discriminator & Generator
                    if loss_imp.trainable():
                        loss_value, _, _, summary = sess.run(
                            [loss_op, loss_tr, train_op, merged])
                    else:
                        loss_value, _, summary = sess.run(
                            [loss_op, train_op, merged])
                else:
                    # Train Discriminator & Generator
                    if loss_imp.trainable():
                        loss_value, _, _ = sess.run(
                            [loss_op, loss_tr, train_op])
                    else:
                        loss_value, _ = sess.run([loss_op, train_op])
                duration = time.time() - start_time
                #print (duration)
                if step % architecture_imp.get_summary_writing_period() == 0:
                    print('Step %d: loss = %.2f (%.3f sec)' %
                          (step, np.mean(loss_value), duration))
                    train_writer.add_summary(summary, step)

                if step % architecture_imp.get_validation_period() == 0:
                    loss_value_sum = 0.0
                    count_test = 0.0
                    try:
                        start_time = time.time()
                        while True:
                            loss_value_test = sess.run(loss_op_test)
                            count_test = count_test + 1
                            loss_value_sum = loss_value_sum + loss_value_test
                    except tf.errors.OutOfRangeError:
                        duration_test = time.time() - start_time
                        print('Done testing. (%.3f sec)' % (duration_test))
                    loss_value_test = loss_value_sum / count_test
                    summary_test = sess.run(
                        test_loss, feed_dict={tf_test_loss: loss_value_test})
                    test_writer.add_summary(summary_test, step)
                if step % architecture_imp.get_model_saving_period() == 0:
                    # Save the variables to disk.
                    save_path = saver.save(sess, os.path.join(model_dir, "model.ckpt"))
                    print("Model saved in file: %s" % save_path)
                step += 1
        except tf.errors.OutOfRangeError:
            print('Done training, %d steps.' % (step))
        finally:
            print("closing")
            sess.close()
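run_training calls a training(loss_op, optimizer_imp) helper that is not shown. A minimal sketch consistent with its call site and return values, assuming the Optimizer wrapper exposes a minimize() method with the usual TensorFlow signature (the project's actual interface may differ):

def training(loss_op, optimizer_imp):
    # Hypothetical sketch: create or fetch the global step and let the
    # optimizer implementation build the update op for loss_op.
    global_step = tf.train.get_or_create_global_step()
    train_op = optimizer_imp.minimize(loss_op, global_step=global_step)
    return train_op, global_step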