Example #1
# Imports this snippet relies on; create_dirs and log_config are
# project-local helpers (a sketch of create_dirs follows this example).
import json
import os
import time

from easydict import EasyDict as edict


def load_config(file_dir, is_train, exp_name="E"):
    with open(file_dir) as f:
        config = json.load(f)
    config = edict(config)
    data_path = config.data_path

    config.is_train = is_train
    if hasattr(config.TRAIN, 'hr_img_path') and hasattr(
            config.TRAIN, 'hr_mid_path'):
        config.TRAIN.hr_img_path = os.path.join(data_path,
                                                config.TRAIN.hr_img_path)
        config.TRAIN.hr_saliency_mask = os.path.join(
            data_path, config.TRAIN.hr_saliency_mask)
        config.TRAIN.hr_saliency_map = os.path.join(
            data_path, config.TRAIN.hr_saliency_map)
        config.TRAIN.hr_mid_path = os.path.join(data_path,
                                                config.TRAIN.hr_mid_path)
        config.VALID.hr_img_path = os.path.join(data_path,
                                                config.VALID.hr_img_path)
        config.VALID.hr_saliency_mask = os.path.join(
            data_path, config.VALID.hr_saliency_mask)
        config.VALID.hr_saliency_map = os.path.join(
            data_path, config.VALID.hr_saliency_map)
        config.VALID.hr_mid_path = os.path.join(data_path,
                                                config.VALID.hr_mid_path)

    if config.is_train and config.exp_name and not config.is_continue_train:
        raise ValueError(
            'when config.exp_name is specified, config.is_continue_train must be True'
        )

    if not config.exp_name:
        config.exp_name = exp_name + time.strftime("_%Y%m%dT%H%M%S",
                                                   time.localtime())

    config.experiment_dir = os.path.join('experiments', config.exp_name)
    config.checkpoint_dir = os.path.join(config.experiment_dir, 'checkpoint')
    config.summary_dir = os.path.join(config.experiment_dir, 'summary')
    config.evaluate_dir = os.path.join(config.experiment_dir, 'evaluate')

    # create the experiments dirs
    create_dirs(
        [config.summary_dir, config.checkpoint_dir, config.evaluate_dir])

    # some default settings...
    if not hasattr(config.MODEL, 'D'):
        config.MODEL.D = 2
    if not hasattr(config.MODEL, 'C'):
        config.MODEL.C = 3
    if not hasattr(config.MODEL, 'degradation_model'):
        config.MODEL.degradation_model = "BI"

    if config.is_train:
        log_config(os.path.join(config.experiment_dir, 'config.json'), config)

    return config
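Every example on this page calls a project-local create_dirs helper that is
not shown. A minimal sketch of its likely contract (make each directory,
ignore ones that already exist); the real project's version may differ:

import os


def create_dirs(dirs):
    """Create each directory in `dirs`, skipping any that already exist."""
    for d in dirs:
        os.makedirs(d, exist_ok=True)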
Example #2
def process_config(json_file):
    """
    Get the json file
    Processing it with EasyDict to be accessible as attributes
    then editing the path of the experiments folder
    creating some important directories in the experiment folder
    Then setup the logging in the whole program
    Then return the config
    :param json_file: the path of the config file
    :return: config object(namespace)
    """
    config, _ = get_config_from_json(json_file)
    print(" THE Configuration of your experiment ..")
    pprint(config)

    # making sure that you have provided the exp_name.
    try:
        print(" *************************************** ")
        print("The experiment name is {}".format(config.exp_name))
        print(" *************************************** ")
    except AttributeError:
        print("ERROR!!..Please provide the exp_name in json file..")
        exit(-1)

    # create some important directories to be used for that experiment.
    config.summary_dir_l2h = os.path.join("experiments", config.exp_name,
                                          "summaries_l2h/")
    config.summary_dir_h2l = os.path.join("experiments", config.exp_name,
                                          "summaries_h2l/")
    config.checkpoint_l2h_dir = os.path.join("experiments", config.exp_name,
                                             "checkpoints_l2h/")
    config.checkpoint_h2l_dir = os.path.join("experiments", config.exp_name,
                                             "checkpoints_h2l/")
    config.checkpoint_combined_dir = os.path.join("experiments",
                                                  config.exp_name,
                                                  "checkpoints_combined/")
    config.out_dir = os.path.join("experiments", config.exp_name, "out/")
    config.log_dir = os.path.join("experiments", config.exp_name, "logs/")
    create_dirs([
        config.summary_dir_l2h, config.summary_dir_h2l,
        config.checkpoint_l2h_dir, config.checkpoint_h2l_dir,
        config.checkpoint_combined_dir, config.out_dir, config.log_dir
    ])

    # setup logging in the project
    setup_logging(config.log_dir)

    logging.getLogger().info("Hi, This is root.")
    logging.getLogger().info(
        "After the configurations are successfully processed and dirs are created."
    )
    logging.getLogger().info("The pipeline of the project will begin now.")

    return config
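Both load_config and process_config lean on two project-local helpers that
are not shown here: get_config_from_json and setup_logging. A minimal sketch
of each, consistent with the call sites above but assumed rather than taken
from the source:

import json
import logging
import os

from easydict import EasyDict


def get_config_from_json(json_file):
    """Parse the JSON file into an attribute-accessible EasyDict.

    Returns the EasyDict plus the raw dict, matching call sites that
    unpack two values: `config, _ = get_config_from_json(...)`.
    """
    with open(json_file, 'r') as f:
        config_dict = json.load(f)
    return EasyDict(config_dict), config_dict


def setup_logging(log_dir):
    # Assumed behaviour: log INFO and above to a file and to the console.
    logging.basicConfig(
        level=logging.INFO,
        handlers=[
            logging.FileHandler(os.path.join(log_dir, 'info.log')),
            logging.StreamHandler(),
        ])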
Example #3
def train():
    # load config file and prepare experiment
    args = get_args()
    config = process_config(args.config)
    create_dirs([config.model_dir, config.tensorboard_dir])

    # load dataset file
    dataset = load_pair_paths(config)

    # split dataset train and test
    train_pairs, test_pairs = split_dataset(config, dataset)

    if config.debug:
        print("WARNING!!! DEBUG MODE ON! 100 training.")
        train_pairs = train_pairs[:100]
        print(train_pairs)
        test_pairs = test_pairs[:100]
        print(test_pairs)

    # Calculate steps for each epoch
    train_num_steps = calculate_num_iter(config, train_pairs)
    test_num_steps = calculate_num_iter(config, test_pairs)

    # Create the model
    model = depth_model(config)

    # set dynamic output shape from the model (drop the batch dimension)
    config.output_size = list(model.output_shape[1:])

    # Create train and test data generators
    train_gen = tf_data_generator(config, train_pairs, is_training=True)
    test_gen = tf_data_generator(config, test_pairs, is_training=False)

    # Prepare for training
    model.compile(optimizer=select_optimizer(config), loss=select_loss(config))

    model.fit(
        train_gen,
        steps_per_epoch=train_num_steps,
        epochs=config.num_epochs,
        callbacks=create_callbacks(config),
        validation_data=test_gen,
        validation_steps=test_num_steps,
        verbose=1)

    print("Training Done.")
Example #4
import pickle

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow.keras.preprocessing import image
# Assumption: ResNet-style preprocessing, per the experiment names below.
from tensorflow.keras.applications.resnet50 import preprocess_input


def extract(config):
    # e_img_path, d_img_path and gt_path are module-level path constants
    # defined elsewhere in the source project.
    model = load_depth_model_from_weights(config)
    out_dir = "../outputs/" + config.exp_name
    create_dirs([out_dir])

    for i in range(6):
        img = image.load_img(e_img_path + "/" + str(i + 1) + '.jpg',
                             target_size=(config.input_size[0],
                                          config.input_size[1]))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        prediction = model.predict(x)
        print("prediction shape", prediction.shape)
        prediction = np.reshape(prediction,
                                [prediction.shape[1], prediction.shape[2]])
        plt.imsave(out_dir + '/ext_pre_depth_' + str(i + 1) + '.jpg',
                   prediction)

    for i in range(6):
        img = image.load_img(d_img_path + "/" + str(i + 1) + '.jpg',
                             target_size=(config.input_size[0],
                                          config.input_size[1]))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        prediction = model.predict(x)
        print("prediction shape", prediction.shape)
        prediction = np.reshape(prediction,
                                [prediction.shape[1], prediction.shape[2]])
        plt.imsave(out_dir + '/dat_pre_depth_' + str(i + 1) + '.jpg',
                   prediction)

    for i in range(6):
        with open(gt_path + "/" + str(i + 1) + '.pkl', 'rb') as fp:
            depth = pickle.load(fp) / 10.0  # rescale the stored depth values
            depth = Image.fromarray(depth)
            depth = np.array(depth.resize((160, 112)))
            plt.imsave(out_dir + '/dat_gt_depth_' + str(i + 1) + '.jpg',
                       depth)

    del model
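extract() assumes a load_depth_model_from_weights helper. A hedged sketch
consistent with Example #6 below, which sets config.model_dir and
config.prediction_model_name before calling extract (the checkpoint layout
is an assumption):

import os


def load_depth_model_from_weights(config):
    # Rebuild the architecture, then restore the saved checkpoint weights.
    model = depth_model(config)
    model.load_weights(os.path.join(config.model_dir,
                                    config.prediction_model_name))
    return model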
Example #5
import dirs
import random_el

dirs.create_dirs()

input('Directories created; enter any character to continue')

dirs.delete_dirs()

input('Directories deleted; enter any character to continue')

items = [75, 48, 345, 7638, 6587, 256]
print(random_el.get_random_element(items))
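The dirs and random_el modules imported above are small local helpers that
are not shown. A plausible sketch of both; the directory names are
hypothetical:

# dirs.py (assumed contents)
import os
import shutil

DIR_NAMES = ['dir_1', 'dir_2', 'dir_3']  # hypothetical names


def create_dirs():
    for name in DIR_NAMES:
        os.makedirs(name, exist_ok=True)


def delete_dirs():
    for name in DIR_NAMES:
        shutil.rmtree(name, ignore_errors=True)


# random_el.py (assumed contents)
import random


def get_random_element(items):
    return random.choice(items)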
Example #6
import os

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from PIL import Image


def create_outputs():
    args = get_args()
    config, _ = get_config_from_json(args.config)
    out_dir = "../outputs/input"
    create_dirs([out_dir])

    for i in range(6):
        img = Image.open(e_img_path + "/" + str(i + 1) + '.jpg')
        img = img.resize((config.input_size[1], config.input_size[0]))
        x = np.array(img)
        plt.imsave(out_dir + '/ext_' + str(i + 1) + '.jpg', x)

    for i in range(6):
        img = Image.open(d_img_path + "/" + str(i + 1) + '.jpg')
        img = img.resize((config.input_size[1], config.input_size[0]))
        x = np.array(img)
        plt.imsave(out_dir + '/dat_' + str(i + 1) + '.jpg', x)

    config.unpool_type = "simple"
    config.exp_name = "nyu-resnet-berhu-aug-30-simple-upproject"
    config.prediction_model_name = "model-150-0.19.km"
    config.model_dir = os.path.join("../experiments", config.exp_name,
                                    "model/")
    config.tensorboard_dir = os.path.join("../experiments", config.exp_name,
                                          "log/")

    extract(config)

    tf.keras.backend.clear_session()

    config.unpool_type = "deconv"
    config.exp_name = "nyu-resnet-berhu-aug-30-deconv-upproject"
    config.prediction_model_name = "model-150-0.21.km"
    config.model_dir = os.path.join("../experiments", config.exp_name,
                                    "model/")
    config.tensorboard_dir = os.path.join("../experiments", config.exp_name,
                                          "log/")

    extract(config)

    tf.keras.backend.clear_session()

    config.unpool_type = "checkerboard"
    config.exp_name = "nyu-resnet-berhu-aug-30-checkerboard-upproject"
    config.prediction_model_name = "model-150-0.20.km"
    config.model_dir = os.path.join("../experiments", config.exp_name,
                                    "model/")
    config.tensorboard_dir = os.path.join("../experiments", config.exp_name,
                                          "log/")

    extract(config)

    tf.keras.backend.clear_session()

    config.unpool_type = "resize"
    config.exp_name = "nyu-resnet-berhu-aug-30-resize-upproject"
    config.prediction_model_name = "model-150-0.20.km"
    config.model_dir = os.path.join("../experiments", config.exp_name,
                                    "model/")
    config.tensorboard_dir = os.path.join("../experiments", config.exp_name,
                                          "log/")

    extract(config)
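A possible entry point for running the sweep above (assumed; the source's
driver script is not shown):

if __name__ == '__main__':
    create_outputs()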