# 예제 #1 (Example #1)
# 0
def main():
    # Capture the config path from the run arguments, then process the
    # json configuration file (same pattern as the other entry points
    # in this file; `config_dir` and `data_dir` are module-level paths).
    args = get_args()
    if args.config != 'None':
        config = process_config(args.config)
    else:
        config = process_config(os.path.join(config_dir, 'example.json'))

    # Load every .tif raster from the data directory, convert them to
    # numpy arrays, and build the (input, label) training pairs.
    data_loader = DataLoader(data_dir, config)
    data_loader.load_directory('.tif')
    data_loader.create_np_arrays()
    data_loader.create_data_label_pairs()

    preptt = PrepTrainTest(config, data_loader)

    # Hand each pair to the train/test preparer. The label keeps only
    # band 0 (the population channel), matching how rasters are read
    # elsewhere in this file (raster[:, :, 0]).
    # Fixes the original loop, which indexed the arrays with the loader
    # object and referenced an undefined loop variable `i`.
    for data_label_pair in data_loader.data_label_pairs:
        x_data = data_label_pair[0]
        y_true = data_label_pair[1][:, :, 0]

        preptt.add_data(x_data, y_true)

    # Create the experiment dirs
    create_dirs([config.summary_dir, config.checkpoint_dir, config.input_dir])

    # Create tensorflow session
    sess = tf.Session()

    # Create instance of the model you want
    model = PopModel(config)

    # Load model if it exists
    model.load(sess)

    # Create Tensorboard logger
    logger = Logger(sess, config)
    logger.log_config()

    # Create your data generator
    data = DataGenerator(config, preptraintest=preptt)

    data.create_traintest_data()

    # Create trainer and pass all previous components to it
    trainer = PopTrainer(sess, model, data, config, logger)

    # Train model
    trainer.train()
# 예제 #2 (Example #2)
# 0
def main():
    """Test entry point: wire loader, preparers, model, and run the test."""
    # Resolve the configuration file: use the CLI argument when given,
    # otherwise fall back to the bundled example json.
    args = get_args()
    cfg_path = (args.config if args.config != 'None'
                else os.path.join(config_dir, 'example.json'))
    config = process_config(cfg_path)

    # Read all .tif rasters and turn them into numpy arrays.
    loader = DataLoader(data_dir)
    loader.load_directory('.tif')
    loader.create_np_arrays()

    # Both preparers consume the same two arrays and chunking settings.
    feats = loader.arrays[0]
    labels = loader.arrays[1]
    preptt = PrepTrainTest(feats, labels, config.batch_size,
                           config.chunk_height, config.chunk_width)
    prepd = PrepData(feats, labels, config.batch_size,
                     config.chunk_height, config.chunk_width)

    # Experiment output directories must exist before anything is written.
    create_dirs([config.summary_dir, config.checkpoint_dir])

    # Tensorflow session plus the model, restored from a checkpoint if any.
    sess = tf.Session()
    model = PopModel(config)
    model.load(sess)

    # Generator producing both the train/test split and the full data set.
    generator = DataGenerator(config, preptt, prepd)
    generator.create_traintest_data()
    generator.create_data()

    # Tensorboard logger.
    logger = Logger(sess, config)

    # The trainer doubles as the tester here.
    tester = PopTrainer(sess, model, generator, config, logger)
    tester.test()
# 예제 #3 (Example #3)
# 0
def main():
    """Roll the trained model forward in time.

    Starting from the newest raster in the data directory, predict
    ``config.num_outputs`` successive output rasters; after each step the
    predicted population band is fed back in as the next step's input.
    Every prediction is written to disk via ``DataWriter``.

    Relies on module-level ``data_dir`` and ``config`` being defined.
    """
    # Load all .tif rasters and convert them to numpy arrays.
    data_loader = DataLoader(data_dir, config)
    data_loader.load_directory('.tif')
    data_loader.create_np_arrays()

    prepd = PrepData(config, data_loader)

    # The most recent raster seeds the rolling forecast.
    start_raster = data_loader.arrays[-1]

    prepd.add_data(start_raster)

    # Create the experiments output dir
    create_dirs([
        config.output_dir, config.output_pred_dir, config.output_dif_dir,
        config.output_eval_dir
    ])

    # Create tensorflow session
    sess = tf.Session()

    # Create instance of the model
    model = PopModel(config)

    # Load model if exist
    model.load(sess)

    # Create data generator
    data = DataGenerator(config, prepdata=prepd)

    # Band 0 of the seed raster — presumably the population channel
    # (it is the band replaced by predictions below) — TODO confirm.
    start_raster = data.prepdata.x_data[0][:, :, 0]
    prev_raster = start_raster

    with sess:
        rasters = []

        # One iteration per forecast step; each step consumes the
        # previous step's prediction as input.
        for k in range(config.num_outputs):
            data.prepdata.output_nr = k
            data.create_data()

            # Position of the next chunk inside the reassembled raster.
            cur_row = 0
            cur_col = 0

            chunk_height = data.prepdata.chunk_height
            chunk_width = data.prepdata.chunk_width

            chunk_rows = data.prepdata.chunk_rows
            chunk_cols = data.prepdata.chunk_cols

            # Full-size canvas the predicted chunks are tiled into
            # (padded up to a whole number of chunks; trimmed below).
            output_raster = np.empty(
                (chunk_rows * chunk_height, chunk_cols * chunk_width))

            # Predicting for each batch
            for i in range(data.batch_num):
                #y_pred = sess.run(model.y, feed_dict={model.y: data.input[0][i]})
                # NOTE(review): feed tensors (x, x_pop_chunk, x_proj,
                # x_cur_pop) are model placeholders declared elsewhere —
                # their exact shapes are not visible from here.
                y_pred, y_pred_chunk = sess.run(
                    [model.y, model.y_chunk],
                    feed_dict={
                        model.x: data.input[0][i],
                        model.x_pop_chunk: data.x_chunk_pop[0][i],
                        model.x_proj: config.pop_proj[k],
                        model.x_cur_pop: data.prepdata.x_cur_pop[0]
                    })
                y_pred = y_pred.reshape(config.batch_size, chunk_height,
                                        chunk_width)

                # Tile each predicted chunk into place, walking
                # left-to-right, then down to the next row of chunks.
                for j in range(config.batch_size):
                    if chunk_cols == cur_col:  # Change to new row and reset column if it reaches the end
                        cur_row += 1
                        cur_col = 0

                    output_raster[cur_row * chunk_height: (cur_row + 1) * chunk_height, cur_col * chunk_width: (cur_col + 1) * chunk_width] = \
                        y_pred[j, :, :]

                    cur_col += 1

            # Removes null-cells at the start of the array
            output_raster = output_raster[data.prepdata.offset_rows:,
                                          data.prepdata.offset_cols:]

            # Makes sure the right amount of null-cells are removed from
            # the end of the array (a `-0` slice would empty the axis,
            # hence the explicit zero checks).
            if data.prepdata.row_null_cells == 0 and data.prepdata.col_null_cells == 0:
                pass
            elif data.prepdata.row_null_cells == 0:
                output_raster = output_raster[:, :-data.prepdata.
                                              col_null_cells]
            elif data.prepdata.col_null_cells == 0:
                output_raster = output_raster[:-data.prepdata.
                                              row_null_cells, :]
            else:
                output_raster = output_raster[:-data.prepdata.row_null_cells, :
                                              -data.prepdata.col_null_cells]

            # Removes the previous input data and adds the output raster
            data.prepdata.x_data = []

            # Replaces the old population with the predicted one, keeps
            # the other features constant.
            # NOTE(review): `new_input` aliases (does not copy)
            # data_loader.arrays[-1], so the in-place write of band 0
            # below mutates the loader's array across iterations —
            # confirm this is intended.
            new_input = data_loader.arrays[-1]
            new_input[:, :, 0] = output_raster[:new_input.shape[0], :new_input.
                                               shape[1]]

            data.prepdata.add_data(new_input)
            rasters.append(output_raster)

            # Diagnostics for the predicted population band.
            print('Min value pop: {}'.format(np.amin(output_raster)))
            print('Max value pop: {}'.format(np.amax(output_raster)))
            print('Sum value pop: {}'.format(np.sum(output_raster)))
            # Calculating back to population
            # norm_sum = np.sum(output_raster)
            # final_pop = np.sum(pop_arr_14)
            #
            # output_raster = (output_raster / norm_sum) * final_pop

            print(np.max(output_raster))
            print(np.min(output_raster))
            print(output_raster.shape)

            # Persist this step's prediction plus diff/eval products;
            # the current output becomes the next step's "previous".
            data_writer = DataWriter(data_loader.geotif[0], start_raster,
                                     prev_raster, output_raster, config)
            data_writer.write_outputs()
            prev_raster = output_raster