# Example #1
def main(_):
    """Load the GRSS 2013 and GRSS 2018 data sets and run band matching between them."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    normalize = True

    def _load_data_set(loader_name):
        # Loader classes live in a module named after themselves.
        data_loader = get_class(loader_name + '.' + loader_name)(flags.path)
        return data_loader.load_data(0, normalize)

    grss_2013_data_set = _load_data_set("GRSS2013DataLoader")
    grss_2018_data_set = _load_data_set("GRSS2018DataLoader")

    # The 2018 lidar scale is the 2013 scale divided by 2.5.
    lidar_grss2013_scale = 5
    lidar_grss2018_scale = lidar_grss2013_scale / 2.5

    # Band indices selected for the cross-data-set match.
    grss_2013_band = 8
    grss_2018_band = 2

    match_data(grss_2013_band, grss_2018_band, grss_2013_data_set,
               grss_2018_data_set, lidar_grss2013_scale, lidar_grss2018_scale)
def main():
    """Show the shadow map, clear shadow pixels overlapping validation targets, and save it."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    loader_name = flags.loader_name
    loader = get_class(loader_name + '.' + loader_name)(flags.path)

    sample_set = loader.load_samples(0.1, 0.1)
    data_set = loader.load_data(0, True)
    shadow_map, _ = loader.load_shadow_map(0, data_set)

    def _show(binary_map):
        # Scale the 0/1 map to 0/255 for display.
        plt.imshow(binary_map * 255)
        plt.title("figure_name"), plt.xticks([]), plt.yticks([])
        plt.show()

    _show(shadow_map)

    # point[1]/point[0] indexing suggests targets are stored (x, y) while the
    # map is [row, col] — TODO confirm against the loader.
    non_shadow_test_sample = 0
    for point in sample_set.validation_targets:
        row, col = point[1], point[0]
        if shadow_map[row, col] == 1:
            # Validation pixels must not count as shadow.
            shadow_map[row, col] = 0
        else:
            non_shadow_test_sample += 1

    _show(shadow_map)

    imwrite("shadow_map.tif", shadow_map, planarconfig='contig')
# Example #3
    def read_data_set(self, loader_name, path, test_data_ratio, neighborhood,
                      normalize):
        """Read split shapes from a previously generated metadata TFRecord.

        Returns ``TFRecordDataInfo`` instances for the training, test and
        validation record files, two ``None`` placeholders, the loader's class
        count and its target color list.  ``test_data_ratio``, ``neighborhood``
        and ``normalize`` are not referenced here; the record files were
        produced beforehand with fixed settings.
        """
        loader = get_class(loader_name + '.' + loader_name)(path)

        model_base_dir = loader.get_model_base_dir()
        # NOTE(review): the *_data_shape variables are bound inside this loop;
        # if 'metadata.tfrecord' contains no records, the return statement
        # below raises NameError.  Only the last record's shapes survive.
        for record in tf.python_io.tf_record_iterator(model_base_dir +
                                                      'metadata.tfrecord'):
            example = tf.train.Example()
            example.ParseFromString(record)  # calling protocol buffer API

            training_data_shape = np.array(
                example.features.feature['training_data_shape'].int64_list.
                value)
            testing_data_shape = np.array(
                example.features.feature['testing_data_shape'].int64_list.value
            )
            validation_data_shape = np.array(
                example.features.feature['validation_data_shape'].int64_list.
                value)

        return TFRecordDataInfo(data=TFRecordSpecialData(training_data_shape),
                                path=model_base_dir + 'training.tfrecord'), \
               TFRecordDataInfo(data=TFRecordSpecialData(testing_data_shape),
                                path=model_base_dir + 'test.tfrecord'), \
               TFRecordDataInfo(data=TFRecordSpecialData(validation_data_shape),
                                path=model_base_dir + 'validation.tfrecord'), None, \
               loader.get_class_count(), None, loader.get_target_color_list()
# Example #4
    def read_data_set(self, loader_name, path, test_data_ratio, neighborhood, normalize):
        """Load a data set and wrap its three splits as generator-backed data infos.

        Returns one ``GeneratorDataInfo`` per split (training, test,
        validation), followed by the data set's shadow creator dict, the
        loader's class count, the scene shape, and the target color list.
        """
        start_time = time.time()

        loader = get_class(loader_name + '.' + loader_name)(path)

        data_set = loader.load_data(neighborhood, normalize)
        sample_set = loader.load_samples(test_data_ratio)

        def _split_info(targets):
            # Full split shape is (sample count, *per-sample data shape).
            split_shape = numpy.concatenate(
                ([targets.shape[0]], loader.get_data_shape(data_set)))
            return GeneratorDataInfo(
                data=GeneratorSpecialData(shape=split_shape,
                                          size=numpy.prod(split_shape)),
                targets=targets,
                loader=loader,
                dataset=data_set)

        print('Loaded dataset(%.3f sec)' % (time.time() - start_time))
        return \
            _split_info(sample_set.training_targets), \
            _split_info(sample_set.test_targets), \
            _split_info(sample_set.validation_targets), \
            data_set.shadow_creator_dict, \
            loader.get_class_count(), loader.get_scene_shape(data_set), loader.get_target_color_list()
# Example #5
def main(_):
    """Export the selected data set as TFRecord files.

    Loads the whole data set into memory via ``InMemoryImporter``, then writes
    a shape-metadata record plus one record file per split (training, test,
    validation) into ``--target_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--path',
        nargs='?',
        const=True,
        type=str,
        default='C:/Users/AliGökalp/Documents/phd/data/2013_DFTC/2013_DFTC',
        help='Input data path')
    parser.add_argument(
        '--loader_name',
        nargs='?',
        const=True,
        type=str,
        default='GRSS2013DataLoader',
        help='Data set loader name, Values : GRSS2013DataLoader')
    parser.add_argument('--neighborhood',
                        nargs='?',
                        type=int,
                        default=5,
                        help='Neighborhood for data extraction')
    parser.add_argument('--test_ratio',
                        nargs='?',
                        type=float,
                        default=0.05,
                        help='Ratio of training data to use in testing')
    # NOTE(review): argparse `type=bool` treats any non-empty string as True
    # (bool('False') is True); `action='store_true'` would be safer — kept
    # as-is to preserve the existing CLI contract.
    parser.add_argument('--compressed',
                        nargs='?',
                        const=True,
                        type=bool,
                        default=False,
                        help='If true, performs compression')
    parser.add_argument('--target_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Target path to write the files to')
    flags, unparsed = parser.parse_known_args()

    # Load everything into memory and split into the three parts.
    inmemoryimporter = get_class('InMemoryImporter.InMemoryImporter')()
    training_data_with_labels, test_data_with_labels, validation_data_with_labels, shadow_ratio, class_count, scene_shape, color_list = \
        inmemoryimporter.read_data_set(flags.loader_name, flags.path, flags.test_ratio, flags.neighborhood, True)

    # Shape metadata first, then one record file per split.
    write_metadata_record(os.path.join(flags.target_path, 'metadata.tfrecord'),
                          training_data_with_labels.data,
                          test_data_with_labels.data,
                          validation_data_with_labels.data)

    write_to_tfrecord(os.path.join(flags.target_path, 'training.tfrecord'),
                      training_data_with_labels.data,
                      training_data_with_labels.labels, flags.compressed)
    write_to_tfrecord(os.path.join(flags.target_path, 'test.tfrecord'),
                      test_data_with_labels.data, test_data_with_labels.labels,
                      flags.compressed)
    write_to_tfrecord(os.path.join(flags.target_path, 'validation.tfrecord'),
                      validation_data_with_labels.data,
                      validation_data_with_labels.labels, flags.compressed)
# Example #6
def main():
    """Replace shadow regions in the ground truth with their dominant neighboring class.

    Builds a target image from samples, extracts the shadow map, and for every
    shadow contour reassigns its pixels to the most frequent non-shadow,
    non-building neighboring target class.  Writes the shadow map and the
    corrected ground truth as TIFF files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', nargs='?', const=True, type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    loader_name = flags.loader_name
    loader = get_class(loader_name + '.' + loader_name)(flags.path)
    sample_set = loader.load_samples(0.1, 0.1)
    data_set = loader.load_data(0, True)
    scene_shape = loader.get_scene_shape(data_set)

    target_classes_as_image = create_target_image_via_samples(sample_set, scene_shape)

    shadow_map = get_shadow_map(target_classes_as_image)
    imwrite("muulf_shadow_map.tif", shadow_map, planarconfig='contig')
    create_shadow_corrected_image(data_set.casi, loader.load_data(0, False).casi, shadow_map)
    draw_targets(loader.get_target_color_list(), target_classes_as_image, "Targets")

    # retval, labels, stats, centroids = cv2.connectedComponentsWithStats(shadow_map)
    # Each contour is one connected shadow region.
    contours, hierarchy = cv2.findContours(shadow_map, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    # draw_im = numpy.zeros(shadow_map.shape, dtype=numpy.uint8)
    # cv2.drawContours(draw_im, contours, -1, 255, 3)
    for contour in contours:
        # target_map counts neighboring target classes around this contour.
        target_map = {}
        fill_targets_for_contour(contour, target_classes_as_image, target_map)
        # Shadow/invalid/building neighbors are not valid replacement classes.
        if BUILDING_SHADOW_CLASS in target_map:
            del target_map[BUILDING_SHADOW_CLASS]
        if INVALID_TARGET_VALUE in target_map:
            del target_map[INVALID_TARGET_VALUE]
        if BUILDING_CLASS in target_map:
            del target_map[BUILDING_CLASS]
        # Pick the neighboring class with the highest count.
        final_neigh_target = None
        final_neigh_count = 0
        for neigh_target, neigh_count in target_map.items():
            if final_neigh_target is not None:
                if neigh_count > final_neigh_count:
                    final_neigh_target = neigh_target
                    final_neigh_count = neigh_count
            else:
                final_neigh_target = neigh_target
                final_neigh_count = neigh_count
        # print(final_neigh_target)
        # print(final_neigh_count)
        if final_neigh_target is None:
            print("found contour with no proper neighbors")
        else:
            # Paint the whole contour area with the winning neighbor class.
            image = get_contour_image(shadow_map.shape, contour)
            target_classes_as_image[image] = final_neigh_target
            print("shadow converted to neighboring target %d" % final_neigh_target)

    draw_targets(loader.get_target_color_list(), target_classes_as_image, "Targets after shadow correction")
    # increase target level as one
    target_classes_as_image[target_classes_as_image != INVALID_TARGET_VALUE] = target_classes_as_image[
                                                                                   target_classes_as_image != INVALID_TARGET_VALUE] + 1
    imwrite("muulf_gt_shadow_corrected.tif", target_classes_as_image, planarconfig='contig')
def main(_):
    """Train a model once, or run hyperopt hyper-parameter search.

    With ``max_evals == 1`` a single training episode runs using either the
    model's default parameters or a JSON parameter file.  Otherwise, trials are
    resumed from ``trial.p`` (if present) and ``fmin`` is invoked one
    evaluation at a time, persisting the trials after each step, until
    ``max_evals`` evaluations exist.
    """
    flags = parse_cmd(argparse.ArgumentParser())

    print('Input information:', flags)

    nn_model = get_class(flags.model_name + '.' + flags.model_name)()

    if flags.max_evals == 1:
        print('Running in single execution training mode')

        algorithm_params = nn_model.get_default_params(flags.batch_size)
        if flags.algorithm_param_path is not None:
            # JSON file overrides the model defaults entirely.
            algorithm_params = json.load(open(flags.algorithm_param_path, 'r'))
            # algorithm_params = namedtuple('GenericDict', algorithm_params_dict.keys())(**algorithm_params_dict)
        perform_an_episode(flags, algorithm_params, nn_model,
                           flags.base_log_path)
        # code for dumping the parameters as json
        # json.dump(algorithm_params, open('algorithm_param_output_cnnv4.json', 'w'), indent=3)
    else:
        print('Running in hyper parameter optimization mode')
        model_space_fun = nn_model.get_hyper_param_space

        # episode_run_index is shared with perform_an_episode for log naming.
        global episode_run_index
        trial_fileaddress = os.path.join(flags.base_log_path, "trial.p")
        while True:
            try:
                # Resume from persisted trials when available.
                with open(trial_fileaddress, "rb") as read_file:
                    trials = pickle.load(read_file)
                episode_run_index = len(trials.trials)
                best = convert_trial_to_dictvalues(
                    trials.best_trial['misc']['vals'])
            except IOError:
                print("No trials file found. Starting trials from scratch")
                episode_run_index = 0
                trials = Trials()

            if episode_run_index == flags.max_evals:
                # NOTE(review): if no trials file existed and max_evals is 0,
                # `best` is unbound here and space_eval below raises NameError.
                break

            # Hyperopt minimizes, so optimize 1 - validation accuracy.
            best = fmin(
                fn=lambda params: 1 -
                (perform_an_episode(flags, params, nn_model, flags.
                                    base_log_path).validation_accuracy),
                space=model_space_fun(),
                algo=tpe.suggest,
                trials=trials,
                max_evals=episode_run_index + 1)
            # Persist after every single evaluation so runs can be resumed.
            pickle.dump(trials, open(trial_fileaddress, "wb"))

        json.dump(trials.results, open('trial_results.json', 'w'), indent=3)
        print(space_eval(model_space_fun(), best))
# Example #8
    def read_data_set(self, loader_name, path, train_data_ratio, test_data_ratio, neighborhood, normalize):
        """Load a data set fully into memory and return its labeled splits.

        Returns the training, test and validation data-with-labels, the data
        set's shadow creator dict, the loader's class count, the scene shape,
        and the target color list.
        """
        load_begin = time.time()

        loader = get_class(loader_name + '.' + loader_name)(path)
        data_set = loader.load_data(neighborhood, normalize)
        sample_set = loader.load_samples(train_data_ratio, test_data_ratio)

        # Materialize every split eagerly; this importer keeps all data in memory.
        training = self._get_data_with_labels(sample_set.training_targets, loader, data_set)
        validation = self._get_data_with_labels(sample_set.validation_targets, loader, data_set)
        testing = self._get_data_with_labels(sample_set.test_targets, loader, data_set)

        print('Loaded dataset(%.3f sec)' % (time.time() - load_begin))
        return (training, testing, validation, data_set.shadow_creator_dict,
                loader.get_class_count(), loader.get_scene_shape(data_set),
                loader.get_target_color_list())
# Example #9
def load_op(batch_size, iteration_count, loader_name, path):
    """Build the input pipeline pairing normal and shadowed samples for GAN training.

    Loads the scene and its shadow map, extracts matched normal/shadow sample
    matrices, strips the trailing channel, and wraps the pairs in a shuffled,
    repeated, batched ``tf.data`` pipeline with random shadow augmentation.
    Returns an ``InitializerHook`` holding the iterator, the two placeholders
    and the backing matrices.
    """
    neighborhood = 0
    loader = get_class(loader_name + '.' + loader_name)(path)
    data_set = loader.load_data(neighborhood, True)

    shadow_map, shadow_ratio = loader.load_shadow_map(neighborhood, data_set)

    # normal_data_as_matrix, shadow_data_as_matrix = GRSS2013DataLoader.get_targetbased_shadowed_normal_data(data_set,
    #                                                                                     loader,
    #                                                                                     shadow_map,
    #                                                                                     loader.load_samples(0.1))

    if FLAGS.use_target_map:
        normal_data_as_matrix, shadow_data_as_matrix = get_data_from_scene(
            data_set, loader, shadow_map)
    else:
        normal_data_as_matrix, shadow_data_as_matrix = get_all_shadowed_normal_data(
            data_set, loader, shadow_map, multiply_shadowed_data=False)

    # normal_data_as_matrix, shadow_data_as_matrix = create_dummy_shadowed_normal_data(data_set, loader)

    # Drop the last channel; only the first hsi_channel_len channels are fed
    # to the network (presumably the trailing channel is lidar — TODO confirm).
    hsi_channel_len = normal_data_as_matrix.shape[3] - 1
    normal_data_as_matrix = normal_data_as_matrix[:, :, :, 0:hsi_channel_len]
    shadow_data_as_matrix = shadow_data_as_matrix[:, :, :, 0:hsi_channel_len]

    # Placeholders let the (large) matrices be fed at iterator-init time
    # instead of being baked into the graph as constants.
    normal = tf.placeholder(dtype=normal_data_as_matrix.dtype,
                            shape=normal_data_as_matrix.shape,
                            name='x')
    shadow = tf.placeholder(dtype=shadow_data_as_matrix.dtype,
                            shape=shadow_data_as_matrix.shape,
                            name='y')

    # Repeat the data just enough times to cover iteration_count batches.
    epoch = int(
        (iteration_count * batch_size) / normal_data_as_matrix.shape[0])
    data_set = tf.data.Dataset.from_tensor_slices(
        (normal,
         shadow)).apply(shuffle_and_repeat(buffer_size=10000, count=epoch))
    data_set = data_set.map(
        lambda param_x, param_y_: perform_shadow_augmentation_random(
            param_x, param_y_, shadow_ratio[0:hsi_channel_len]),
        num_parallel_calls=4)
    data_set = data_set.batch(batch_size)
    data_set_itr = data_set.make_initializable_iterator()

    return InitializerHook(data_set_itr, normal, shadow, normal_data_as_matrix,
                           shadow_data_as_matrix)
# Example #10
    def read_data_set(self, loader_name, path, train_data_ratio,
                      test_data_ratio, neighborhood, normalize):
        """Delegate loading to the generator importer, then queue 5000 synthetic samples.

        Each synthetic sample is class 0 with all-zero data except the last
        channel, which is set to one.  The samples are appended to
        ``self.target_class`` / ``self.target_data``; the delegated split
        tuple is returned unchanged.
        """
        training_data_with_labels, test_data_with_labels, validation_data_with_labels, shadow_dict, class_range, \
        scene_shape, color_list = \
            self.generator_importer.read_data_set(loader_name=loader_name, path=path, train_data_ratio=train_data_ratio,
                                                  test_data_ratio=test_data_ratio,
                                                  neighborhood=neighborhood, normalize=normalize)
        loader = get_class(loader_name + '.' + loader_name)(path)
        shape = loader.get_data_shape(training_data_with_labels.dataset)
        for _ in range(5000):
            self.target_class.append(0)
            # numpy.float was removed in NumPy 1.20; numpy.float64 is the
            # dtype the old alias resolved to.
            element_data = numpy.zeros(shape=shape, dtype=numpy.float64)
            # Last channel is all ones, every other channel stays zero.
            element_data[:, :, -1] = 1.0
            self.target_data.append(element_data)

        return training_data_with_labels, test_data_with_labels, validation_data_with_labels, shadow_dict, class_range, \
               scene_shape, color_list
# Example #11
def main():
    """Render the sample-based ground truth as a colorized TIFF image."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    loader_name = flags.loader_name
    loader = get_class(loader_name + '.' + loader_name)(flags.path)
    sample_set = loader.load_samples(0.1, 0.1)
    data_set = loader.load_data(0, False)
    scene_shape = loader.get_scene_shape(data_set)

    # Paint each sample's class onto an empty scene, then map classes to colors.
    target_image = create_target_image_via_samples(sample_set, scene_shape)
    colored_image = create_colored_image(target_image,
                                         loader.get_target_color_list())
    imsave(os.path.join(flags.output_path, "result_colorized.tif"),
           colored_image)
# Example #12
def perform_full_scene_classification(data_path, loader_name, neighborhood,
                                      estimator, batch_size):
    """Classify every pixel of the scene in batches and save raw + colorized images.

    Iterates the full scene target array, accumulates per-pixel patches into a
    fixed-size batch cache, and asks ``estimator`` for predictions whenever the
    cache fills or the scene ends (final partial batch).  Writes
    ``result_raw.tif`` and ``result_colorized.tif`` to the current directory.
    """
    loader = get_class(loader_name + '.' + loader_name)(data_path)
    data_set = loader.load_data(neighborhood, False)
    scene_shape = loader.get_scene_shape(data_set)
    all_scene_target_array = GeneratorImporter.GeneratorImporter.create_all_scene_target_array(
        scene_shape)
    predict_pixel_count = scene_shape[0] * scene_shape[1]
    progress_bar = tqdm(total=predict_pixel_count)

    # Hoist the loop-invariant per-sample shape instead of querying the
    # loader once per dimension.
    data_shape = loader.get_data_shape(data_set)
    prediction = numpy.empty([predict_pixel_count], dtype=numpy.uint8)
    batch_cache = numpy.empty(
        [batch_size, data_shape[0], data_shape[1], data_shape[2]],
        dtype=numpy.float32)
    current_pixel_index = 0
    batch_pixel_index = 0
    for point in all_scene_target_array:
        batch_cache[batch_pixel_index] = loader.get_point_value(
            data_set, point)
        current_pixel_index += 1
        batch_pixel_index += 1
        # Flush when the batch is full, or at the final (possibly partial) batch.
        if current_pixel_index == predict_pixel_count or batch_pixel_index == batch_size:
            point_val = flatten_data(batch_cache[0:batch_pixel_index])
            prediction[(current_pixel_index - batch_pixel_index):current_pixel_index] = \
                estimator.predict(point_val)
            progress_bar.update(batch_pixel_index)
            batch_pixel_index = 0

    progress_bar.close()
    scene_as_image = numpy.reshape(prediction, scene_shape)
    output_path = "."
    imsave(os.path.join(output_path, "result_raw.tif"), scene_as_image)
    imsave(
        os.path.join(output_path, "result_colorized.tif"),
        create_colored_image(scene_as_image, loader.get_target_color_list()))
# Example #13
def main(_):
    """Restore a trained GAN generator and run shadow inference on the scene.

    Selects the inference wrapper matching ``FLAGS.gan_type``, restores its
    generator from ``FLAGS.checkpoint_path`` and drives the wrapper's hook
    with a minimal manually-built session run context.
    """
    numpy.set_printoptions(precision=5, suppress=True)
    neighborhood = FLAGS.neighborhood

    _validate_flags()

    loader_name = FLAGS.loader_name
    loader = get_class(loader_name + '.' + loader_name)(FLAGS.path)
    data_set = loader.load_data(neighborhood, True)
    log_dir = "./"

    # test_x_data = numpy.full([1, 1, band_size], fill_value=1.0, dtype=float)
    # print_tensors_in_checkpoint_file(FLAGS.checkpoint_path, tensor_name='ModelX2Y', all_tensors=True)
    shadow_map, shadow_ratio = loader.load_shadow_map(neighborhood, data_set)

    # x2y maps normal->shadow, y2x shadow->normal (fetch_shadows flips which
    # samples are fed) — presumably; verify against the wrapper classes.
    gan_inference_wrapper_dict = {
        "cycle_gan": CycleGANInferenceWrapper(),
        "gan_x2y": GANInferenceWrapper(fetch_shadows=False),
        "gan_y2x": GANInferenceWrapper(fetch_shadows=True)
    }

    hook = gan_inference_wrapper_dict[FLAGS.gan_type].create_inference_hook(
        data_set=data_set,
        loader=loader,
        log_dir=log_dir,
        neighborhood=neighborhood,
        shadow_map=shadow_map,
        shadow_ratio=shadow_ratio,
        validation_sample_count=FLAGS.number_of_samples)

    # NOTE(review): gpu[0] raises IndexError on machines without a GPU.
    gpu = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_memory_growth(gpu[0], True)
    with tf.Session() as sess:
        gan_inference_wrapper_dict[
            FLAGS.gan_type].create_generator_restorer().restore(
                sess, FLAGS.checkpoint_path)
        # Drive the hook manually instead of through a MonitoredSession.
        hook.after_create_session(sess, None)
        run_context = SessionRunContext(original_args=None, session=sess)
        hook.after_run(run_context=run_context, run_values=None)
# Example #14
def main(_):
    """Restore a trained classifier and plot averaged histograms of registered tensors.

    Runs the validation pipeline over the scene, collects every tensor the
    model registered for histogramming for samples 2000..5000, and writes one
    averaged-histogram figure per tensor into ``flags.output_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    model = get_class(flags.model_name + '.' + flags.model_name)()
    algorithm_params = model.get_default_params(flags.batch_size)
    if flags.algorithm_param_path is not None:
        algorithm_params = json.load(open(flags.algorithm_param_path, 'r'))

    importer_name = flags.importer_name
    # data_importer = get_class(importer_name + '.' + importer_name)()
    data_importer = ControlledDataImporter()

    training_data_with_labels, test_data_with_labels, validation_data_with_labels, shadow_dict, class_range, \
    scene_shape, color_list = \
        data_importer.read_data_set(flags.loader_name, flags.path, flags.test_ratio, flags.neighborhood, True)

    # Validation is expanded to cover the whole scene for histogram collection.
    validation_data_with_labels = data_importer.create_all_scene_data(
        scene_shape, validation_data_with_labels)
    testing_tensor, training_tensor, validation_tensor = data_importer.convert_data_to_tensor(
        test_data_with_labels, training_data_with_labels,
        validation_data_with_labels, class_range)

    deep_nn_template = tf.make_template('nn_core',
                                        model.create_tensor_graph,
                                        class_count=class_range.stop)

    start_time = time.time()

    validation_data_set = validation_tensor.dataset

    validation_input_iter = simple_nn_iterator(validation_data_set,
                                               flags.batch_size)
    validation_images, validation_labels = validation_input_iter.get_next()
    model_input_params = ModelInputParams(x=validation_images,
                                          y=None,
                                          device_id='/gpu:0',
                                          is_training=False)
    validation_tensor_outputs = deep_nn_template(
        model_input_params, algorithm_params=algorithm_params)
    validation_nn_params = NNParams(
        input_iterator=validation_input_iter,
        data_with_labels=None,
        metrics=None,
        predict_tensor=validation_tensor_outputs.y_conv)
    validation_nn_params.data_with_labels = validation_data_with_labels

    # Restore only classifier weights; GAN image-generator variables are excluded.
    saver = tf.train.Saver(var_list=slim.get_variables_to_restore(
        include=["nn_core"], exclude=["image_gen_net_"]))
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = False
    config.gpu_options.per_process_gpu_memory_fraction = 1.0

    with tf.Session(config=config) as session:
        # Restore variables from disk.
        saver.restore(session, flags.base_log_path)

        # Init for imaging the results
        validation_tensor.importer.perform_tensor_initialize(
            session, validation_tensor, validation_nn_params)

        histogram_tensors = []
        histogram_tensor_names = []
        result_map = {}
        bin_map = {}
        # Scale bin counts so larger tensors get proportionally more bins,
        # normalized against the first registered tensor (480 bins baseline).
        base_bin = 480 / calculate_tensor_size(
            validation_tensor_outputs.histogram_tensors[0].tensor)
        for histogram_tensor_instance in validation_tensor_outputs.histogram_tensors:
            histogram_tensors.append(histogram_tensor_instance.tensor)
            histogram_tensor_names.append(histogram_tensor_instance.name)
            result_map[histogram_tensor_instance.name] = []
            bin_map[histogram_tensor_instance.name] = int(
                base_bin *
                calculate_tensor_size(histogram_tensor_instance.tensor))

        sample_idx = 0
        while True:
            try:
                # prediction, current_prediction = session.run([
                #     tf.argmax(validation_nn_params.predict_tensor, 1), histogram_tensors])

                current_prediction = session.run(histogram_tensors)
                # Skip a warm-up window; collect only samples 2001..5000.
                if sample_idx > 2000:
                    for tensor_result, tensor_name in zip(
                            current_prediction, histogram_tensor_names):
                        result_map[tensor_name].append(tensor_result)
                if sample_idx == 5000:
                    break

                sample_idx = sample_idx + 1

            except tf.errors.OutOfRangeError:
                break

        for tensor_name in histogram_tensor_names:
            # Find the global value range across all collected results.
            range_min = sys.float_info.max
            # BUGFIX: sys.float_info.min is the smallest *positive* float
            # (~2.2e-308), not the most negative value; with it, an
            # all-negative tensor would get a wrong histogram range.
            range_max = -sys.float_info.max
            for result in result_map[tensor_name]:
                range_min = min(range_min, result.min())
                range_max = max(range_max, result.max())

            # numpy.int was removed in NumPy 1.20; the alias meant builtin int.
            all_hists = numpy.zeros(
                [len(result_map[tensor_name]), bin_map[tensor_name]],
                dtype=int)
            bin_edges = None
            for idx, result in enumerate(result_map[tensor_name]):
                hist, bin_edges = numpy.histogram(result,
                                                  range=(range_min, range_max),
                                                  bins=bin_map[tensor_name])
                all_hists[idx] = hist

            # Average the per-sample histograms and plot one figure per tensor.
            mean = numpy.mean(all_hists, axis=0)
            plt.plot(bin_edges[:-1], mean, label=tensor_name)
            plt.xlim(min(bin_edges), max(bin_edges))
            plt.xlabel("Value")
            plt.ylabel("Counts")
            plt.fill_between(bin_edges[:-1], mean)
            plt.title(tensor_name)
            plt.savefig(
                os.path.join(flags.output_path, tensor_name + "_fig.jpg"))
            plt.clf()

    print('Done evaluation(%.3f sec)' % (time.time() - start_time))
def perform_an_episode(flags, algorithm_params, model, base_log_path):
    """Run one training episode (``flags.split_count`` runs) and return averaged results.

    Loads the data set via the configured importer, builds the training graph
    per run with optional shadow/rotation/reflection augmentation, trains under
    a monitored session, and returns a ``TrainingResult`` with mean validation
    accuracy (or ``None`` when validation is disabled), mean test accuracy and
    mean loss across runs.  Increments the global ``episode_run_index``.
    """
    importer_name = flags.importer_name
    data_importer = get_class(importer_name + '.' + importer_name)()

    training_data_with_labels, test_data_with_labels, validation_data_with_labels, shadow_dict, class_range, \
    scene_shape, color_list = \
        data_importer.read_data_set(flags.loader_name, flags.path, flags.test_ratio, flags.neighborhood, True)

    shadow_struct = None
    if flags.augment_data_with_shadow is not None:
        shadow_struct = shadow_dict[flags.augment_data_with_shadow]

    augmentation_info = AugmentationInfo(
        shadow_struct=shadow_struct,
        perform_shadow_augmentation=flags.augment_data_with_shadow is not None,
        perform_rotation_augmentation=flags.augment_data_with_rotation,
        perform_reflection_augmentation=flags.augment_data_with_reflection,
        offline_or_online=flags.offline_augmentation,
        augmentation_random_threshold=flags.augmentation_random_threshold)

    # Either a fixed step count or enough steps to cover `epoch` passes.
    batch_size = algorithm_params["batch_size"]
    epoch = flags.epoch
    if epoch is None:
        required_steps = flags.step
    else:
        required_steps = int(
            ((training_data_with_labels.data.shape[0] * epoch) / batch_size))

    global episode_run_index
    print('Starting episode#%d with steps#%d : %s' %
          (episode_run_index, required_steps, algorithm_params))

    validation_accuracy_list = []
    testing_accuracy_list = []
    loss_list = []

    device_id = '/gpu:0'
    if flags.device == "gpu":
        device_id = '/gpu:0'
    elif flags.device == "cpu":
        device_id = '/cpu:0'
    elif flags.device == "tpu":
        device_id = None

    for run_index in range(0, flags.split_count):
        # A fresh graph per run keeps runs independent.
        with tf.Graph().as_default():
            # Set random seed as the same value to get consistent results
            tf.set_random_seed(1234)

            print('Starting Run #%d' % run_index)

            testing_tensor, training_tensor, validation_tensor = data_importer.convert_data_to_tensor(
                test_data_with_labels, training_data_with_labels,
                validation_data_with_labels, class_range)

            # Collect unnecessary data
            gc.collect()

            cross_entropy, learning_rate, testing_nn_params, training_nn_params, validation_nn_params, train_step = create_graph(
                training_tensor.dataset,
                testing_tensor.dataset,
                validation_tensor.dataset,
                class_range,
                batch_size,
                device_id,
                epoch,
                augmentation_info=augmentation_info,
                algorithm_params=algorithm_params,
                model=model,
                create_separate_validation_branch=data_importer.
                requires_separate_validation_branch)

            # Collect unnecessary data
            gc.collect()

            #######################################################################
            training_nn_params.data_with_labels = training_data_with_labels
            testing_nn_params.data_with_labels = test_data_with_labels
            validation_nn_params.data_with_labels = validation_data_with_labels
            ############################################################################

            # NOTE(review): when perform_validation is False this sets
            # validation_nn_params to None, but the validation summaries below
            # still dereference .metrics — that path looks like it would raise
            # AttributeError; confirm whether validation is always enabled.
            if not flags.perform_validation:
                validation_nn_params = None

            tf.summary.scalar('training_cross_entropy', cross_entropy)
            tf.summary.scalar('training_learning_rate', learning_rate)

            tf.summary.text('test_confusion',
                            tf.as_string(testing_nn_params.metrics.confusion))
            tf.summary.scalar('test_overall_accuracy',
                              testing_nn_params.metrics.accuracy)

            tf.summary.text(
                'validation_confusion',
                tf.as_string(validation_nn_params.metrics.confusion))
            tf.summary.scalar('validation_overall_accuracy',
                              validation_nn_params.metrics.accuracy)

            tf.summary.scalar(
                'validation_average_accuracy',
                validation_nn_params.metrics.mean_per_class_accuracy)
            tf.summary.scalar('validation_kappa',
                              validation_nn_params.metrics.kappa)

            episode_start_time = time.time()

            # Logs are grouped per episode and per run.
            log_dir = os.path.join(
                base_log_path, 'log/episode_' + str(episode_run_index) +
                '/run_' + str(run_index))
            training_result = run_monitored_session(
                cross_entropy, log_dir, required_steps, class_range,
                flags.save_checkpoint_steps, flags.validation_steps,
                train_step, augmentation_info, flags.device,
                training_nn_params, training_tensor, testing_nn_params,
                testing_tensor, validation_nn_params, validation_tensor)

            print('Done training for %.3f sec' %
                  (time.time() - episode_start_time))

            testing_accuracy_list.append(training_result.test_accuracy)
            loss_list.append(training_result.loss)
            if flags.perform_validation:
                print(
                    'Run #%d, Validation accuracy=%g, Testing accuracy=%g, loss=%.2f'
                    % (run_index, training_result.validation_accuracy,
                       training_result.test_accuracy, training_result.loss))
                validation_accuracy_list.append(
                    training_result.validation_accuracy)
            else:
                print('Run #%d, Testing accuracy=%g, loss=%.2f' %
                      (run_index, training_result.test_accuracy,
                       training_result.loss))

    # Aggregate across runs: mean +- standard deviation.
    mean_validation_accuracy = None
    if flags.perform_validation:
        mean_validation_accuracy = mean(validation_accuracy_list)
        std_validation_accuracy = std(validation_accuracy_list)
        print('Validation result: (%g) +- (%g)' %
              (mean_validation_accuracy, std_validation_accuracy))
    mean_testing_accuracy = mean(testing_accuracy_list)
    std_testing_accuracy = std(testing_accuracy_list)
    mean_loss = mean(loss_list)
    std_loss = std(loss_list)
    print(
        'Mean testing accuracy result: (%g) +- (%g), Loss result: (%g) +- (%g)'
        % (mean_testing_accuracy, std_testing_accuracy, mean_loss, std_loss))

    episode_run_index = episode_run_index + 1

    return TrainingResult(validation_accuracy=mean_validation_accuracy,
                          test_accuracy=mean_testing_accuracy,
                          loss=mean_loss)
# Example #16
def main(_):
    """Build and train the configured GAN for shadow translation.

    All configuration comes from the module-level ``FLAGS``: the train log
    directory is created if missing, the hyperspectral data set and its
    shadow map are loaded, the input pipeline and the requested GAN variant
    (``cycle_gan``, ``gan_x2y`` or ``gan_y2x``) are built, and ``gan_train``
    is run with periodic validation, checkpointing and logging hooks.
    """
    log_dir = FLAGS.train_log_dir
    if not tf.gfile.Exists(log_dir):
        tf.gfile.MakeDirs(log_dir)

    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
        validation_iteration_count = FLAGS.validation_itr_count
        validation_sample_count = FLAGS.validation_sample_count
        loader_name = FLAGS.loader_name
        neighborhood = 0
        # The data loader class is resolved dynamically from its name
        # (module and class share the same identifier).
        loader = get_class(loader_name + '.' + loader_name)(FLAGS.path)
        data_set = loader.load_data(neighborhood, True)

        shadow_map, shadow_ratio = loader.load_shadow_map(
            neighborhood, data_set)

        with tf.name_scope('inputs'):
            initializer_hook = load_op(FLAGS.batch_size,
                                       FLAGS.max_number_of_steps, loader,
                                       data_set, shadow_map, shadow_ratio,
                                       FLAGS.regularization_support_rate)
            training_input_iter = initializer_hook.input_itr
            images_x, images_y = training_input_iter.get_next()
            # Set batch size for summaries.
            # images_x.set_shape([FLAGS.batch_size, None, None, None])
            # images_y.set_shape([FLAGS.batch_size, None, None, None])

        # Define model. All three wrappers are constructed eagerly; only the
        # one selected by FLAGS.gan_type is used below.
        gan_type = FLAGS.gan_type
        gan_train_wrapper_dict = {
            "cycle_gan":
            CycleGANWrapper(cycle_consistency_loss_weight=FLAGS.
                            cycle_consistency_loss_weight,
                            identity_loss_weight=FLAGS.identity_loss_weight,
                            use_identity_loss=FLAGS.use_identity_loss),
            "gan_x2y":
            GANWrapper(identity_loss_weight=FLAGS.identity_loss_weight,
                       use_identity_loss=FLAGS.use_identity_loss,
                       swap_inputs=False),
            "gan_y2x":
            GANWrapper(identity_loss_weight=FLAGS.identity_loss_weight,
                       use_identity_loss=FLAGS.use_identity_loss,
                       swap_inputs=True)
        }
        wrapper = gan_train_wrapper_dict[gan_type]

        with tf.variable_scope('Model', reuse=tf.AUTO_REUSE):
            the_gan_model = wrapper.define_model(images_x, images_y)
            peer_validation_hook = wrapper.create_validation_hook(
                data_set, loader, log_dir, neighborhood, shadow_map,
                shadow_ratio, validation_iteration_count,
                validation_sample_count)

            the_gan_loss = wrapper.define_loss(the_gan_model)

        # Define CycleGAN train ops.
        train_ops = _define_train_ops(the_gan_model, the_gan_loss)

        # Training
        train_steps = tfgan.GANTrainSteps(1, 1)
        status_message = tf.string_join([
            'Starting train step: ',
            tf.as_string(tf.train.get_or_create_global_step())
        ],
                                        name='status_message')
        if not FLAGS.max_number_of_steps:
            return

        # Bug fix: the original indexed gpu[0] unconditionally, which raises
        # IndexError on a CPU-only machine. Enable memory growth only when a
        # GPU is actually present.
        gpus = tf.config.experimental.list_physical_devices('GPU')
        if gpus:
            tf.config.experimental.set_memory_growth(gpus[0], True)

        # Keep up to 20 checkpoints so earlier validation points remain
        # restorable.
        training_scaffold = Scaffold(saver=tf.train.Saver(max_to_keep=20))

        gan_train(
            train_ops,
            log_dir,
            scaffold=training_scaffold,
            save_checkpoint_steps=validation_iteration_count,
            get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),
            hooks=[
                initializer_hook, peer_validation_hook,
                tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
                tf.train.LoggingTensorHook([status_message], every_n_iter=1000)
            ],
            master=FLAGS.master,
            is_chief=FLAGS.task == 0)
Пример #17
0
def main(_):
    """Restore a trained classifier and render full-scene prediction images.

    Builds the validation input pipeline over the whole scene, restores the
    ``nn_core`` variables from ``flags.base_log_path``, runs prediction for
    every pixel, and writes a raw label image and a colorized image to
    ``flags.output_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    model = get_class(flags.model_name + '.' + flags.model_name)()
    algorithm_params = model.get_default_params(flags.batch_size)
    if flags.algorithm_param_path is not None:
        # Bug fix: the original used json.load(open(...)) which leaks the
        # file handle; a with-block guarantees it is closed.
        with open(flags.algorithm_param_path, 'r') as algorithm_param_file:
            algorithm_params = json.load(algorithm_param_file)

    importer_name = flags.importer_name
    data_importer = get_class(importer_name + '.' + importer_name)()

    training_data_with_labels, test_data_with_labels, validation_data_with_labels, shadow_dict, class_range, \
    scene_shape, color_list = \
        data_importer.read_data_set(flags.loader_name, flags.path, flags.test_ratio, flags.neighborhood, True)

    # Expand the validation split to cover every pixel of the scene so the
    # prediction image is complete.
    validation_data_with_labels = data_importer.create_all_scene_data(
        scene_shape, validation_data_with_labels)
    testing_tensor, training_tensor, validation_tensor = data_importer.convert_data_to_tensor(
        test_data_with_labels, training_data_with_labels,
        validation_data_with_labels, class_range)

    deep_nn_template = tf.make_template('nn_core',
                                        model.create_tensor_graph,
                                        class_count=class_range.stop)

    start_time = time.time()

    validation_data_set = validation_tensor.dataset

    validation_input_iter = simple_nn_iterator(validation_data_set,
                                               flags.batch_size)
    validation_images, validation_labels = validation_input_iter.get_next()
    model_input_params = ModelInputParams(x=validation_images,
                                          y=None,
                                          device_id='/gpu:0',
                                          is_training=False)
    validation_tensor_outputs = deep_nn_template(
        model_input_params, algorithm_params=algorithm_params)
    validation_nn_params = NNParams(
        input_iterator=validation_input_iter,
        data_with_labels=validation_data_with_labels,
        metrics=None,
        predict_tensor=validation_tensor_outputs.y_conv)

    # Flat prediction buffer, one uint8 class label per scene pixel.
    prediction = numpy.empty([scene_shape[0] * scene_shape[1]],
                             dtype=numpy.uint8)

    # Restore only the classifier variables; image-generation variables are
    # excluded explicitly.
    saver = tf.train.Saver(var_list=slim.get_variables_to_restore(
        include=["nn_core"], exclude=["image_gen_net_"]))
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = False
    config.gpu_options.per_process_gpu_memory_fraction = 1.0

    with tf.Session(config=config) as session:
        # Restore variables from disk.
        saver.restore(session, flags.base_log_path)

        # Init for imaging the results
        validation_tensor.importer.perform_tensor_initialize(
            session, validation_tensor, validation_nn_params)
        perform_prediction(session, validation_nn_params, prediction)
        scene_as_image = numpy.reshape(prediction, scene_shape)

        imsave(os.path.join(flags.output_path, "result_raw.tif"),
               scene_as_image)

        imsave(os.path.join(flags.output_path, "result_colorized.tif"),
               create_colored_image(scene_as_image, color_list))

        # Init for accuracy
        # validation_tensor.importer.perform_tensor_initialize(session, validation_tensor, validation_nn_params)
        # validation_accuracy = calculate_accuracy(session, validation_nn_params)
        # print('Validation accuracy=%g' % validation_accuracy)

    print('Done evaluation(%.3f sec)' % (time.time() - start_time))
Пример #18
0
def main(_):
    """Run GAN inference over a whole scene to add or remove shadows.

    Depending on ``FLAGS.make_them_shadow`` ("shadow", "deshadow" or
    anything else for identity), selects the matching pixels via the shadow
    map, converts them through the restored generator, and writes both the
    converted hyperspectral image and an RGB rendering to
    ``FLAGS.output_path``.
    """
    _validate_flags()
    make_them_shadow = FLAGS.make_them_shadow

    loader_name = FLAGS.loader_name
    loader = get_class(loader_name + '.' + loader_name)(FLAGS.path)
    data_set = loader.load_data(0, True)
    # Normalization parameters; fall back to identity when the loader did
    # not record them.
    offset = data_set.casi_min
    multiplier = data_set.casi_max
    if offset is None:
        offset = 0
    if multiplier is None:
        multiplier = 1
    target_data_type = loader.get_original_data_type()
    shadow_map, shadow_ratio = loader.load_shadow_map(0, data_set)

    scene_shape = loader.get_scene_shape(data_set)
    element_size = loader.get_data_shape(data_set)
    # Drop the last band (not part of the generator's input).
    element_size = [element_size[0], element_size[1], element_size[2] - 1]

    convert_only_the_convenient_pixels = not FLAGS.convert_all
    if make_them_shadow == "shadow":
        shadow = True
        sign_to_filter_in_shadow_map = 0
    elif make_them_shadow == "deshadow":
        shadow = False
        sign_to_filter_in_shadow_map = 1
    else:
        # Identity mode: no pixel matches -1 in the shadow map, so every
        # pixel passes through unconverted.
        shadow = True
        sign_to_filter_in_shadow_map = -1
        make_them_shadow = "none"

    gan_inference_wrapper_dict = {
        "cycle_gan": CycleGANInferenceWrapper(),
        "gan_x2y": GANInferenceWrapper(fetch_shadows=False),
        "gan_y2x": GANInferenceWrapper(fetch_shadows=True)
    }

    input_tensor, output_tensor = gan_inference_wrapper_dict[
        FLAGS.gan_type].make_inference_graph(data_set,
                                             loader,
                                             shadow,
                                             clip_invalid_values=False)

    # Bug fix: the original indexed gpu[0] unconditionally, which raises
    # IndexError on a CPU-only machine. Enable memory growth only when a
    # GPU is actually present.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        tf.config.experimental.set_memory_growth(gpus[0], True)
    with tf.Session() as sess:
        if make_them_shadow != "none":
            gan_inference_wrapper_dict[
                FLAGS.gan_type].create_generator_restorer().restore(
                    sess, FLAGS.checkpoint_path)

        screen_size_first_dim = scene_shape[0]
        screen_size_sec_dim = scene_shape[1]

        progress_bar = tqdm(total=screen_size_first_dim * screen_size_sec_dim)
        band_size = element_size[2]
        hsi_image = numpy.zeros(
            [screen_size_first_dim, screen_size_sec_dim, band_size],
            dtype=target_data_type)
        for first_idx in range(0, screen_size_first_dim):
            for second_idx in range(0, screen_size_sec_dim):
                # Loader expects [x, y] point order.
                input_data = loader.get_point_value(
                    data_set, [second_idx, first_idx])[:, :, 0:band_size]
                input_data = numpy.expand_dims(input_data, axis=0)

                if not convert_only_the_convenient_pixels or shadow_map[
                        first_idx, second_idx] == sign_to_filter_in_shadow_map:
                    generated_y_data = export(sess, input_tensor, input_data,
                                              output_tensor)
                else:
                    generated_y_data = input_data

                # De-normalize back to the original value range and dtype.
                hsi_image[first_idx, second_idx, :] = \
                    ((generated_y_data * multiplier) + offset).astype(target_data_type)

                progress_bar.update(1)

        progress_bar.close()

        if convert_only_the_convenient_pixels:
            convert_region_suffix = ""
        else:
            convert_region_suffix = "all"

        imwrite(os.path.join(
            FLAGS.output_path,
            f"shadow_image_{make_them_shadow}_{convert_region_suffix}.tif"),
                hsi_image,
                planarconfig='contig')

        # Re-normalize for RGB rendering.
        hsi_image = hsi_image.astype(float)
        hsi_image -= offset
        hsi_image /= multiplier
        hsi_as_rgb = (
            get_rgb_from_hsi(loader.get_band_measurements(), hsi_image) *
            256).astype(numpy.uint8)
        imwrite(
            os.path.join(
                FLAGS.output_path,
                f"shadow_image_rgb_{make_them_shadow}_{convert_region_suffix}.tif"
            ), hsi_as_rgb)
Пример #19
0
def main(_):
    """Estimate the mean per-band shadow ratio produced by the generators.

    Samples random non-shadow pixels, runs them through the forward
    (X->Y) and backward (Y->X) generators, accumulates the per-band
    generated/original ratio, and prints the mean ratio and its product
    with the loader's reference shadow ratio.
    """
    numpy.set_printoptions(precision=5, suppress=True)
    neighborhood = FLAGS.neighborhood

    _validate_flags()

    loader_name = FLAGS.loader_name
    loader = get_class(loader_name + '.' + loader_name)(FLAGS.path)
    data_set = loader.load_data(neighborhood, True)

    element_size = loader.get_data_shape(data_set)
    # Drop the last band (not part of the generator's input).
    element_size = [element_size[0], element_size[1], element_size[2] - 1]
    images_x_hwc_pl, generated_y = make_inference_graph(model_forward_generator_name, element_size,
                                                        clip_invalid_values=False)
    images_y_hwc_pl, generated_x = make_inference_graph(model_backward_generator_name, element_size,
                                                        clip_invalid_values=False)

    # print_tensors_in_checkpoint_file(FLAGS.checkpoint_path, tensor_name='ModelX2Y', all_tensors=True)

    with tf.Session() as sess:
        create_generator_restorer().restore(sess, FLAGS.checkpoint_path)

        shadow_map, shadow_ratio = loader.load_shadow_map(neighborhood, data_set)
        # neighborhood aware indice finder
        if neighborhood > 0:
            indices = numpy.where(shadow_map[neighborhood:-neighborhood, neighborhood:-neighborhood] == 0)
        else:
            indices = numpy.where(shadow_map == 0)

        iteration_count = 3000 * 2
        band_size = element_size[2]
        total_band_ratio = numpy.zeros([1, 1, band_size], dtype=float)
        # Bug fix: the original divided by iteration_count even though
        # inf/nan samples are skipped, biasing the mean downward. Count only
        # the samples that actually contribute.
        valid_sample_count = 0
        for i in range(0, iteration_count):
            # Pick a random point
            data_indice = random.randint(0, indices[0].size - 1)

            test_indice = [indices[1][data_indice], indices[0][data_indice]]
            test_x_data = loader.get_point_value(data_set, test_indice)
            test_x_data = test_x_data[:, :, 0:band_size]

            # test_x_data = numpy.full([1, 1, band_size], fill_value=1.0, dtype=float)

            generated_y_data = export(sess, images_x_hwc_pl, test_x_data, generated_y)
            generated_x_data = export(sess, images_y_hwc_pl, generated_y_data, generated_x)

            band_ratio = numpy.mean(generated_y_data / test_x_data, axis=(0, 1))
            shadow_calc_ratio = band_ratio * shadow_ratio[0:band_size]

            is_there_inf = numpy.any(numpy.isinf(band_ratio))
            is_there_nan = numpy.any(numpy.isnan(band_ratio))
            if is_there_inf or is_there_nan:
                print("inf or nan value")
            else:
                total_band_ratio = total_band_ratio + band_ratio
                valid_sample_count = valid_sample_count + 1

            # Debug toggle: detailed per-sample dump only in single-shot runs.
            if iteration_count == 1:
                print_info(test_x_data, generated_x_data, generated_y_data, band_ratio, shadow_calc_ratio)

        # Guard against the degenerate case where every sample was skipped.
        denominator = max(valid_sample_count, 1)
        print("Mean total ratio")
        print(total_band_ratio / denominator)
        print("Mean Generated vs Original Ratio")
        print(total_band_ratio / denominator * shadow_ratio[0:band_size])
def main(_):
    """Convert a whole scene through a single generator and save the result.

    Uses hard-coded settings (shadow direction, convenient-pixels-only) to
    run the forward generator over every matching pixel and write the
    converted hyperspectral image to ``FLAGS.output_path``.
    """
    _validate_flags()
    convert_only_the_convenient_pixels = True
    make_them_shadow = True

    loader_name = FLAGS.loader_name
    loader = get_class(loader_name + '.' + loader_name)(FLAGS.path)
    data_set = loader.load_data(0, True)
    # Normalization parameters; fall back to identity when the loader did
    # not record them.
    offset = data_set.casi_min
    multiplier = data_set.casi_max
    if offset is None:
        offset = 0
    if multiplier is None:
        multiplier = 1
    target_data_type = loader.get_original_data_type()
    shadow_map, shadow_ratio = loader.load_shadow_map(0, data_set)

    scene_shape = loader.get_scene_shape(data_set)
    element_size = loader.get_data_shape(data_set)
    # Drop the last band (not part of the generator's input).
    element_size = [element_size[0], element_size[1], element_size[2] - 1]

    # Forward generator shadows non-shadow pixels (map value 0); backward
    # generator de-shadows shadow pixels (map value 1).
    if make_them_shadow:
        model_name = model_forward_generator_name
        sign_to_filter_in_shadow_map = 0
    else:
        model_name = model_backward_generator_name
        sign_to_filter_in_shadow_map = 1

    images_hwc_pl, generated_output = make_inference_graph(
        model_name, element_size, clip_invalid_values=False)

    with tf.Session() as sess:
        create_generator_restorer().restore(sess, FLAGS.checkpoint_path)

        screen_size_first_dim = scene_shape[0]
        screen_size_sec_dim = scene_shape[1]

        progress_bar = tqdm(total=screen_size_first_dim * screen_size_sec_dim)
        band_size = element_size[2]
        hsi_image = numpy.zeros(
            [screen_size_first_dim, screen_size_sec_dim, band_size],
            dtype=target_data_type)
        for first_idx in range(0, screen_size_first_dim):
            for second_idx in range(0, screen_size_sec_dim):
                # Loader expects [x, y] point order.
                input_data = loader.get_point_value(
                    data_set, [second_idx, first_idx])[:, :, 0:band_size]

                if not convert_only_the_convenient_pixels or (
                        convert_only_the_convenient_pixels
                        and shadow_map[first_idx, second_idx]
                        == sign_to_filter_in_shadow_map):
                    generated_y_data = export(sess, images_hwc_pl, input_data,
                                              generated_output)
                else:
                    # Non-matching pixels pass through unchanged.
                    generated_y_data = input_data

                # De-normalize back to the original value range and dtype.
                hsi_image[first_idx, second_idx, :] = \
                    ((generated_y_data * multiplier) + offset).astype(target_data_type)

                progress_bar.update(1)

        progress_bar.close()
        imwrite(os.path.join(FLAGS.output_path, "shadow_image.tif"),
                hsi_image,
                planarconfig='contig')