# Example #1
# 0
def main(_):
    """Load the GRSS2013 and GRSS2018 data sets and match them band-wise."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    normalize = True

    # LIDAR scale factors; the GRSS2018 scale is derived from the 2013 one.
    grss2013_lidar_scale = 5
    grss2018_lidar_scale = grss2013_lidar_scale / 2.5

    def _load_data_set(loader_name):
        # Instantiate the loader class named `loader_name` from the
        # same-named module and load its first data set.
        loader = get_class(loader_name + '.' + loader_name)(flags.path)
        return loader.load_data(0, normalize)

    grss_2013_data_set = _load_data_set("GRSS2013DataLoader")
    grss_2018_data_set = _load_data_set("GRSS2018DataLoader")

    # Band indices chosen for cross-data-set comparison.
    grss_2013_band = 8
    grss_2018_band = 2

    match_data(grss_2013_band, grss_2018_band, grss_2013_data_set,
               grss_2018_data_set, grss2013_lidar_scale, grss2018_lidar_scale)
def main():
    """Show the shadow map, clear shadow pixels that coincide with
    validation samples, show the result, and save it as a TIFF."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', nargs='?', const=True, type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    loader_name = flags.loader_name
    loader = get_class(loader_name + '.' + loader_name)(flags.path)

    sample_set = loader.load_samples(0.1, 0.1)
    data_set = loader.load_data(0, True)
    shadow_map, _ = loader.load_shadow_map(0, data_set)

    def _display(binary_map):
        # Render a binary map scaled to the 8-bit range, hiding axis ticks.
        plt.imshow(binary_map * 255)
        plt.title("figure_name"), plt.xticks([]), plt.yticks([])
        plt.show()

    _display(shadow_map)

    # Zero out shadow pixels hit by validation samples; count the samples
    # that fall outside the shadow map. Points are stored as (x, y).
    non_shadow_test_sample = 0
    for point in sample_set.validation_targets:
        row, col = point[1], point[0]
        if shadow_map[row, col] == 1:
            shadow_map[row, col] = 0
        else:
            non_shadow_test_sample += 1

    _display(shadow_map)

    imwrite("shadow_map.tif", shadow_map, planarconfig='contig')
# Example #3
# 0
def main():
    """Replace shadow regions in the muulf ground truth with the dominant
    neighboring target class, then save the corrected label image."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', nargs='?', const=True, type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    loader_name = flags.loader_name
    loader = get_class(loader_name + '.' + loader_name)(flags.path)
    sample_set = loader.load_samples(0.1, 0.1)
    data_set = loader.load_data(0, True)
    scene_shape = loader.get_scene_shape(data_set)

    target_classes_as_image = create_target_image_via_samples(sample_set, scene_shape)

    shadow_map = get_shadow_map(target_classes_as_image)
    imwrite("muulf_shadow_map.tif", shadow_map, planarconfig='contig')
    create_shadow_corrected_image(data_set.casi, loader.load_data(0, False).casi, shadow_map)
    draw_targets(loader.get_target_color_list(), target_classes_as_image, "Targets")

    contours, hierarchy = cv2.findContours(shadow_map, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        # Histogram of target classes found around this shadow contour.
        target_map = {}
        fill_targets_for_contour(contour, target_classes_as_image, target_map)
        # Shadow, invalid, and building classes cannot act as replacements.
        for excluded_class in (BUILDING_SHADOW_CLASS, INVALID_TARGET_VALUE, BUILDING_CLASS):
            target_map.pop(excluded_class, None)

        # The most frequent remaining neighbor wins (first one on ties,
        # matching insertion order).
        final_neigh_target = None
        if target_map:
            final_neigh_target = max(target_map, key=target_map.get)

        if final_neigh_target is None:
            print("found contour with no proper neighbors")
        else:
            image = get_contour_image(shadow_map.shape, contour)
            target_classes_as_image[image] = final_neigh_target
            print("shadow converted to neighboring target %d" % final_neigh_target)

    draw_targets(loader.get_target_color_list(), target_classes_as_image, "Targets after shadow correction")
    # Increase every valid target label by one.
    valid_mask = target_classes_as_image != INVALID_TARGET_VALUE
    target_classes_as_image[valid_mask] = target_classes_as_image[valid_mask] + 1
    imwrite("muulf_gt_shadow_corrected.tif", target_classes_as_image, planarconfig='contig')
def main(_):
    """Train a model once or under hyperopt hyper-parameter search.

    With ``--max_evals 1`` a single training episode runs using default
    (or JSON-loaded) algorithm parameters.  Otherwise TPE search resumes
    from a pickled ``Trials`` file, running one evaluation per loop
    iteration and checkpointing after each so a crash can resume.
    """
    flags = parse_cmd(argparse.ArgumentParser())

    print('Input information:', flags)

    nn_model = get_class(flags.model_name + '.' + flags.model_name)()

    if flags.max_evals == 1:
        print('Running in single execution training mode')

        algorithm_params = nn_model.get_default_params(flags.batch_size)
        if flags.algorithm_param_path is not None:
            # Use a context manager so the parameter file handle is closed
            # instead of being leaked (original used json.load(open(...))).
            with open(flags.algorithm_param_path, 'r') as param_file:
                algorithm_params = json.load(param_file)
        perform_an_episode(flags, algorithm_params, nn_model,
                           flags.base_log_path)
    else:
        print('Running in hyper parameter optimization mode')
        model_space_fun = nn_model.get_hyper_param_space

        # episode_run_index is shared with perform_an_episode via module
        # scope (presumably for log naming — TODO confirm).
        global episode_run_index
        trial_fileaddress = os.path.join(flags.base_log_path, "trial.p")
        while True:
            try:
                with open(trial_fileaddress, "rb") as read_file:
                    trials = pickle.load(read_file)
                episode_run_index = len(trials.trials)
                best = convert_trial_to_dictvalues(
                    trials.best_trial['misc']['vals'])
            except IOError:
                print("No trials file found. Starting trials from scratch")
                episode_run_index = 0
                trials = Trials()

            if episode_run_index == flags.max_evals:
                break

            # Minimize (1 - validation accuracy); max_evals is bumped by
            # exactly one so each loop iteration runs a single new episode.
            best = fmin(
                fn=lambda params: 1 -
                (perform_an_episode(flags, params, nn_model, flags.
                                    base_log_path).validation_accuracy),
                space=model_space_fun(),
                algo=tpe.suggest,
                trials=trials,
                max_evals=episode_run_index + 1)
            # Checkpoint the trials so the search can resume after a crash.
            with open(trial_fileaddress, "wb") as write_file:
                pickle.dump(trials, write_file)

        with open('trial_results.json', 'w') as result_file:
            json.dump(trials.results, result_file, indent=3)
        print(space_eval(model_space_fun(), best))
# Example #5
# 0
def main():
    """Build a colorized ground-truth image from the samples and save it."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    loader_name = flags.loader_name
    loader = get_class(loader_name + '.' + loader_name)(flags.path)
    sample_set = loader.load_samples(0.1, 0.1)
    data_set = loader.load_data(0, False)
    scene_shape = loader.get_scene_shape(data_set)

    # Rasterize the samples into a label image, colorize it, and save.
    target_image = create_target_image_via_samples(sample_set, scene_shape)
    colored_image = create_colored_image(target_image,
                                         loader.get_target_color_list())
    output_file = os.path.join(flags.output_path, "result_colorized.tif")
    imsave(output_file, colored_image)
# Example #6
# 0
def main(_):
    """Restore a trained model and predict the whole scene, saving the raw
    prediction image and a colorized version of it.

    The model checkpoint is read from ``flags.base_log_path``; output TIFFs
    go to ``flags.output_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    model = get_class(flags.model_name + '.' + flags.model_name)()
    algorithm_params = model.get_default_params(flags.batch_size)
    if flags.algorithm_param_path is not None:
        # Use a context manager so the parameter file handle is closed
        # instead of being leaked (original used json.load(open(...))).
        with open(flags.algorithm_param_path, 'r') as param_file:
            algorithm_params = json.load(param_file)

    importer_name = flags.importer_name
    data_importer = get_class(importer_name + '.' + importer_name)()

    training_data_with_labels, test_data_with_labels, validation_data_with_labels, shadow_dict, class_range, \
    scene_shape, color_list = \
        data_importer.read_data_set(flags.loader_name, flags.path, flags.test_ratio, flags.neighborhood, True)

    # Replace the validation split with data covering the full scene so the
    # prediction image has one value per pixel.
    validation_data_with_labels = data_importer.create_all_scene_data(
        scene_shape, validation_data_with_labels)
    testing_tensor, training_tensor, validation_tensor = data_importer.convert_data_to_tensor(
        test_data_with_labels, training_data_with_labels,
        validation_data_with_labels, class_range)

    deep_nn_template = tf.make_template('nn_core',
                                        model.create_tensor_graph,
                                        class_count=class_range.stop)

    start_time = time.time()

    validation_data_set = validation_tensor.dataset

    validation_input_iter = simple_nn_iterator(validation_data_set,
                                               flags.batch_size)
    validation_images, validation_labels = validation_input_iter.get_next()
    model_input_params = ModelInputParams(x=validation_images,
                                          y=None,
                                          device_id='/gpu:0',
                                          is_training=False)
    validation_tensor_outputs = deep_nn_template(
        model_input_params, algorithm_params=algorithm_params)
    validation_nn_params = NNParams(
        input_iterator=validation_input_iter,
        data_with_labels=None,
        metrics=None,
        predict_tensor=validation_tensor_outputs.y_conv)
    validation_nn_params.data_with_labels = validation_data_with_labels

    # One predicted class id per scene pixel, filled by perform_prediction.
    prediction = numpy.empty([scene_shape[0] * scene_shape[1]],
                             dtype=numpy.uint8)

    # Restore only the core network variables; exclude image generator ones.
    saver = tf.train.Saver(var_list=slim.get_variables_to_restore(
        include=["nn_core"], exclude=["image_gen_net_"]))
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = False
    config.gpu_options.per_process_gpu_memory_fraction = 1.0

    with tf.Session(config=config) as session:
        # Restore variables from disk.
        saver.restore(session, flags.base_log_path)

        # Initialize the input pipeline, run prediction over the scene, and
        # reshape the flat prediction vector back into image form.
        validation_tensor.importer.perform_tensor_initialize(
            session, validation_tensor, validation_nn_params)
        perform_prediction(session, validation_nn_params, prediction)
        scene_as_image = numpy.reshape(prediction, scene_shape)

        imsave(os.path.join(flags.output_path, "result_raw.tif"),
               scene_as_image)

        imsave(os.path.join(flags.output_path, "result_colorized.tif"),
               create_colored_image(scene_as_image, color_list))

    print('Done evaluation(%.3f sec)' % (time.time() - start_time))
# Example #7
# 0
def main():
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--cmds', help='commandlist filename')
    parser.add_argument('-d', '--delay', help='delay between commands [s]')
    parser.add_argument("-l",
                        help='start in live modus',
                        action="store_true",
                        default=False)
    args = parser.parse_args()

    if args.l == False:
        global cmd_delay
        try:
            commandlist = cmd_parser.parse_cmds(args.cmds)
            print commandlist, cmd_delay
        except ValueError as e:
            print e
        except IOError as e:
            print "I/O error({0}): {1}".format(e.errno,
                                               e.strerror) + ":", args.cmds

        if args.delay != None:
            cmd_delay = args.delay

        try:
            i2ctool = I2C.I2CTool()
            pool = ThreadPool(processes=1)

            for command in commandlist:
                if command.isif:
                    res = execute_command(pool, i2ctool, command)
                    if res == config.I2C_TRUE:
                        execute_command(
                            pool, i2ctool,
                            cmd_parser.parse_cmd(command.getThen()))
                    elif res == config.I2C_FALSE:
                        execute_command(
                            pool, i2ctool,
                            cmd_parser.parse_cmd(command.getElse()))
                else:
                    res = execute_command(pool, i2ctool, command)

                if res == config.I2C_ERR:
                    print "ERROR: command", command
                    print "exit programm"
                    break
        finally:
            exit_program()

    else:
        print "To exit enter 'quit'"
        i2ctool = I2C.I2CTool()
        pool = ThreadPool(processes=1)

        while True:
            try:
                print "Enter command: ",
                line = sys.stdin.readline()
                if line.startswith("quit"):
                    exit_program()
                command = cmd_parser.parse_cmd(line)
                if command.isif:
                    res = execute_command(pool, i2ctool, command)
                    if res == config.I2C_TRUE:
                        execute_command(
                            pool, i2ctool,
                            cmd_parser.parse_cmd(command.getThen()))
                    elif res == config.I2C_FALSE:
                        execute_command(
                            pool, i2ctool,
                            cmd_parser.parse_cmd(command.getElse()))
                else:
                    execute_command(pool, i2ctool, command)
            except ValueError as e:
                print e
# Example #8
# 0
def main(_):
    """Restore a trained model and collect value histograms for the tensors
    it exposes via ``histogram_tensors``, saving one mean-histogram plot
    per tensor to ``flags.output_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path',
                        nargs='?',
                        const=True,
                        type=str,
                        default=os.path.dirname(__file__),
                        help='Path for saving output images')
    flags = parse_cmd(parser)

    model = get_class(flags.model_name + '.' + flags.model_name)()
    algorithm_params = model.get_default_params(flags.batch_size)
    if flags.algorithm_param_path is not None:
        # Use a context manager so the parameter file handle is closed
        # instead of being leaked (original used json.load(open(...))).
        with open(flags.algorithm_param_path, 'r') as param_file:
            algorithm_params = json.load(param_file)

    importer_name = flags.importer_name
    # A controlled importer is used instead of the flag-selected one.
    data_importer = ControlledDataImporter()

    training_data_with_labels, test_data_with_labels, validation_data_with_labels, shadow_dict, class_range, \
    scene_shape, color_list = \
        data_importer.read_data_set(flags.loader_name, flags.path, flags.test_ratio, flags.neighborhood, True)

    validation_data_with_labels = data_importer.create_all_scene_data(
        scene_shape, validation_data_with_labels)
    testing_tensor, training_tensor, validation_tensor = data_importer.convert_data_to_tensor(
        test_data_with_labels, training_data_with_labels,
        validation_data_with_labels, class_range)

    deep_nn_template = tf.make_template('nn_core',
                                        model.create_tensor_graph,
                                        class_count=class_range.stop)

    start_time = time.time()

    validation_data_set = validation_tensor.dataset

    validation_input_iter = simple_nn_iterator(validation_data_set,
                                               flags.batch_size)
    validation_images, validation_labels = validation_input_iter.get_next()
    model_input_params = ModelInputParams(x=validation_images,
                                          y=None,
                                          device_id='/gpu:0',
                                          is_training=False)
    validation_tensor_outputs = deep_nn_template(
        model_input_params, algorithm_params=algorithm_params)
    validation_nn_params = NNParams(
        input_iterator=validation_input_iter,
        data_with_labels=None,
        metrics=None,
        predict_tensor=validation_tensor_outputs.y_conv)
    validation_nn_params.data_with_labels = validation_data_with_labels

    # Restore only the core network variables; exclude image generator ones.
    saver = tf.train.Saver(var_list=slim.get_variables_to_restore(
        include=["nn_core"], exclude=["image_gen_net_"]))
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = False
    config.gpu_options.per_process_gpu_memory_fraction = 1.0

    with tf.Session(config=config) as session:
        # Restore variables from disk.
        saver.restore(session, flags.base_log_path)

        # Init for imaging the results
        validation_tensor.importer.perform_tensor_initialize(
            session, validation_tensor, validation_nn_params)

        histogram_tensors = []
        histogram_tensor_names = []
        result_map = {}
        bin_map = {}
        # Bin counts scale with tensor size, normalized so the first
        # histogram tensor would get 480 bins.
        base_bin = 480 / calculate_tensor_size(
            validation_tensor_outputs.histogram_tensors[0].tensor)
        for histogram_tensor_instance in validation_tensor_outputs.histogram_tensors:
            histogram_tensors.append(histogram_tensor_instance.tensor)
            histogram_tensor_names.append(histogram_tensor_instance.name)
            result_map[histogram_tensor_instance.name] = []
            bin_map[histogram_tensor_instance.name] = int(
                base_bin *
                calculate_tensor_size(histogram_tensor_instance.tensor))

        sample_idx = 0
        while True:
            try:
                current_prediction = session.run(histogram_tensors)
                # Only samples with index > 2000 are collected, and
                # collection stops at 5000 (presumably skipping warm-up
                # batches — TODO confirm intent).
                if sample_idx > 2000:
                    for tensor_result, tensor_name in zip(
                            current_prediction, histogram_tensor_names):
                        result_map[tensor_name].append(tensor_result)
                if sample_idx == 5000:
                    break

                sample_idx = sample_idx + 1

            except tf.errors.OutOfRangeError:
                break

        for tensor_name in histogram_tensor_names:
            # Global value range across all collected results. The running
            # maximum starts at -sys.float_info.max: the original used
            # sys.float_info.min, which is the smallest *positive* float and
            # yields a wrong range whenever every value is negative.
            range_min = sys.float_info.max
            range_max = -sys.float_info.max
            for result in result_map[tensor_name]:
                range_min = min(range_min, result.min())
                range_max = max(range_max, result.max())

            # numpy.int was a deprecated alias removed in NumPy 1.24;
            # the builtin int is the documented replacement.
            all_hists = numpy.zeros(
                [len(result_map[tensor_name]), bin_map[tensor_name]],
                dtype=int)
            bin_edges = None
            for idx, result in enumerate(result_map[tensor_name]):
                hist, bin_edges = numpy.histogram(result,
                                                  range=(range_min, range_max),
                                                  bins=bin_map[tensor_name])
                all_hists[idx] = hist

            # Plot the mean histogram over all collected batches and save
            # one figure per tensor.
            mean = numpy.mean(all_hists, axis=0)
            plt.plot(bin_edges[:-1], mean, label=tensor_name)
            plt.xlim(min(bin_edges), max(bin_edges))
            plt.xlabel("Value")
            plt.ylabel("Counts")
            plt.fill_between(bin_edges[:-1], mean)
            plt.title(tensor_name)
            plt.savefig(
                os.path.join(flags.output_path, tensor_name + "_fig.jpg"))
            plt.clf()

    print('Done evaluation(%.3f sec)' % (time.time() - start_time))