Example #1
def test(DATASET="Texas", CONFIG=None):
    """
    1. Fetch data (x, y, change_map)
    2. Compute/estimate A_x and A_y (for patches)
    3. Compute change_prior
    4. Define dataset with (x, A_x, y, A_y, p). Choose patch size compatible
       with affinity computations.
    5. Train CrossCyclicImageTransformer unsupervised
        a. Evaluate the image transformations in some way?
    6. Evaluate the change detection scheme
        a. change_map = threshold [(x - f_y(y))/2 + (y - f_x(x))/2]
           (a standalone sketch of this step follows the example)
    """
    if CONFIG is None:
        CONFIG = get_config_kACE(DATASET)

    print(f"Loading {DATASET} data")
    x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)
    if tf.config.list_physical_devices("GPU") and not CONFIG["debug"]:
        C_CODE = 3
        print("GPU available: using the full translation networks")
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [50, 50, C_CODE]
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [50, 50, C_CODE]
            },
            "dec_X": {
                "input_chs": C_CODE,
                "filter_spec": [50, 50, C_X]
            },
            "dec_Y": {
                "input_chs": C_CODE,
                "filter_spec": [50, 50, C_Y]
            },
        }
    else:
        print("why here?")
        C_CODE = 1
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [C_CODE]
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [C_CODE]
            },
            "dec_X": {
                "input_chs": C_CODE,
                "filter_spec": [C_X]
            },
            "dec_Y": {
                "input_chs": C_CODE,
                "filter_spec": [C_Y]
            },
        }
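    # The spec above wires up two symmetric autoencoders: enc_X/dec_X for the X
    # domain and enc_Y/dec_Y for the Y domain, meeting in a shared C_CODE-channel
    # code space. Cross-domain translation chains one encoder with the other
    # decoder, e.g. f_x(x) = dec_Y(enc_X(x)), as used in the docstring's step 6a.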
    print("Change Detector Init")
    cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)
    print("Training")
    training_time = 0
    cross_loss_weight = tf.expand_dims(
        tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)
    for epochs in CONFIG["list_epochs"]:
        CONFIG.update(epochs=epochs)
        tr_gen, dtypes, shapes = datasets._training_data_generator(
            x_im[0], y_im[0], cross_loss_weight[0], CONFIG["patch_size"])
        TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)
        TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)
        # Only alpha from the last batch is kept; this assumes EVALUATE yields
        # the full image as a single batch, so alpha covers every pixel.
        for x, y, _ in EVALUATE.batch(1):
            alpha = cd([x, y])
        cross_loss_weight = 1.0 - alpha
        training_time += tr_time

    cd.save_all_weights()
    cd.final_evaluate(EVALUATE, **CONFIG)
    # Alternate evaluation
    cd.alt_evaluate(EVALUATE, **CONFIG)
    final_kappa = cd.metrics_history["cohens kappa"][-1]
    final_acc = cd.metrics_history["ACC"][-1]
    performance = (final_kappa, final_acc)
    timestamp = cd.timestamp
    epoch = cd.epoch.numpy()
    speed = (epoch, training_time, timestamp)

    # Save config parameters
    with open(os.path.join(CONFIG["logdir"], "config.txt"), "w+") as f:
        print(CONFIG, file=f)

    # Save full translated images
    # Translate the full images (x, y are the last EVALUATE batch) in inference mode
    training = False
    x_code, y_code = cd._enc_x(x, training), cd._enc_y(y, training)
    x_hat, y_hat = cd._dec_x(y_code, training), cd._dec_y(x_code, training)

    im_dir = CONFIG["logdir"]
    #print("SHAPE OF X_HAT: ", x_hat.shape)
    #print("SHAPE OF Y_HAT: ", y_hat.shape)
    x_hat_out = np.squeeze(np.array(x_hat))
    y_hat_out = np.squeeze(np.array(y_hat))
    #print("SHAPE OF X_HAT_OUT: ", x_hat_out.shape)
    #print("SHAPE OF Y_HAT_OUT: ", y_hat_out.shape)

    tifffile.imwrite(os.path.join(im_dir, "x_hat_full.tif"),
                     x_hat_out,
                     planarconfig="contig")
    tifffile.imwrite(os.path.join(im_dir, "y_hat_full.tif"),
                     y_hat_out,
                     planarconfig="contig")

    del x_hat, x_code, y_hat, y_code
    del cd
    gc.collect()

    return performance, speed
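
The thresholding in step 6a of the docstring happens inside cd.final_evaluate rather than in this function. Below is a minimal standalone sketch of that step, assuming per-pixel Euclidean error norms and Otsu's threshold; the helper name and the threshold choice are illustrative, not Kern_AceNet's actual implementation.

import numpy as np
from skimage.filters import threshold_otsu

def change_map_from_translations(x, y, x_hat, y_hat):
    # Hypothetical helper: x_hat = f_y(y) is y translated into the X domain,
    # y_hat = f_x(x) is x translated into the Y domain.
    d_x = np.linalg.norm(x - x_hat, axis=-1)   # per-pixel error in the X domain
    d_y = np.linalg.norm(y - y_hat, axis=-1)   # per-pixel error in the Y domain
    diff = 0.5 * d_x + 0.5 * d_y               # (x - f_y(y))/2 + (y - f_x(x))/2
    return diff > threshold_otsu(diff)         # boolean change map

# Usage with random stand-in data:
# x, x_hat = np.random.rand(100, 100, 7), np.random.rand(100, 100, 7)
# y, y_hat = np.random.rand(100, 100, 10), np.random.rand(100, 100, 10)
# cm = change_map_from_translations(x, y, x_hat, y_hat)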
Example #2
def test(DATASET="Texas", CONFIG=None):
    """
    1. Fetch data (x, y, change_map)
    2. Compute/estimate A_x and A_y (for patches)
    3. Compute change_prior
    4. Define dataset with (x, A_x, y, A_y, p). Choose patch size compatible
       with affinity computations.
    5. Train CrossCyclicImageTransformer unsupervised
        a. Evaluate the image transformations in some way?
    6. Evaluate the change detection scheme
        a. change_map = threshold [(x - f_y(y))/2 + (y - f_x(x))/2]
    """
    if CONFIG is None:
        CONFIG = get_config_kACE(DATASET)
    print(f"Loading {DATASET} data")
    x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)
    if tf.config.list_physical_devices("GPU") and not CONFIG["debug"]:
        C_CODE = 3
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [50, 50, C_CODE]
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [50, 50, C_CODE]
            },
            "dec_X": {
                "input_chs": C_CODE,
                "filter_spec": [50, 50, C_X]
            },
            "dec_Y": {
                "input_chs": C_CODE,
                "filter_spec": [50, 50, C_Y]
            },
        }
    else:
        C_CODE = 1
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [C_CODE]
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [C_CODE]
            },
            "dec_X": {
                "input_chs": C_CODE,
                "filter_spec": [C_X]
            },
            "dec_Y": {
                "input_chs": C_CODE,
                "filter_spec": [C_Y]
            },
        }
    print("Change Detector Init")
    cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)
    print("Training")
    training_time = 0
    cross_loss_weight = tf.expand_dims(
        tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)
    for epochs in CONFIG["list_epochs"]:
        CONFIG.update(epochs=epochs)
        tr_gen, dtypes, shapes = datasets._training_data_generator(
            x_im[0], y_im[0], cross_loss_weight[0], CONFIG["patch_size"])
        TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)
        TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)
        for x, y, _ in EVALUATE.batch(1):
            alpha = cd([x, y])
        cross_loss_weight = 1.0 - alpha
        training_time += tr_time

    cd.final_evaluate(EVALUATE, **CONFIG)
    final_kappa = cd.metrics_history["cohens kappa"][-1]
    final_acc = cd.metrics_history["ACC"][-1]
    performance = (final_kappa, final_acc)
    timestamp = cd.timestamp
    epoch = cd.epoch.numpy()
    speed = (epoch, training_time, timestamp)
    del cd
    gc.collect()
    return performance, speed
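
The training loop above alternates weight updates with a re-estimate of alpha, the network's per-pixel change score, and feeds 1 - alpha back in as cross_loss_weight. A minimal sketch of that reweighting idea, assuming alpha lies in [0, 1] and a plain weighted L2 cross loss (the actual loss inside cd.train may differ):

import tensorflow as tf

# Stand-in for alpha = cd([x, y]), shape (batch, H, W, 1)
alpha = tf.random.uniform((1, 100, 100, 1))
cross_loss_weight = 1.0 - alpha   # same update as in the loop above

# Hypothetical weighted cross loss: likely-changed pixels (alpha ~ 1) get
# weight ~ 0, so the translators learn mostly from unchanged areas.
x_true = tf.random.normal((1, 100, 100, 7))
x_from_y = tf.random.normal((1, 100, 100, 7))   # stand-in for f_y(y)
per_pixel = tf.reduce_mean(tf.square(x_true - x_from_y), axis=-1, keepdims=True)
weighted_loss = tf.reduce_mean(cross_loss_weight * per_pixel)
print(float(weighted_loss))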
Example #3
    # Alternative dataset selections, kept for reference:
    # process_list = polmak_list
    # process_list = ["Polmak-Air05-Air10-align_sub0", "Polmak-Air10-Air15-align_sub0"]
    # process_list = ["Polmak-Air05-Air10-align_sub0", "Polmak-Air05-Air15-align_sub0"]
    # process_list = ["Polmak-PAL-A2-align", "Polmak-LS5-S2", "Polmak-LS2004-RLee_C-align", "Texas", "California", "Polmak-LS5-PGNLM_A", "Polmak-LS5-PGNLM_C"]
    # process_list = ["Polmak-LS5-S2", "Polmak-A2-S2", "Polmak-A2-S2-collocate", "Polmak-LS5-PGNLM_A", "Polmak-LS5-PGNLM_C"]

    for DATASET in process_list:
        if DATASET in [
                "Polmak-Pal-RS2_010817-collocate", "Polmak-LS5-S2-NDVI",
                "Polmak-Air05-S2-align_sub0", "Polmak-Air15-S2-align_sub0"
        ]:
            print("Skipping dataset: " + DATASET)
            continue
        print(DATASET)
        CONFIG = get_config_kACE(DATASET)

        # Basis for difference image: "original" or "translated"
        CONFIG["difference_basis"] = "original"
        # Bandwidth for domain difference images
        CONFIG["domain_diff_bw_x"] = tf.constant(3.0, dtype=tf.float32)
        CONFIG["domain_diff_bw_y"] = tf.constant(3.0, dtype=tf.float32)
        #CONFIG["krnl_width_x"] = 1.0
        #CONFIG["krnl_width_y"] = 1.0

        # Optional suffix for the log output name, e.g. "_NOTILDE" or "_sigma25pct"
        suffix = ""
        suffix += "_diff-" + CONFIG["difference_basis"]
        print(suffix)
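
domain_diff_bw_x and domain_diff_bw_y set the bandwidths used when the framework builds its domain difference images. A minimal sketch of what such a bandwidth typically controls, assuming a Gaussian kernel exp(-d^2 / h^2) over per-pixel distances d; the exact kernel used by the framework is not shown in this snippet:

import tensorflow as tf

h = tf.constant(3.0, dtype=tf.float32)            # cf. CONFIG["domain_diff_bw_x"]
d = tf.constant([0.5, 1.0, 3.0, 6.0])             # example per-pixel distances
affinity = tf.exp(-tf.square(d) / tf.square(h))   # larger h -> flatter weighting
print(affinity.numpy())                           # ~[0.973, 0.895, 0.368, 0.018]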
Example #4
def test(DATASET="Texas", CONFIG=None):
    """
    1. Fetch data (x, y, change_map)
    2. Compute/estimate A_x and A_y (for patches)
    3. Compute change_prior
    4. Define dataset with (x, A_x, y, A_y, p). Choose patch size compatible
       with affinity computations.
    5. Train CrossCyclicImageTransformer unsupervised
        a. Evaluate the image transformations in some way?
    6. Evaluate the change detection scheme
        a. change_map = threshold [(x - f_y(y))/2 + (y - f_x(x))/2]
    """
    if CONFIG is None:
        CONFIG = get_config_kACE(DATASET)
    print(f"Loading {DATASET} data")
    x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)
    if tf.config.list_physical_devices("GPU") and not CONFIG["debug"]:
        C_CODE = 3
        print("here")
        TRANSLATION_SPEC = {
            "enc_X": {"input_chs": C_X, "filter_spec": [50, 50, C_CODE]},
            "enc_Y": {"input_chs": C_Y, "filter_spec": [50, 50, C_CODE]},
            "dec_X": {"input_chs": C_CODE, "filter_spec": [50, 50, C_X]},
            "dec_Y": {"input_chs": C_CODE, "filter_spec": [50, 50, C_Y]},
        }
    else:
        print("why here?")
        C_CODE = 1
        TRANSLATION_SPEC = {
            "enc_X": {"input_chs": C_X, "filter_spec": [C_CODE]},
            "enc_Y": {"input_chs": C_Y, "filter_spec": [C_CODE]},
            "dec_X": {"input_chs": C_CODE, "filter_spec": [C_X]},
            "dec_Y": {"input_chs": C_CODE, "filter_spec": [C_Y]},
        }
    print("Change Detector Init")
    cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)
    print("Training")
    training_time = 0
    cross_loss_weight = tf.expand_dims(tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)
    for epochs in CONFIG["list_epochs"]:
        CONFIG.update(epochs=epochs)
        tr_gen, dtypes, shapes = datasets._training_data_generator(
            x_im[0], y_im[0], cross_loss_weight[0], CONFIG["patch_size"]
        )
        TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)
        TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)
        for x, y, _ in EVALUATE.batch(1):
            alpha = cd([x, y])
        cross_loss_weight = 1.0 - alpha
        training_time += tr_time

    cd.load_all_weights(cd.log_path)
    cd.final_evaluate(EVALUATE, **CONFIG)
    metrics = {}
    for key in list(cd.difference_img_metrics.keys()) + list(
        cd.change_map_metrics.keys()
    ):
        metrics[key] = cd.metrics_history[key][-1]
    metrics["F1"] = metrics["TP"] / (
        metrics["TP"] + 0.5 * (metrics["FP"] + metrics["FN"])
    )
    timestamp = cd.timestamp
    epoch = cd.epoch.numpy()
    speed = (epoch, training_time, timestamp)
    del cd
    gc.collect()
    return metrics, speed
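
The F1 line above is F1 = TP / (TP + (FP + FN)/2), the standard definition. For reference, a short sketch computing ACC, F1, and Cohen's kappa from the same confusion counts (the key names mirror cd.metrics_history; the counts here are made up):

def summarize(TP, FP, TN, FN):
    n = TP + FP + TN + FN
    acc = (TP + TN) / n                     # ACC: observed agreement
    f1 = TP / (TP + 0.5 * (FP + FN))        # same formula as in the function
    # Cohen's kappa: observed agreement corrected for chance agreement
    p_e = ((TP + FP) / n) * ((TP + FN) / n) + ((TN + FN) / n) * ((TN + FP) / n)
    kappa = (acc - p_e) / (1 - p_e)
    return {"ACC": acc, "F1": f1, "cohens kappa": kappa}

print(summarize(TP=800, FP=50, TN=9000, FN=150))
# {'ACC': 0.98, 'F1': 0.888..., 'cohens kappa': 0.877...}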