Example #1
def test(DATASET="Texas", CONFIG=None):
    """
    1. Fetch data (x, y, change_map)
    2. Compute/estimate A_x and A_y (for patches)
    3. Compute change_prior
    4. Define dataset with (x, A_x, y, A_y, p). Choose patch size compatible
       with affinity computations.
    5. Train CrossCyclicImageTransformer unsupervised
        a. Evaluate the image transformations in some way?
    6. Evaluate the change detection scheme
        a. change_map = threshold [(x - f_y(y))/2 + (y - f_x(x))/2]
    """
    if CONFIG is None:
        CONFIG = get_config_kACE(DATASET)
    print(f"Loading {DATASET} data")
    x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)
    if tf.config.list_physical_devices("GPU") and not CONFIG["debug"]:
        C_CODE = 3
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [50, 50, C_CODE]
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [50, 50, C_CODE]
            },
            "dec_X": {
                "input_chs": C_CODE,
                "filter_spec": [50, 50, C_X]
            },
            "dec_Y": {
                "input_chs": C_CODE,
                "filter_spec": [50, 50, C_Y]
            },
        }
    else:
        C_CODE = 1
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [C_CODE]
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [C_CODE]
            },
            "dec_X": {
                "input_chs": C_CODE,
                "filter_spec": [C_X]
            },
            "dec_Y": {
                "input_chs": C_CODE,
                "filter_spec": [C_Y]
            },
        }
    print("Change Detector Init")
    cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)
    print("Training")
    training_time = 0
    cross_loss_weight = tf.expand_dims(
        tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)
    for epochs in CONFIG["list_epochs"]:
        CONFIG.update(epochs=epochs)
        tr_gen, dtypes, shapes = datasets._training_data_generator(
            x_im[0], y_im[0], cross_loss_weight[0], CONFIG["patch_size"])
        TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)
        TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)
        # Re-estimate the change probability (alpha) on the evaluation data and
        # use it to down-weight likely-changed pixels in the next round's cross loss.
        for x, y, _ in EVALUATE.batch(1):
            alpha = cd([x, y])
        cross_loss_weight = 1.0 - alpha
        training_time += tr_time

    cd.final_evaluate(EVALUATE, **CONFIG)
    final_kappa = cd.metrics_history["cohens kappa"][-1]
    final_acc = cd.metrics_history["ACC"][-1]
    performance = (final_kappa, final_acc)
    timestamp = cd.timestamp
    epoch = cd.epoch.numpy()
    speed = (epoch, training_time, timestamp)
    del cd
    gc.collect()
    return performance, speed
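A minimal sketch of step 6a from the docstring, assuming x, y and their translations f_y(y), f_x(x) are H x W x C float arrays; the Otsu threshold (via scikit-image) is an assumption here, since the snippet does not show how the change detector thresholds its difference image:

import numpy as np
from skimage.filters import threshold_otsu

def change_map_from_translations(x, x_hat, y, y_hat):
    # Average the two per-pixel translation errors into one difference image.
    d = 0.5 * np.linalg.norm(x - x_hat, axis=-1) + 0.5 * np.linalg.norm(y - y_hat, axis=-1)
    # Threshold the difference image to obtain a binary change map.
    return d > threshold_otsu(d)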
Example #2
def test(DATASET="Texas", CONFIG=None):
    """
    1. Fetch data (x, y, change_map)
    2. Compute/estimate A_x and A_y (for patches)
    3. Compute change_prior
    4. Define dataset with (x, A_x, y, A_y, p). Choose patch size compatible
       with affinity computations.
    5. Train CrossCyclicImageTransformer unsupervised
        a. Evaluate the image transformations in some way?
    6. Evaluate the change detection scheme
        a. change_map = threshold [(x - f_y(y))/2 + (y - f_x(x))/2]
    """
    if CONFIG is None:
        CONFIG = get_config_kACE(DATASET)

    print(f"Loading {DATASET} data")
    x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)
    if tf.config.list_physical_devices("GPU") and not CONFIG["debug"]:
        C_CODE = 3
        print("here")
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [50, 50, C_CODE]
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [50, 50, C_CODE]
            },
            "dec_X": {
                "input_chs": C_CODE,
                "filter_spec": [50, 50, C_X]
            },
            "dec_Y": {
                "input_chs": C_CODE,
                "filter_spec": [50, 50, C_Y]
            },
        }
    else:
        print("why here?")
        C_CODE = 1
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [C_CODE]
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [C_CODE]
            },
            "dec_X": {
                "input_chs": C_CODE,
                "filter_spec": [C_X]
            },
            "dec_Y": {
                "input_chs": C_CODE,
                "filter_spec": [C_Y]
            },
        }
    print("Change Detector Init")
    cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)
    print("Training")
    training_time = 0
    cross_loss_weight = tf.expand_dims(
        tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)
    for epochs in CONFIG["list_epochs"]:
        CONFIG.update(epochs=epochs)
        tr_gen, dtypes, shapes = datasets._training_data_generator(
            x_im[0], y_im[0], cross_loss_weight[0], CONFIG["patch_size"])
        TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)
        TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)
        for x, y, _ in EVALUATE.batch(1):
            alpha = cd([x, y])
        cross_loss_weight = 1.0 - alpha
        training_time += tr_time

    cd.save_all_weights()
    cd.final_evaluate(EVALUATE, **CONFIG)
    # Alternate evaluation
    cd.alt_evaluate(EVALUATE, **CONFIG)
    final_kappa = cd.metrics_history["cohens kappa"][-1]
    final_acc = cd.metrics_history["ACC"][-1]
    performance = (final_kappa, final_acc)
    timestamp = cd.timestamp
    epoch = cd.epoch.numpy()
    speed = (epoch, training_time, timestamp)

    # Save config parameters
    with open(os.path.join(CONFIG["logdir"], "config.txt"), "w+") as f:
        print(CONFIG, file=f)

    # Save full translated images. Note: x and y are the last batch drawn from
    # EVALUATE in the loop above; the flag requests inference-mode forward passes.
    train_flag_tmp = False
    x_code, y_code = cd._enc_x(x, train_flag_tmp), cd._enc_y(y, train_flag_tmp)
    x_hat, y_hat = cd._dec_x(y_code, train_flag_tmp), cd._dec_y(
        x_code, train_flag_tmp)

    im_dir = CONFIG["logdir"]
    #print("SHAPE OF X_HAT: ", x_hat.shape)
    #print("SHAPE OF Y_HAT: ", y_hat.shape)
    x_hat_out = np.squeeze(np.array(x_hat))
    y_hat_out = np.squeeze(np.array(y_hat))
    #print("SHAPE OF X_HAT_OUT: ", x_hat_out.shape)
    #print("SHAPE OF Y_HAT_OUT: ", y_hat_out.shape)

    tifffile.imwrite(os.path.join(im_dir, "x_hat_full.tif"),
                     x_hat_out,
                     planarconfig="contig")
    tifffile.imwrite(os.path.join(im_dir, "y_hat_full.tif"),
                     y_hat_out,
                     planarconfig="contig")

    del x_hat, x_code, y_hat, y_code
    del cd
    gc.collect()

    return performance, speed
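To spot-check the export, the saved translations can be read back with tifffile; the log directory below is a hypothetical stand-in for CONFIG["logdir"]:

import os
import tifffile

LOGDIR = "logs/Texas"  # hypothetical path; use CONFIG["logdir"] in practice
x_hat_full = tifffile.imread(os.path.join(LOGDIR, "x_hat_full.tif"))
print(x_hat_full.shape, x_hat_full.dtype)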
Example #3
#!/usr/bin/env python

import os
import os.path
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
                                'sparat'))

import argparse

from datasets import datasets, get_dataset_path, fetch

parser = argparse.ArgumentParser(description="Fetch raw data files.",
                                 epilog="available datasets: " +
                                 ", ".join(datasets.keys()))
parser.add_argument(
    'tofetch',
    nargs='*',
    help="Names of datasets to fetch. If not given, all will be fetched.")
args = parser.parse_args()

if len(args.tofetch) <= 0:
    args.tofetch = datasets.keys()

for dataset_name in args.tofetch:
    fetch(dataset_name, get_dataset_path(dataset_name, datasets[dataset_name]))
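Assumed invocations, given the argparse definition above (the script name is hypothetical):

# python fetch_data.py           -> fetch every dataset listed in `datasets`
# python fetch_data.py Texas     -> fetch only the dataset named "Texas"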
Example #4
def test(DATASET="Texas", CONFIG=None):
    if CONFIG is None:
        CONFIG = get_config(DATASET)
    _, _, EVALUATE, _ = datasets.fetch(DATASET, **CONFIG)
    cd = ChangePrior(**CONFIG)
    cd.print_all_input_images(EVALUATE)
Example #5
def test(DATASET="Texas"):
    CONFIG = get_config(DATASET)
    _, _, EVALUATE, _ = datasets.fetch(DATASET, **CONFIG)
    cd = ChangeDetector(**CONFIG)
    cd.print_all_input_images(EVALUATE)
Example #6
def test(DATASET="Texas", CONFIG=None):
    """
    1. Fetch data (x, y, change_map)
    2. Compute/estimate A_x and A_y (for patches)
    3. Compute change_prior
    4. Define dataset with (x, A_x, y, A_y, p). Choose patch size compatible
       with affinity computations.
    5. Train CrossCyclicImageTransformer unsupervised
        a. Evaluate the image transformations in some way?
    6. Evaluate the change detection scheme
        a. change_map = threshold [(x - f_y(y))/2 + (y - f_x(x))/2]
    """
    if CONFIG is None:
        CONFIG = get_config_kACE(DATASET)
    print(f"Loading {DATASET} data")
    x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)
    if tf.config.list_physical_devices("GPU") and not CONFIG["debug"]:
        C_CODE = 3
        print("here")
        TRANSLATION_SPEC = {
            "enc_X": {"input_chs": C_X, "filter_spec": [50, 50, C_CODE]},
            "enc_Y": {"input_chs": C_Y, "filter_spec": [50, 50, C_CODE]},
            "dec_X": {"input_chs": C_CODE, "filter_spec": [50, 50, C_X]},
            "dec_Y": {"input_chs": C_CODE, "filter_spec": [50, 50, C_Y]},
        }
    else:
        print("why here?")
        C_CODE = 1
        TRANSLATION_SPEC = {
            "enc_X": {"input_chs": C_X, "filter_spec": [C_CODE]},
            "enc_Y": {"input_chs": C_Y, "filter_spec": [C_CODE]},
            "dec_X": {"input_chs": C_CODE, "filter_spec": [C_X]},
            "dec_Y": {"input_chs": C_CODE, "filter_spec": [C_Y]},
        }
    print("Change Detector Init")
    cd = Kern_AceNet(TRANSLATION_SPEC, **CONFIG)
    print("Training")
    training_time = 0
    cross_loss_weight = tf.expand_dims(tf.zeros(x_im.shape[:-1], dtype=tf.float32), -1)
    for epochs in CONFIG["list_epochs"]:
        CONFIG.update(epochs=epochs)
        tr_gen, dtypes, shapes = datasets._training_data_generator(
            x_im[0], y_im[0], cross_loss_weight[0], CONFIG["patch_size"]
        )
        TRAIN = tf.data.Dataset.from_generator(tr_gen, dtypes, shapes)
        TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)
        for x, y, _ in EVALUATE.batch(1):
            alpha = cd([x, y])
        cross_loss_weight = 1.0 - alpha
        training_time += tr_time

    cd.load_all_weights(cd.log_path)
    cd.final_evaluate(EVALUATE, **CONFIG)
    metrics = {}
    for key in list(cd.difference_img_metrics.keys()) + list(
        cd.change_map_metrics.keys()
    ):
        metrics[key] = cd.metrics_history[key][-1]
    metrics["F1"] = metrics["TP"] / (
        metrics["TP"] + 0.5 * (metrics["FP"] + metrics["FN"])
    )
    timestamp = cd.timestamp
    epoch = cd.epoch.numpy()
    speed = (epoch, training_time, timestamp)
    del cd
    gc.collect()
    return metrics, speed
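A quick sanity check of the F1 expression used above: TP / (TP + (FP + FN) / 2) is algebraically the harmonic mean of precision and recall (the counts below are made up):

TP, FP, FN = 80, 10, 30
precision = TP / (TP + FP)
recall = TP / (TP + FN)
f1_direct = TP / (TP + 0.5 * (FP + FN))
f1_harmonic = 2 * precision * recall / (precision + recall)
assert abs(f1_direct - f1_harmonic) < 1e-12  # both evaluate to 0.8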
Example #7
def test(DATASET="Texas", CONFIG=None):
    """
    1. Fetch data (x, y, change_map)
    2. Compute/estimate A_x and A_y (for patches)
    3. Compute change_prior
    4. Define dataset with (x, A_x, y, A_y, p). Choose patch size compatible
       with affinity computations.
    5. Train CrossCyclicImageTransformer unsupervised
        a. Evaluate the image transformations in some way?
    6. Evaluate the change detection scheme
        a. change_map = threshold [(x - f_y(y))/2 + (y - f_x(x))/2]
    """
    if CONFIG is None:
        CONFIG = get_config_SCCN(DATASET)
    x_im, y_im, EVALUATE, (C_X, C_Y) = datasets.fetch(DATASET, **CONFIG)
    if tf.config.list_physical_devices("GPU") and not CONFIG["debug"]:
        C_CODE = CONFIG["C_CODE"]
        TRANSLATION_SPEC = {
            "enc_X": {
                "input_chs": C_X,
                "filter_spec": [C_CODE, C_CODE, C_CODE, C_CODE],
            },
            "enc_Y": {
                "input_chs": C_Y,
                "filter_spec": [C_CODE, C_CODE, C_CODE, C_CODE],
            },
            "dec_X": {"input_chs": C_CODE, "filter_spec": [C_X]},
            "dec_Y": {"input_chs": C_CODE, "filter_spec": [C_Y]},
        }
    else:
        C_CODE = 1
        TRANSLATION_SPEC = {
            "enc_X": {"input_chs": C_X, "filter_spec": [C_CODE]},
            "enc_Y": {"input_chs": C_Y, "filter_spec": [C_CODE]},
            "dec_X": {"input_chs": C_CODE, "filter_spec": [C_X]},
            "dec_Y": {"input_chs": C_CODE, "filter_spec": [C_Y]},
        }
    cd = SCCN(TRANSLATION_SPEC, **CONFIG)
    Pu = tf.expand_dims(tf.ones(x_im.shape[:-1], dtype=tf.float32), -1)
    TRAIN = tf.data.Dataset.from_tensor_slices((x_im, y_im, Pu))
    TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    training_time, _ = cd.pretrain(EVALUATE, evaluation_dataset=EVALUATE, **CONFIG)
    epochs = CONFIG["epochs"]
    CONFIG.update(epochs=1)
    for epoch in trange(epochs):
        tr_time, _ = cd.train(TRAIN, evaluation_dataset=EVALUATE, **CONFIG)
        training_time += tr_time
        if epoch > 10:
            # After a 10-epoch burn-in, refresh the unchanged-pixel prior Pu
            # from the current change map and rebuild the training dataset.
            for x, y, _ in EVALUATE.batch(1):
                Pu = 1.0 - tf.cast(cd._change_map(cd([x, y])), dtype=tf.float32)
            del TRAIN
            gc.collect()
            TRAIN = tf.data.Dataset.from_tensor_slices((x_im, y_im, Pu))
            TRAIN = TRAIN.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    cd.final_evaluate(EVALUATE, **CONFIG)
    final_kappa = cd.metrics_history["cohens kappa"][-1]
    timestamp = cd.timestamp
    epoch = cd.epoch.numpy()
    return final_kappa, epoch, training_time, timestamp
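A toy illustration of the prior update in the loop above, assuming the change map is a float tensor of zeros and ones: pixels flagged as changed get weight 0 in Pu and stop contributing to the next epoch's alignment term:

import tensorflow as tf

change_map = tf.constant([[0.0, 1.0], [1.0, 0.0]])  # toy 2x2 change map
Pu = 1.0 - tf.cast(change_map, dtype=tf.float32)
print(Pu.numpy())  # [[1. 0.] [0. 1.]]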
Example #8
"""Command Line Interface for the collections service.

Usage:
    cli.py [fetch] [decrypt] NAME

Options:
    -h --help  Show this screen.
    --version  Show version.

"""

import sys
import iorw
import datasets
import encryption

from docopt import docopt

# Load info about all available datasets into memory
dataRepo = datasets.Repository()
dataRepo.loadDatasets('./fixtures/datasets.yaml')

if __name__ == '__main__':
    arguments = docopt(__doc__, version='Collections CLI 0.1')
    if arguments['fetch']:
        datasets.fetch(arguments['NAME'], dataRepo)
    if arguments['decrypt']:
        datasets.decrypt(arguments['NAME'], dataRepo)
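Assumed invocations, following the docopt usage string at the top of the script:

# python cli.py fetch mydataset          -> fetch "mydataset" only
# python cli.py fetch decrypt mydataset  -> fetch, then decrypt it
# python cli.py decrypt mydataset        -> decrypt a previously fetched copy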