Example #1
def sample_and_save_intermediate(model, sigmas, x=None, eps=2 * 1e-5, T=100, n_images=1, save_directory=None):
    """
    :param model:
    :param sigmas:
    :param eps:
    :param T:
    :return:
    """
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)

    if x is None:
        image_size = (n_images,) + utils.get_dataset_image_size(configs.config_values.dataset)
        x = tf.random.uniform(shape=image_size)
    else:
        image_size = x.shape
        n_images = image_size[0]

    x_all = None
    for i, sigma_i in enumerate(tqdm(sigmas, desc='Sampling for each sigma')):
        alpha_i = eps * (sigma_i / sigmas[-1]) ** 2
        idx_sigmas = tf.ones(n_images, dtype=tf.int32) * i
        for t in range(T):
            x = sample_one_step(model, x, idx_sigmas, alpha_i)

        if x_all is None:
            x_all = x
        else:
            x_all = tf.concat([x_all, x], axis=0)

    save_as_grid(x_all, save_directory + 'intermediate.png', rows=n_images)
    return x
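The per-step update sample_one_step is used throughout these excerpts but not shown. A minimal sketch, assuming the standard annealed Langevin dynamics update and the model([x, idx_sigmas]) call signature that appears later in this file:

# Hypothetical sketch of the missing helper, assuming the usual annealed
# Langevin update: x <- x + (alpha_i / 2) * score + sqrt(alpha_i) * z, z ~ N(0, I).
def sample_one_step(model, x, idx_sigmas, alpha_i):
    z_t = tf.random.normal(shape=tf.shape(x))
    score = model([x, idx_sigmas])
    return x + (alpha_i / 2) * score + tf.sqrt(alpha_i) * z_t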
Example #2
def save_image(image, filename):
    mode = 'L' if image.shape[-1] == 1 else 'RGB'
    im = Image.new(
        mode,
        utils.get_dataset_image_size(configs.config_values.dataset)[:2])
    im.paste(tf.keras.preprocessing.image.array_to_img(image))
    im.save(filename + '.png', format="PNG")
Example #3
def sample_and_save(model, sigmas, x=None, eps=2 * 1e-5, T=100, n_images=1, save_directory=None):
    """
    :param model:
    :param sigmas:
    :param eps:
    :param T:
    :return:
    """
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)

    if x is None:
        image_size = (n_images,) + utils.get_dataset_image_size(configs.config_values.dataset)
        x = tf.random.uniform(shape=image_size)
    else:
        image_size = x.shape
        n_images = image_size[0]

    for i, sigma_i in enumerate(tqdm(sigmas, desc='Sampling for each sigma')):
        alpha_i = eps * (sigma_i / sigmas[-1]) ** 2
        idx_sigmas = tf.ones(n_images, dtype=tf.int32) * i
        for t in range(T):
            x = sample_one_step(model, x, idx_sigmas, alpha_i)

            if (t + 1) % 10 == 0:
                save_as_grid(x, save_directory + f'sigma{i + 1}_t{t + 1}.png')
    return x
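A hypothetical call, reusing the utils.get_sigma_levels() helper that appears later in this file; the trained model object is a placeholder:

# Hypothetical usage; model is assumed to be a trained score network.
sigmas = utils.get_sigma_levels()
samples = sample_and_save(model, sigmas, T=100, n_images=16,
                          save_directory='./samples/')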
Example #4
def sample_many_and_save(model,
                         sigmas,
                         batch_size=1000,
                         eps=2 * 1e-5,
                         T=100,
                         n_images=1,
                         save_directory=None):
    """
    Used for sampling big amount of images (e.g. 50000)
    :param model: model for sampling (RefineNet)
    :param sigmas: sigma levels of noise
    :param eps:
    :param T: iteration per sigma level
    :return: Tensor of dimensions (n_images, width, height, channels)
    """
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)

    # Tuple for (n_images, width, height, channels)
    image_size = (n_images, ) + utils.get_dataset_image_size(
        configs.config_values.dataset)
    batch_size = min(batch_size, n_images)

    with tf.device("CPU"):
        x = tf.random.uniform(shape=image_size)
    x = tf.data.Dataset.from_tensor_slices(x).batch(batch_size)

    idx_image = 0
    for i_batch, batch in enumerate(
            tqdm(
                x,
                total=tf.data.experimental.cardinality(x).numpy(),
                desc="Generating samples",
            )):
        for i, sigma_i in enumerate(sigmas):
            alpha_i = eps * (sigma_i / sigmas[-1])**2
            idx_sigmas = tf.ones(batch.get_shape()[0], dtype=tf.int32) * i
            for t in range(T):
                batch = sample_one_step(model, batch, idx_sigmas, alpha_i)

        if save_directory is not None:
            batch = _preprocess_image_to_save(batch)
            for image in batch:
                im = Image.new("RGB", image_size[1:3])
                if image_size[-1] == 1:
                    image = tf.tile(image, [1, 1, 3])
                im.paste(tf.keras.preprocessing.image.array_to_img(image))
                im.save(save_directory + f"{idx_image}.png", format="PNG")
                idx_image += 1
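The helper _preprocess_image_to_save is not defined in these excerpts. A minimal sketch, assuming it only clips samples back into the valid [0, 1] range before array_to_img converts them to 8-bit pixels:

# Hypothetical sketch; the real helper may also rescale or denormalise.
def _preprocess_image_to_save(x):
    return tf.clip_by_value(x, 0.0, 1.0)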
Example #5
def sample_many(model,
                sigmas,
                batch_size=128,
                eps=2 * 1e-5,
                T=100,
                n_images=1):
    """
    Used for sampling big amount of images (e.g. 50000)
    :param model: model for sampling (RefineNet)
    :param sigmas: sigma levels of noise
    :param eps:
    :param T: iteration per sigma level
    :return: Tensor of dimensions (n_images, width, height, channels)
    """
    # Tuple for (n_images, width, height, channels)
    image_size = (n_images, ) + utils.get_dataset_image_size(
        configs.config_values.dataset)
    batch_size = min(batch_size, n_images)

    with tf.device("CPU"):
        x = tf.random.uniform(shape=image_size)
    x = tf.data.Dataset.from_tensor_slices(x).batch(batch_size)
    x_processed = None

    n_processed_images = 0
    for i_batch, batch in enumerate(
            tqdm(
                x,
                total=tf.data.experimental.cardinality(x).numpy(),
                desc="Generating samples",
            )):
        for i, sigma_i in enumerate(sigmas):
            alpha_i = eps * (sigma_i / sigmas[-1])**2
            idx_sigmas = tf.ones(batch.get_shape()[0], dtype=tf.int32) * i
            for t in range(T):
                batch = sample_one_step(model, batch, idx_sigmas, alpha_i)

        with tf.device("CPU"):
            if x_processed is not None:
                x_processed = tf.concat([x_processed, batch], axis=0)
            else:
                x_processed = batch

        n_processed_images += batch.get_shape()[0]

    x_processed = _preprocess_image_to_save(x_processed)

    return x_processed
Example #6
def sample_and_save(model,
                    sigmas,
                    x=None,
                    eps=2 * 1e-5,
                    T=100,
                    n_images=1,
                    save_directory=None):
    """
    :param model:
    :param sigmas:
    :param eps:
    :param T:
    :return:
    """
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)

    if x is None:
        image_size = [
            n_images,
        ] + utils.get_dataset_image_size(configs.config_values.dataset)
        x = tf.random.uniform(shape=image_size)
    else:
        image_size = x.shape
        n_images = image_size[0]

    # # Use ground truth masks
    # if (configs.config_values.dataset == "masked_fashion"):
    #     print("Using groundtruth masks...")
    #     x = x.numpy()
    #     fashion_test = get_train_test_data("masked_fashion")[1]
    #     fashion_test = fashion_test.batch(n_images).take(1)
    #     f_samples = next(iter(fashion_test)).numpy()
    #     x[...,-1] = f_samples[...,-1]
    #     x = tf.constant(x)

    for i, sigma_i in enumerate(tqdm(sigmas, desc="Sampling for each sigma")):
        alpha_i = eps * (sigma_i / sigmas[-1])**2
        idx_sigmas = tf.ones(n_images, dtype=tf.int32) * i
        for t in range(T):
            x = sample_one_step(model, x, idx_sigmas, alpha_i)

            if (t + 1) % T == 0:
                save_as_grid(x, save_directory + f"sigma{i + 1}_t{t + 1}.png")
    return x
Example #7
def build_model(dataset, pretrained=False):
    input_shape = utils.get_dataset_image_size(dataset)

    if configs.config_values.mask_marginals:
        input_shape[-1] += 1  # Append a mask channel

    base_model = tf.keras.applications.resnet_v2.ResNet50V2(
        input_shape=(
            input_shape[0],
            input_shape[1],
            3,
        ),  # if pretrained else input_shape,
        include_top=False if pretrained else True,
        weights="imagenet" if pretrained else None,
        classes=None if pretrained else 2,
        pooling="avg",
        classifier_activation=None,
    )

    if pretrained:
        model = tf.keras.Sequential([
            tf.keras.Input(shape=input_shape),
            tf.keras.layers.Conv2D(3, 1),  # 1x1 conv to increase channels
            base_model,
            tf.keras.layers.Dense(2),
        ])
    else:
        model = tf.keras.Sequential([
            tf.keras.Input(shape=input_shape),
            tf.keras.layers.Conv2D(3, 1),  # 1x1 conv to increase channels
            base_model,
        ])

    model.summary(print_fn=logging.info)

    return model
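The 1x1 convolution maps however many input channels the dataset has (e.g. grayscale plus an optional mask channel) onto the 3 channels the ResNet50V2 backbone expects. A hypothetical instantiation, reusing the "knee" dataset key that appears in the training code below:

# Hypothetical usage; "knee" is the dataset key hard-coded further below.
classifier = build_model("knee", pretrained=True)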
Example #8
def compute_batched_score_norms(model, x_test, masked_input=False, seed=None):
    # One list entry per sigma level: tensor of per-sample score norms
    score_dict = []
    masks_arr = []  # only used by the commented-out masked return below
    sigmas = utils.get_sigma_levels()
    input_shape = utils.get_dataset_image_size(configs.config_values.dataset)
    channels = input_shape[-1]
    progress_bar = tqdm(sigmas)
    for idx, sigma in enumerate(progress_bar):

        progress_bar.set_description("Sigma: {:.4f}".format(sigma))
        _logits = []
        if seed:
            tf.random.set_seed(seed)
            np.random.seed(seed)

        for x_batch in x_test:
            idx_sigmas = tf.ones(x_batch.shape[0], dtype=tf.int32) * idx
            score = model([x_batch, idx_sigmas]) * sigma

            if masked_input:
                _, masks = tf.split(x_batch, (channels - 1, 1), axis=-1)
                score = score * masks

            score = reduce_norm(score)
            _logits.append(score)
        score_dict.append(tf.identity(tf.concat(_logits, axis=0)))

    # N x L Matrix of score norms
    scores = tf.squeeze(tf.stack(score_dict, axis=1))

    # if masked_input:
    #     masks_arr = np.concatenate(masks_arr, axis=0)
    #     return dict(scores=scores.numpy(), masks=masks_arr)

    return scores.numpy()
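reduce_norm is assumed but not defined in these excerpts. A minimal sketch, assuming it computes a per-sample L2 norm of the score over all non-batch axes:

# Hypothetical sketch: one scalar norm per image, reduced over the
# height, width and channel axes.
def reduce_norm(score):
    return tf.sqrt(tf.reduce_sum(tf.square(score), axis=[1, 2, 3]))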
Example #9
def build_and_train(dataset, n_epochs=25, pretrained=False):

    start_time = datetime.now().strftime("%y%m%d-%H%M%S")
    input_shape = utils.get_dataset_image_size(dataset)
    callbacks = [
        tf.keras.callbacks.EarlyStopping(
            # Stop training when `val_loss` is no longer improving
            monitor="val_loss",
            # an absolute change of less than min_delta will count as no improvement
            min_delta=1e-3,
            # "no longer improving" being defined as "for at least patience epochs"
            patience=20,
            verbose=1,
        ),
        tf.keras.callbacks.ModelCheckpoint(
            filepath=os.path.join(MODELDIR, "e{epoch:03d}.ckpt"),
            # Only save a model if `val_loss` has improved.
            save_best_only=True,
            save_weights_only=True,
            monitor="val_loss",
            verbose=1,
            save_freq="epoch",
        ),
        tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss",
                                             factor=0.2,
                                             min_delta=1e-3,
                                             patience=1,
                                             min_lr=1e-5),
        tf.keras.callbacks.TensorBoard(
            f"./logs/classifier/{dataset}/{start_time}", update_freq=1),
    ]

    @tf.function
    def remove_mask(x, l):
        x, _ = tf.split(x, (input_shape[-1] - 1, 1), axis=-1)
        return x, l

    model = build_model(dataset, pretrained)
    cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
    optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5)
    # optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)

    model.compile(
        optimizer=optimizer,
        loss=cce,
        metrics=[tf.keras.metrics.CategoricalAccuracy()],
    )

    train_ds, val_ds, test_ds = load_data(dataset,
                                          include_ood=False,
                                          supervised=True)

    train_ds = preprocess("knee", train_ds, train=True)
    # train_ds.map(remove_mask, num_parallel_calls=AUTOTUNE)
    train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)

    val_ds = preprocess("knee", val_ds, train=True)
    # val_ds.map(remove_mask, num_parallel_calls=AUTOTUNE)
    val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)

    test_ds = preprocess("knee", test_ds, train=False)
    # test_ds.map(remove_mask, num_parallel_calls=AUTOTUNE)
    test_ds = test_ds.prefetch(buffer_size=AUTOTUNE)

    history = model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=n_epochs,
        callbacks=callbacks,
    )

    logging.info("====== Performance on Test Set ======")
    load_and_eval(dataset)

    # load_last_checkpoint(model, MODELDIR)
    # test_preds = model.predict(test_ds)
    # test_labels = np.concatenate([np.argmax(l, axis=1) for _, l in test_ds], axis=0)

    # test_scores = test_preds[:, 1]
    # inlier_test_scores = test_scores[test_labels == 0]
    # outlier_test_scores = test_scores[test_labels == 1]

    # metrics = ood_metrics(
    #     inlier_test_scores, outlier_test_scores, verbose=True, plot=True
    # )
    # logging.info(metrics)

    return history
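A hypothetical invocation, reusing the "knee" dataset key hard-coded in the preprocessing calls above:

# Hypothetical usage; assumes MODELDIR and the data-loading helpers are configured.
history = build_and_train("knee", n_epochs=25, pretrained=True)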