# Example #1
 def __init__(self, args):
     """Set up anomaly detection: seed the RNGs, read the test split for
     the configured dataset and load the saved latent features.

     NOTE(review): relies on module-level ``data_path``, ``info_path`` and
     ``save_path`` defined elsewhere in the file.
     """
     self.args = args
     # Seed torch and numpy before any data handling for reproducibility.
     torch.manual_seed(self.args.seed)
     np.random.seed(self.args.seed)
     print('{} detection...'.format(args.dataset))
     reader = dp.DatasetReader(white_noise=self.args.dataset,
                               data_path=data_path,
                               data_source=args.data_source,
                               len_seg=self.args.len_seg)
     # Only the test split is needed here; the training split is discarded.
     _, self.testset = reader(args.net_name)
     self.spots = np.load('{}/spots.npy'.format(info_path))
     self.AE = AutoEncoder(args)
     # Latent features previously produced by the training run.
     latent_file = '{}/features/{}.npy'.format(save_path, self.file_name())
     self.latent = np.load(latent_file)
 def __init__(self, args):
     """Load the 'W-1' training split and prepare plotting defaults.

     NOTE(review): relies on module-level ``data_path`` and ``info_path``
     defined elsewhere in the file.
     """
     self.args = args
     reader = DatasetReader(white_noise='W-1',
                            data_path=data_path,
                            data_source=args.data_source,
                            len_seg=self.args.len_seg)
     # Only the training split is needed here; the test split is discarded.
     self.dataset, _ = reader(args.net_name)
     self.spots = np.load('{}/spots.npy'.format(info_path))
     self.AE = AutoEncoder(args)
     # Font settings used by the plotting helpers.
     self.font = {'family': 'Arial',
                  'style': 'normal',
                  'weight': 'bold',
                  'size': 10,
                  'color': 'k'}
# Example #3
def train_model(x_data, y_data):
    """Train the model selected by the module-level ``args.name``.

    :param x_data: training inputs, shuffled each epoch via Utils.SuffleData
    :param y_data: training labels (shuffled alongside x_data; the model's
        ``learn`` call only consumes the inputs)
    :return: the trained model instance
    :raises ValueError: if ``args.name`` names an unknown model
    """
    if args.name == "VAE":
        model = vae(args, device).to(device)
    elif args.name == "AutoEncoder":
        model = AutoEncoder(args, device).to(device)
    else:
        # BUG FIX: previously `model` stayed None and the loop below crashed
        # with an opaque AttributeError on `model.learn`. Fail fast instead.
        raise ValueError('Unknown model name: {}'.format(args.name))

    for epoch in range(args.nb_epochs):
        # Reshuffle every epoch; y_train is returned but unused by learn().
        x_train, y_train = Utils.SuffleData(x_data, y_data, args.batch_size)

        loss = model.learn(x_train)

        if epoch % args.log_interval == 0:
            print('Epoch {:4d}/{} loss: {:.6f} '.format(epoch, args.nb_epochs, loss))

    return model
# Example #4
def baseline_ae_model(region_tensors, is_training, encoded_dims):
    """Build a baseline autoencoder per region and aggregate the
    reconstruction errors across all regions.

    :param region_tensors: dict mapping a region name to a dict with keys
        'tensors', 'filters', 'reuse' and 'scope'
    :param is_training: a tensorflow placeholder indicating the training phase
    :param encoded_dims: size of each autoencoder's encoded representation
    :return: (relative euclidean reconstruction distance, the flattened
        encoding of the last region's autoencoder, mean squared euclidean
        error)
    """
    squared_euclidean = squared_x = None
    ae = None
    for name in region_tensors:
        tensors = region_tensors[name]['tensors']
        filters = region_tensors[name]['filters']
        reuse = region_tensors[name]['reuse']
        scope = region_tensors[name]['scope']
        ae = AutoEncoder(tensors, encoded_dims=encoded_dims, filters=filters, is_training=is_training, reuse=reuse, name='compressor_{}'.format(scope))
        sq_x, sq_euclidean, cosine_similarity = reconstruction_distances(ae.input_tensor, ae.reconstruction)
        with tf.name_scope('latent_variables'):
            if squared_euclidean is None:
                squared_euclidean = sq_euclidean
                squared_x = sq_x
            else:
                squared_euclidean = squared_euclidean + sq_euclidean
                squared_x = squared_x + sq_x

    # BUG FIX: the return was inside the for loop, so only the first region
    # was ever used and the accumulation branch above was unreachable.
    # Return after all regions are aggregated, mirroring dagmm().
    return tf.sqrt(squared_euclidean) / tf.sqrt(squared_x), ae.flatten_encoded, tf.reduce_mean(squared_euclidean)
# Example #5
 def __init__(self, args):
     """Prepare training: seed the RNGs, echo the arguments, build the
     data loader, the AutoEncoder and the visdom logger.

     NOTE(review): relies on module-level ``data_path``, ``info_path``,
     ``save_path`` and ``device`` defined elsewhere in the file.
     """
     self.args = args
     # Seed torch and numpy first so everything below is reproducible.
     torch.manual_seed(self.args.seed)
     np.random.seed(self.args.seed)
     # Echo every parsed argument for the training log.
     print('> Training arguments:')
     for name in vars(args):
         print('>>> {}: {}'.format(name, getattr(args, name)))
     reader = dp.DatasetReader(white_noise=self.args.dataset,
                               data_path=data_path,
                               data_source=args.data_source,
                               len_seg=self.args.len_seg)
     # Only the training split is needed here; the test split is discarded.
     dataset, _ = reader(args.net_name)
     self.data_loader = DataLoader(dataset=dataset,
                                   batch_size=args.batch_size,
                                   shuffle=False)
     self.spots = np.load('{}/spots.npy'.format(info_path))
     self.AE = AutoEncoder(args).to(device)  # AutoEncoder
     self.AE.apply(self.weights_init)
     self.criterion = nn.MSELoss()
     # Visdom environment named after this run; mirrors events to a log file.
     log_file = '{}/visualization/{}.log'.format(save_path, self.file_name())
     self.vis = visdom.Visdom(env='{}'.format(self.file_name()),
                              log_to_filename=log_file)
     plt.figure(figsize=(15, 15))
from tensorflow.python.keras.datasets import mnist
from models.AutoEncoder import AutoEncoder
import tensorflow as tf
import numpy as np

# Load MNIST, scale pixel values into [0, 1] and add a channel axis so the
# images are shaped (n, 28, 28, 1) for the convolutional autoencoder.
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape((-1, 28, 28, 1)).astype('float32') / 255.
x_test = x_test.reshape((-1, 28, 28, 1)).astype('float32') / 255.

autoencoder = AutoEncoder()
autoencoder.build()
# autoencoder.train(x_train, x_test)
# Example #7
from Preprocessor import Preprocessor
from train.SDNetTrainer import SDNetTrainer
from datasets.STL10 import STL10
from models.AutoEncoder import AutoEncoder
from models.SpotNet import SNet

# Build the autoencoder and the SpotNet discriminator over 96x96 RGB inputs;
# both share the same batch size, target shape and tag.
target_shape = [96, 96, 3]
common = dict(batch_size=128, target_shape=target_shape, tag='default')
ae = AutoEncoder(num_layers=4, **common)
model = SNet(ae, disc_pad='SAME', **common)
data = STL10()
preprocessor = Preprocessor(target_shape=target_shape, augment_color=True)
trainer = SDNetTrainer(model=model,
                       dataset=data,
                       pre_processor=preprocessor,
                       num_epochs=500,
                       init_lr=0.0003,
                       lr_policy='linear',
                       num_gpus=2)
trainer.train_model(None)
# Example #8
def dagmm(region_tensors,
          is_training,
          encoded_dims=2,
          mixtures=3,
          lambda_1=0.1,
          lambda_2=0.005,
          use_cosine_similarity=False,
          latent_dims=2):
    """Build a DAGMM-style graph: per-region autoencoders whose reduced
    latent codes and reconstruction distances form `z`, which feeds a
    Gaussian-mixture estimation network.

    :param region_tensors: restore the related tensors; maps a region name
        to a dict with keys 'tensors', 'filters', 'reuse' and 'scope'
    :param is_training: a tensorflow placeholder to indicate whether it is in the training phase or not
    :param encoded_dims: size of each autoencoder's encoded representation
    :param mixtures: number of Gaussian mixture components
    :param lambda_1: weight of the mean sample energy in the total loss
    :param lambda_2: weight of the sigma-diagonal penalty in the total loss
    :param use_cosine_similarity: if True, append cosine similarity to the
        relative euclidean distance in the latent features
    :param latent_dims: reduce the dimension of encoded vector to a smaller one
    :return: (energy, z, loss, loss_reconstruction, energy_mean,
        loss_sigmas_diag)
    """

    squared_x = squared_euclidean = z = None
    for name in region_tensors:
        tensors = region_tensors[name]['tensors']
        filters = region_tensors[name]['filters']
        reuse = region_tensors[name]['reuse']
        scope = region_tensors[name]['scope']
        ae = AutoEncoder(tensors,
                         encoded_dims=encoded_dims,
                         filters=filters,
                         is_training=is_training,
                         reuse=reuse,
                         name='compressor_{}'.format(scope))
        # Project the encoded vector down to latent_dims (no BN/activation).
        reduced_latent = base_dense_layer(ae.flatten_encoded,
                                          latent_dims,
                                          'reducer_{}'.format(name),
                                          is_training=is_training,
                                          bn=False,
                                          activation_fn=None)
        sq_x, sq_euclidean, cosine_similarity = reconstruction_distances(
            ae.input_tensor, ae.reconstruction)
        with tf.name_scope('latent_variables'):
            if use_cosine_similarity:
                # Two distance features per sample: relative euclidean
                # distance and cosine similarity, concatenated column-wise.
                relative_euclidean = tf.sqrt(sq_euclidean) / tf.sqrt(sq_x)
                relative_euclidean = tf.reshape(relative_euclidean, [-1, 1])
                cosine_similarity = tf.reshape(cosine_similarity, [-1, 1])
                distances = tf.concat([relative_euclidean, cosine_similarity],
                                      axis=1)
            else:
                distances = tf.sqrt(sq_euclidean) / tf.sqrt(sq_x)
                distances = tf.reshape(distances, [-1, 1])

            # Accumulate errors across regions and grow z with each
            # region's reduced latent code plus its distance features.
            if squared_x is None:
                squared_x = sq_x
                squared_euclidean = sq_euclidean
                z = tf.concat([reduced_latent, distances], axis=1)
            else:
                squared_x = squared_x + sq_x
                squared_euclidean = squared_euclidean + sq_euclidean
                z = tf.concat([z, reduced_latent, distances], axis=1)
    with tf.name_scope('n_count'):
        # Batch size as a float, for the averaged statistics below.
        n_count = tf.shape(z)[0]
        n_count = tf.cast(n_count, tf.float32)

    # The estimation network outputs per-sample mixture memberships (gammas).
    estimator = Estimator(mixtures, z, is_training=is_training)
    gammas = estimator.output_tensor

    with tf.variable_scope('gmm_parameters'):
        # GMM parameters are non-trainable variables: they are updated from
        # batch statistics during training (see tf.cond below) and reused
        # as-is at inference time.
        phis = tf.get_variable('phis',
                               shape=[mixtures],
                               initializer=tf.ones_initializer(),
                               dtype=tf.float32,
                               trainable=False)
        mus = tf.get_variable('mus',
                              shape=[mixtures, z.get_shape()[1]],
                              initializer=tf.ones_initializer(),
                              dtype=tf.float32,
                              trainable=False)

        # Covariances start as 0.5 * identity, tiled once per mixture.
        init_sigmas = 0.5 * np.expand_dims(np.identity(z.get_shape()[1]),
                                           axis=0)
        init_sigmas = np.tile(init_sigmas, [mixtures, 1, 1])
        init_sigmas = tf.constant_initializer(init_sigmas)
        sigmas = tf.get_variable(
            'sigmas',
            shape=[mixtures, z.get_shape()[1],
                   z.get_shape()[1]],
            initializer=init_sigmas,
            dtype=tf.float32,
            trainable=False)

        # Per-mixture soft counts over the batch.
        sums = tf.reduce_sum(gammas, axis=0)
        sums_exp_dims = tf.expand_dims(sums, axis=-1)

        # Batch estimates: mixture weights and gamma-weighted means.
        phis_ = sums / n_count
        mus_ = tf.matmul(gammas, z, transpose_a=True) / sums_exp_dims

        def assign_training_phis_mus():
            # Persist the batch estimates into the variables before reading
            # them, so training uses the freshly computed values.
            with tf.control_dependencies(
                [phis.assign(phis_), mus.assign(mus_)]):
                return [tf.identity(phis), tf.identity(mus)]

        # Training: update and read; inference: read the stored values.
        phis, mus = tf.cond(is_training, assign_training_phis_mus,
                            lambda: [phis, mus])

        phis_exp_dims = tf.expand_dims(phis, axis=0)
        phis_exp_dims = tf.expand_dims(phis_exp_dims, axis=-1)
        phis_exp_dims = tf.expand_dims(phis_exp_dims, axis=-1)

        # Broadcast z and mus against each other to get per-sample,
        # per-mixture deviations.
        zs_exp_dims = tf.expand_dims(z, 1)
        zs_exp_dims = tf.expand_dims(zs_exp_dims, -1)
        mus_exp_dims = tf.expand_dims(mus, 0)
        mus_exp_dims = tf.expand_dims(mus_exp_dims, -1)

        zs_minus_mus = zs_exp_dims - mus_exp_dims

        # Gamma-weighted outer products averaged over the batch give the
        # covariance estimates; add_noise keeps them invertible.
        sigmas_ = tf.matmul(zs_minus_mus, zs_minus_mus, transpose_b=True)
        broadcast_gammas = tf.expand_dims(gammas, axis=-1)
        broadcast_gammas = tf.expand_dims(broadcast_gammas, axis=-1)
        sigmas_ = broadcast_gammas * sigmas_
        sigmas_ = tf.reduce_sum(sigmas_, axis=0)
        sigmas_ = sigmas_ / tf.expand_dims(sums_exp_dims, axis=-1)
        sigmas_ = add_noise(sigmas_)

        def assign_training_sigmas():
            # Same update-then-read pattern as for phis/mus.
            with tf.control_dependencies([sigmas.assign(sigmas_)]):
                return tf.identity(sigmas)

        sigmas = tf.cond(is_training, assign_training_sigmas, lambda: sigmas)

    with tf.name_scope('loss'):
        loss_reconstruction = tf.reduce_mean(squared_euclidean,
                                             name='loss_reconstruction')
        # Per-sample energy: negative log of the mixture density at z.
        # The 1e-12 terms guard against division by zero and log(0).
        inversed_sigmas = tf.expand_dims(tf.matrix_inverse(sigmas), axis=0)
        inversed_sigmas = tf.tile(inversed_sigmas,
                                  [tf.shape(zs_minus_mus)[0], 1, 1, 1])
        energy = tf.matmul(zs_minus_mus, inversed_sigmas, transpose_a=True)
        energy = tf.matmul(energy, zs_minus_mus)
        energy = tf.squeeze(phis_exp_dims * tf.exp(-0.5 * energy), axis=[2, 3])
        energy_divided_by = tf.expand_dims(tf.sqrt(
            2.0 * math.pi * tf.matrix_determinant(sigmas)),
                                           axis=0) + 1e-12
        energy = tf.reduce_sum(energy / energy_divided_by, axis=1) + 1e-12
        energy = -1.0 * tf.log(energy)
        energy_mean = tf.reduce_sum(energy) / n_count
        # Penalize small covariance diagonals to prevent degenerate mixtures.
        loss_sigmas_diag = 1.0 / tf.matrix_diag_part(sigmas)
        loss_sigmas_diag = tf.reduce_sum(loss_sigmas_diag)
        loss = loss_reconstruction + lambda_1 * energy_mean + lambda_2 * loss_sigmas_diag

    return energy, z, loss, loss_reconstruction, energy_mean, loss_sigmas_diag