# Exemplo n.º 1 (score: 0)
def test_upsampling_2d():
    """Check UpSampling2D against a NumPy ``np.repeat`` reference for both
    'th' (channels-first) and 'tf' (channels-last) data layouts."""
    batch, channels, rows, cols = 2, 2, 11, 12

    for ordering in ['th', 'tf']:
        # Build the input in the layout the layer expects, and remember
        # which axes carry the spatial dimensions for that layout.
        if ordering == 'th':
            data = np.random.rand(batch, channels, rows, cols)
            row_axis, col_axis = 2, 3
        else:  # tf
            data = np.random.rand(batch, rows, cols, channels)
            row_axis, col_axis = 1, 2

        for row_factor in [2, 3, 9]:
            for col_factor in [2, 3, 9]:
                layer = convolutional.UpSampling2D(
                    size=(row_factor, col_factor),
                    dim_ordering=ordering)
                layer.build(data.shape)
                result = K.eval(layer(K.variable(data)))

                # Spatial dims must be scaled by the upsampling factors.
                assert result.shape[row_axis] == row_factor * rows
                assert result.shape[col_axis] == col_factor * cols

                # Compare against a plain NumPy nearest-neighbour upsample.
                reference = np.repeat(data, row_factor, axis=row_axis)
                reference = np.repeat(reference, col_factor, axis=col_axis)
                assert_allclose(result, reference)
# Exemplo n.º 2 (score: 0)
    def __init__(self,
                 input_shape=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 filters=None,
                 kernel_size=None,
                 strides=None,
                 pool_size=None,
                 denoising=None):
        """Build and compile a convolutional autoencoder.

        Every constructor argument is stored on the instance under the
        same name.  The encoder stacks ``encoder_layers`` blocks of
        BatchNorm -> Conv2D(elu) -> Dropout -> MaxPooling; the decoder
        mirrors them with UpSampling2D, and a final sigmoid Conv2D maps
        back to ``input_shape[2]`` channels (channels-last layout
        assumed — TODO confirm against callers).

        Parameters
        ----------
        input_shape : tuple
            Shape of one sample, e.g. ``(rows, cols, channels)``.
        n_epoch, batch_size : int
            Training settings; stored here, read by training code.
        encoder_layers, decoder_layers : int
            Number of conv blocks on each side.
        filters, kernel_size, strides, pool_size
            Passed through to the Conv2D / pooling layers.
        denoising
            Stored only; not used in this constructor.
        """
        # Explicit assignment replaces the previous
        # inspect.getargvalues(inspect.currentframe()) frame hack, which
        # was fragile (breaks if any local is bound first) and opaque.
        self.input_shape = input_shape
        self.n_epoch = n_epoch
        self.batch_size = batch_size
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.pool_size = pool_size
        self.denoising = denoising

        loss_history = LossHistory()

        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)

        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.1, patience=20)

        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

        # INPUTS
        self.input_data = Input(shape=self.input_shape)

        # ENCODER: BN -> [BN -> Conv(elu) -> Dropout -> MaxPool] * N
        self.encoded = BatchNormalization()(self.input_data)
        for _ in range(self.encoder_layers):
            self.encoded = BatchNormalization()(self.encoded)
            self.encoded = convolutional.Conv2D(filters=self.filters,
                                                kernel_size=self.kernel_size,
                                                strides=self.strides,
                                                activation="elu",
                                                padding="same")(self.encoded)
            self.encoded = Dropout(rate=0.5)(self.encoded)
            self.encoded = pooling.MaxPooling2D(strides=self.pool_size,
                                                padding="same")(self.encoded)

        # DECODER: BN -> [BN -> Conv(elu) -> Dropout -> UpSample] * N
        self.decoded = BatchNormalization()(self.encoded)
        for _ in range(self.decoder_layers):
            self.decoded = BatchNormalization()(self.decoded)
            self.decoded = convolutional.Conv2D(filters=self.filters,
                                                kernel_size=self.kernel_size,
                                                strides=self.strides,
                                                activation="elu",
                                                padding="same")(self.decoded)
            self.decoded = Dropout(rate=0.5)(self.decoded)
            self.decoded = convolutional.UpSampling2D(size=self.pool_size)(
                self.decoded)

        # ACTIVATION: project back to the input channel count.
        self.decoded = convolutional.Conv2D(filters=self.input_shape[2],
                                            kernel_size=self.kernel_size,
                                            strides=self.strides,
                                            activation="sigmoid",
                                            padding="same")(self.decoded)
        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss="mean_squared_error")
    def __init__(self,
                 input_shape=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 filters=None,
                 kernel_size=None,
                 strides=None,
                 pool_size=None,
                 denoising=None):
        """Build and compile a convolutional autoencoder with a single
        pooling/upsampling bottleneck.

        Every constructor argument is stored on the instance under the
        same name.  The encoder is ``encoder_layers`` BatchNorm ->
        Conv2D(elu) blocks (Dropout on all but the last), followed by
        one MaxPooling step; the decoder mirrors it with one UpSampling
        step and ``decoder_layers`` conv blocks, ending in a sigmoid
        Conv2D back to ``input_shape[2]`` channels.

        Parameters
        ----------
        input_shape : tuple
            Shape of one sample, e.g. ``(rows, cols, channels)``.
        n_epoch, batch_size : int
            Training settings; stored here, read by training code.
        encoder_layers, decoder_layers : int
            Number of conv blocks on each side (encoder_layers >= 1).
        filters, kernel_size, strides, pool_size
            Passed through to the Conv2D / pooling layers.
        denoising
            Stored only; not used in this constructor.
        """
        # Explicit assignment replaces the previous
        # inspect.getargvalues(inspect.currentframe()) frame hack.
        self.input_shape = input_shape
        self.n_epoch = n_epoch
        self.batch_size = batch_size
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.pool_size = pool_size
        self.denoising = denoising

        loss_history = LossHistory()

        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)

        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.1, patience=20)

        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

        # ENCODER.  The original spelled out three near-identical
        # branches (first / middle / last layer); they differed only in
        # where the input tensor comes from and whether Dropout follows.
        for i in range(self.encoder_layers):
            if i == 0:
                self.input_data = Input(shape=self.input_shape)
                self.encoded = BatchNormalization()(self.input_data)
            else:
                self.encoded = BatchNormalization()(self.encoded)
            self.encoded = convolutional.Conv2D(
                filters=self.filters,
                kernel_size=self.kernel_size,
                strides=self.strides,
                activation="elu",
                padding="same")(self.encoded)
            # Dropout on every layer except the last; the original also
            # applied it when layer 0 *is* the last (encoder_layers == 1),
            # and that quirk is preserved here.
            if i == 0 or i < self.encoder_layers - 1:
                self.encoded = Dropout(rate=0.5)(self.encoded)

        # BOTTLENECK: pool once, then start the decoder path.
        self.encoded = pooling.MaxPooling2D(strides=self.pool_size,
                                            padding="same")(self.encoded)
        self.decoded = BatchNormalization()(self.encoded)
        self.decoded = convolutional.Conv2D(filters=self.filters,
                                            kernel_size=self.kernel_size,
                                            strides=self.strides,
                                            activation="elu",
                                            padding="same")(self.decoded)
        self.decoded = convolutional.UpSampling2D(size=self.pool_size)(
            self.decoded)

        # DECODER: conv blocks, Dropout on all but the last.
        for i in range(self.decoder_layers):
            self.decoded = BatchNormalization()(self.decoded)
            self.decoded = convolutional.Conv2D(
                filters=self.filters,
                kernel_size=self.kernel_size,
                strides=self.strides,
                activation="elu",
                padding="same")(self.decoded)
            if i < self.decoder_layers - 1:
                self.decoded = Dropout(rate=0.5)(self.decoded)

        # 4D tensor with shape: (samples, new_rows, new_cols, filters).
        # Remember think of this as a 2D-Lattice across potentially multiple channels per observation.
        # Rows represent time and columns represent some quantities of interest that evolve over time.
        # Channels might represent different sources of information.
        self.decoded = BatchNormalization()(self.decoded)
        self.decoded = convolutional.Conv2D(filters=self.input_shape[2],
                                            kernel_size=self.kernel_size,
                                            strides=self.strides,
                                            activation="sigmoid",
                                            padding="same")(self.decoded)

        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss="mean_squared_error")
# Exemplo n.º 4 (score: 0)
# NOTE(review): `wrangledX` and `n_train` are defined earlier in the file
# (outside this excerpt) — presumably the preprocessed image array and the
# train/test split index; confirm against the preceding code.
trainX = wrangledX[:n_train]
# Unpacking assumes a channels-first 4D array:
# (samples, channels, rows, cols) — TODO confirm.
_, _, img_rows, img_cols = trainX.shape

# noise input
def gen_noise(batch_size, d):
    """Return a (batch_size, d) array of uniform [0, 1) noise."""
    noise_shape = [batch_size, d]
    return np.random.uniform(low=0, high=1, size=noise_shape)

# generator: Dense projection -> reshape to a low-res feature map ->
# upsample + conv stages -> 1-channel sigmoid image.
n_channels = 200
# Use floor division: under Python 3, `img_rows / 2` is a float and would
# break Dense/Reshape/Convolution2D, which need integer sizes.  `//` is
# identical for ints under Python 2, so this is safe either way.
l_width = img_rows // 2
g_input = kl.Input(shape=[noise_size])
H = klc.Dense(n_channels*l_width*l_width, init='glorot_normal')(g_input)
H = kln.BatchNormalization(mode=2)(H)
H = klc.Activation('relu')(H)
H = klc.Reshape([n_channels, l_width, l_width])(H)
H = klconv.UpSampling2D(size=(2,2))(H)
H = klconv.Convolution2D(n_channels//2, 3, 3, border_mode='same',
        init='glorot_uniform')(H)
H = kln.BatchNormalization(mode=2)(H)
H = klc.Activation('relu')(H)
H = klconv.Convolution2D(n_channels//4, 3, 3, border_mode='same',
        init='glorot_uniform')(H)
H = kln.BatchNormalization(mode=2)(H)
H = klc.Activation('relu')(H)
# 1x1 conv collapses the feature maps to a single output channel.
H = klconv.Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(H)
g_V = klc.Activation('sigmoid')(H)
generator = km.Model(g_input, g_V)
# NOTE(review): `noise_size` and `g_opt` are defined outside this excerpt.
generator.compile(loss='binary_crossentropy', optimizer=g_opt)

# discriminator
d_input = kl.Input(shape=[1,img_rows,img_cols])