Example 1
def cnn(train_x, train_y):
    # prepare data
    # define parameters (patience is only used if early stopping is re-enabled below)
    epochs, batch_size, patience = 10, 64, 1500
    n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
    # define model
    model = Sequential()
    model.add(
        Conv1D(filters=32,
               kernel_size=2,
               activation='relu',
               strides=2,
               input_shape=(n_timesteps, n_features)))
    model.add(Conv1D(filters=64, kernel_size=2, activation='relu', strides=2))
    model.add(Conv1D(filters=128, kernel_size=2, activation='relu', strides=2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Reshape([8, 16]))
    model.add(UpSampling1D(2))
    model.add(Conv1D(filters=8, kernel_size=1, activation='relu', strides=1))
    model.add(UpSampling1D(2))
    model.add(Conv1D(filters=1, kernel_size=9, activation='relu', strides=1))
    model.add(Reshape([24, 1]))
    # model.add(Dense(n_outputs))

    model.compile(loss='mse', optimizer='adam')

    model.summary()
    # fit network
    # early_stopping = tf.keras.callbacks.EarlyStopping(patience=patience, mode='min', restore_best_weights= True)
    model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size)
    return model
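A minimal smoke test for this example (a sketch, assuming the usual Keras imports: Sequential, Conv1D, Flatten, Dense, Reshape, UpSampling1D). With kernel_size=2 and strides=2 each encoder conv halves the length (48 -> 24 -> 12 -> 6), while the decoder always emits 24 timesteps, so train_y needs 24 steps:

import numpy as np

train_x = np.random.rand(256, 48, 3).astype('float32')  # illustrative (samples, timesteps, features)
train_y = np.random.rand(256, 24, 1).astype('float32')  # the decoder output is fixed at (24, 1)
model = cnn(train_x, train_y)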
Example 2
    def __call__(self,
                 input_tensor,
                 units=64,
                 kernel_size=2,
                 predict_seq_length=1):
        out_0, out_1, out_2, x = input_tensor
        x = UpSampling1D(4)(x)
        x = Concatenate()([x, out_2])
        x = conv_br(x, units * 3, kernel_size, 1, 1)

        x = UpSampling1D(2)(x)
        x = Concatenate()([x, out_1])
        x = conv_br(x, units * 2, kernel_size, 1, 1)

        x = UpSampling1D(2)(x)
        x = Concatenate()([x, out_0])
        x = conv_br(x, units, kernel_size, 1, 1)

        # regression
        x = Conv1D(1, kernel_size=kernel_size, strides=1, padding="same")(x)
        out = Activation("sigmoid")(x)
        out = Lambda(lambda x: 12 * x)(out)
        # TODO: a tricky way to map (batch, input_seq, 1) -> (batch, out_seq, 1);
        # a more general mechanism is needed
        out = AveragePooling1D(strides=4)(out)

        return out
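The pooling trick flagged in the TODO can be checked in isolation; this is a standalone sketch, not part of the original class. With the default pool_size=2 and strides=4, AveragePooling1D maps a length-24 sequence to length 6 (floor((24 - 2) / 4) + 1):

import tensorflow as tf
from tensorflow.keras.layers import AveragePooling1D

x = tf.random.normal((8, 24, 1))    # batch * input_seq * 1
y = AveragePooling1D(strides=4)(x)  # pool_size defaults to 2
print(y.shape)                      # (8, 6, 1) -> batch * out_seq * 1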
Example 3
    def _decoder(self):
        feature_maps = [16, 1]
        kernel_size = [3, 2]
        encoded_window = Input(shape=(120, 1))

        x = Conv1D(feature_maps[1], kernel_size[0],
                   padding="same")(encoded_window)
        x = LeakyReLU(alpha=0.2)(x)

        x = UpSampling1D(2)(x)

        x = Conv1D(feature_maps[0], kernel_size[1], padding="same")(x)
        x = LeakyReLU(alpha=0.2)(x)

        x = UpSampling1D(2)(x)

        x = Conv1D(feature_maps[1], kernel_size[0], padding="same")(x)

        # "decoded" is the lossy reconstruction of the input
        decoded = LeakyReLU(alpha=0.2)(x)

        # create the decoder model
        decoder = Model(inputs=encoded_window, outputs=decoded)

        decoder.summary()

        #decoder.compile(optimizer = 'adam', loss = 'mse', metrics = ["accuracy"])

        self.decoder = decoder
        return decoder
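A shape check for this decoder (hypothetical usage; `ae` stands in for an instance of the owning class): the fixed (120, 1) encoded window passes through two UpSampling1D(2) stages, so the reconstruction has length 480:

decoder = ae._decoder()      # `ae` is an assumed instance of the owning class
print(decoder.output_shape)  # (None, 480, 1)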
Example 4
    def make_decoder(self, latent_input, n_deconv = 4):
        '''
        Helper function to instantiate the decoder model.

        Parameters
        ----------
        latent_input : TensorFlow symbolic tensor
            The symbolic tensor which represents the latent input.
        n_deconv : int, optional
            The number of upsampling and convolution layers applied.

        Returns
        -------
        Keras model
            The created decoder model.
        '''
        x = latent_input
        # x = Dense(self.latent_dim * 2, activation='relu')(x)
        x = Dense(4 * 128, activation='relu')(x)
        x = Reshape((4, 128))(x)
        for i in range(n_deconv - 1):
            x = UpSampling1D(2)(x)
            x = Conv1D(64 * 2 ** i, 3, activation='relu', padding='same')(x)
        x = UpSampling1D(2)(x)
        x = Conv1D(22, 5, activation='relu', padding='same')(x)

        decoder = Model(latent_input, x, name='decoder')
        if self.debug: decoder.summary()
        return decoder
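Hypothetical usage after the loop-variable fix above, with `vae` standing in for the owning object: each UpSampling1D(2) doubles the length (4 -> 8 -> 16 -> 32 -> 64 for the default n_deconv=4), so a latent vector decodes to a (64, 22) output:

from tensorflow.keras.layers import Input

latent_input = Input(shape=(32,))         # 32 is an illustrative latent size
decoder = vae.make_decoder(latent_input)  # `vae` is hypothetical here
decoder.summary()                         # final output shape: (None, 64, 22)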
Example 5
def create_conv_max_pool_autoencoder(
    input_dimension,
    optimizer='adam',
    loss='binary_crossentropy',
) -> Model:
    input_layer = Input(shape=(input_dimension, 1))

    layer = Conv1D(32, 5, activation='relu', padding='same')(input_layer)
    layer = MaxPooling1D(2, padding='same')(layer)
    layer = Conv1D(16, 5, activation='relu', padding='same')(layer)
    layer = MaxPooling1D(2, padding='same')(layer)

    #encoder = Model(input_layer, layer)

    layer = UpSampling1D(2)(layer)
    layer = Conv1D(16, 5, activation='relu', padding='same')(layer)
    layer = UpSampling1D(2)(layer)
    layer = Conv1D(32, 5, activation='relu', padding='same')(layer)

    decoded = Conv1D(1, 1, activation='sigmoid', padding='same')(layer)

    model = Model(input_layer, decoded)

    model.compile(optimizer=optimizer, loss=loss)

    return model
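Hypothetical usage: input_dimension should be a multiple of 4 here, because the two MaxPooling1D(2, padding='same') stages round odd lengths up while the two UpSampling1D(2) stages only double, so other lengths would not reconstruct to the input shape:

import numpy as np

autoencoder = create_conv_max_pool_autoencoder(128)
x = np.random.rand(64, 128, 1).astype('float32')
autoencoder.fit(x, x, epochs=1, batch_size=8)  # output shape (None, 128, 1) matches the input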
Example 6
    def _build_encoder(self):
        x = Input(shape=(self.step, self.feature_sz + 1))

        # Encoder
        h = Conv1D(256, 16, padding='same', activation='relu')(x)
        h = MaxPool1D(5, padding='same')(h)
        h = Conv1D(128, 8, padding='same', activation='relu')(h)
        h = MaxPool1D(2, padding='same')(h)
        h = Flatten()(h)  # None, 128
        _sz = h.shape[1]
        h = Dense(self.z_sz)(h)

        # Decoder
        out = h
        out = Dense(_sz, activation='relu')(out)
        out = Reshape((-1, 128))(out)  # Be careful with the shape
        out = Conv1D(128, 4, padding='same', activation='relu')(out)
        out = UpSampling1D(2)(out)
        out = Conv1D(self.feature_sz + 1, 16, padding='same')(out)
        out = UpSampling1D(5)(out)

        ae = Model(x, out)
        encoder = Model(x, h)

        return ae, encoder
Example 7
    def __init__(self, ch):
        super(cnn_decoder, self).__init__()

        self.output_ch = ch

        self.up = UpSampling1D()
        self.up_mask = UpSampling1D()

        self.c1 = Conv1D(32, 3, strides=1, padding='valid')
        self.c2 = Conv1D(32, 3, strides=1, padding='valid')

        self.d1 = Dense(ch)
Example 8
def decoder_skip(iot=False):
    encoded_shape = (63 + 32, 1)
    inputs_combined = Input(shape=encoded_shape, name='encoder_input_combined')

    encoded32_input = Lambda(lambda y: y[:, 32:63 + 32])(inputs_combined)
    encoded64_input = Lambda(lambda y: y[:, 0:32])(inputs_combined)

    AE_encoder_output_32CG = Conv1D(filters=8,
                                    kernel_size=3,
                                    activation='swish',
                                    padding='same')(encoded32_input)
    AE_encoder_output_32CG = Conv1D(filters=16,
                                    kernel_size=3,
                                    activation='swish',
                                    padding='same')(AE_encoder_output_32CG)
    AE_encoder_output_32CG_32filters = UpSampling1D(size=2)(
        AE_encoder_output_32CG)  # [126X16]
    AE_encoder_output_32CG_32filters = Conv1D(
        filters=16, kernel_size=3, activation='swish',
        padding='same')(AE_encoder_output_32CG_32filters)  # [126X16]
    AE_encoder_output_32CG_32filters = Conv1D(
        filters=32, kernel_size=3, activation='swish',
        padding='valid')(AE_encoder_output_32CG_32filters)  # [124X32]

    encoded64_input = Conv1D(filters=16,
                             kernel_size=7,
                             activation='swish',
                             padding='same')(encoded64_input)
    encoded64_input = Conv1D(filters=32,
                             kernel_size=3,
                             activation='swish',
                             padding='same')(encoded64_input)
    encoded64_input = UpSampling1D(size=2)(encoded64_input)  # [64X32]
    encoded64_input = Conv1D(filters=32,
                             kernel_size=3,
                             activation='swish',
                             padding='valid')(encoded64_input)
    encoded64_output = UpSampling1D(size=2)(encoded64_input)  # [124X32]

    if iot:
        decoder_shared = Decoder_shared_iot(True,
                                            name='decoder_32_64_shared_iot')
    else:
        # decoder_shared = Decoder_16_32_shared(True, name='decoder_32_64_shared')
        raise NotImplementedError('decoder_skip currently supports only iot=True')
    decoded_shared_32 = decoder_shared(AE_encoder_output_32CG_32filters)
    decoded_shared_64 = decoder_shared(encoded64_output)

    Decoder_16_32_skip = Model(inputs_combined,
                               [decoded_shared_32 + decoded_shared_64],
                               name='decoder')
    return Decoder_16_32_skip
Example 9
File: kwgan.py Project: fdch/kwgan
def get_generator():
  model = Sequential()
  model.add(Dense(4 * 4 * 16 * 64, activation='relu', input_dim=LATENT_DIM))
  model.add(Reshape((2**10, 2**4)))
  model.add(BatchNormalization(momentum=0.8))
  # output (2**11, 2**4)
  model.add(UpSampling1D())
  model.add(Conv1D(2**6, kernel_size=25, strides=4, activation="relu", padding="same"))
  model.add(BatchNormalization(momentum=0.8))
  #
  model.add(UpSampling1D())
  model.add(Conv1D(2**8, kernel_size=25, strides=4, activation="relu", padding="same"))
  model.add(BatchNormalization(momentum=0.8))
  #
  model.add(UpSampling1D())
  model.add(Conv1D(2**7, kernel_size=25, strides=1, activation="relu", padding="same"))
  model.add(BatchNormalization(momentum=0.8))
  model.add(UpSampling1D())
  #
  model.add(UpSampling1D())
  model.add(Conv1D(2**6, kernel_size=25, strides=1, activation="relu", padding="same"))
  model.add(BatchNormalization(momentum=0.8))
  model.add(UpSampling1D())
  #
  model.add(UpSampling1D())
  model.add(Conv1D(1, kernel_size=25, strides=1, activation="relu", padding="same"))
  model.add(BatchNormalization(momentum=0.8))
  model.add(UpSampling1D())
  #
  model.add(Activation("tanh"))
  model.summary()
  return model
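A shape walk-through of this generator (a sketch; LATENT_DIM is assumed to be a module-level constant, 100 below is illustrative): the reshaped (1024, 16) latent is expanded by nine UpSampling1D stages (x512) and shrunk by the two stride-4 convs (/16), ending at (16384, 1) after the tanh:

LATENT_DIM = 100
gen = get_generator()  # the summary reports output shape (None, 16384, 1)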
Example 10
def autoencoder(data_in):
    # Encoder:
    x = Conv1D(128, kernel_size=3, padding='same', activation='relu')(data_in)
    x = MaxPooling1D(pool_size=2, padding='same')(x)
    x = Conv1D(64, kernel_size=3, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2, padding='same')(x)
    x = Conv1D(32, kernel_size=3, padding='same', activation='relu')(x)
    # Decoder:
    x = Conv1D(32, kernel_size=3, padding='same', activation='relu')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(64, kernel_size=3, padding='same', activation='relu')(x)
    x = UpSampling1D(2)(x)
    data_out = Conv1D(1, kernel_size=2, padding='valid', activation='tanh')(x)

    return data_out
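A hedged shape check (assuming the same Keras layer imports as the snippet): for an input length divisible by 4 the pooling/upsampling pairs cancel, and the final kernel_size=2, padding='valid' conv trims exactly one timestep, so a length-64 input reconstructs to length 63:

from tensorflow.keras import Input, Model

data_in = Input(shape=(64, 1))
model = Model(data_in, autoencoder(data_in))
print(model.output_shape)  # (None, 63, 1)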
Example 11
def autoencoder_model(timesteps, input_dim):
    inputs = Input(shape=(timesteps, input_dim), name='input')  # (,7813,6)
    activation = 'sigmoid'  # sigmoid
    activation_last = 'sigmoid'  # relu

    maxpoolsize = 2
    latent_dim = input_dim  # arbitrary; the channels no longer correspond to the x/y/z axes
    kernelsize = 8

    x = Conv1D(16, kernelsize, activation=activation, padding='same', use_bias=True)(inputs)
    x = BatchNormalization(axis=-1)(x)
    x = MaxPooling1D(maxpoolsize, padding='same')(x)
    x = Conv1D(8, kernelsize, activation=activation, padding='same', use_bias=True)(x)
    x = BatchNormalization(axis=-1)(x)
    x = MaxPooling1D(maxpoolsize, padding='same')(x)
    x = Conv1D(latent_dim, kernelsize, activation=activation_last, padding='same', use_bias=True)(x)
    x = BatchNormalization(axis=-1)(x)
    encoded = MaxPooling1D(2, padding='same')(x)  # (,977,2)

    x = Conv1D(latent_dim, kernelsize, activation=activation, padding='same', use_bias=True)(encoded)
    x = BatchNormalization(axis=-1)(x)
    x = UpSampling1D(maxpoolsize)(x)
    x = Conv1D(8, kernelsize, activation=activation, padding='same', use_bias=True)(x)
    x = BatchNormalization(axis=-1)(x)
    x = UpSampling1D(maxpoolsize)(x)
    x = Conv1D(16, kernelsize, activation=activation, padding='same', use_bias=True)(x)
    x = BatchNormalization(axis=-1)(x)
    x = UpSampling1D(maxpoolsize)(x)
    # print('AE timesteps', timesteps)
    # print('AE x.shape', x.shape)
    # crop the upsampled sequence back to the original number of timesteps
    n_crop = int(x.shape[1] - timesteps)
    x = Cropping1D(cropping=(0, n_crop))(x)

    decoded = Conv1D(input_dim, kernelsize, activation='linear', padding='same', use_bias=False,
                     name='autoencoderl')(x)

    autoencoder = Model(inputs, decoded)
    # autoencoder.compile(optimizer='Adam', loss='mse')  # mine
    # autoencoder.compile(optimizer='rmsprop', loss='mse')
    # autoencoder.summary()
    encoder = Model(inputs, encoded, name='encoded_layer')
    return autoencoder, encoder
Example 12
def cnn_encoder(n_timesteps, n_features):  # n_features is unused; channels are hard-coded to 12
    input_ = Input((n_timesteps, 12))
    x = AveragePooling1D(4)(input_)
    x = Conv1D(32, 3, padding='same', activation='relu', name='encoder1')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(64, 3, padding='same', activation='relu', name='encoder2')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(64, 3, padding='same', activation='relu', name='encoder3')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='encoder4')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='encoder5')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='encoder6')(x)
    x = AveragePooling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='encoder7')(x)
    # x = AveragePooling1D(2)(x)
    # x = Conv1D(128, 3, padding = 'same', activation = 'relu', name = 'encoder8')(x)
    # x = AveragePooling1D(2)(x)
    # x = Conv1D(128, 3, padding = 'same', activation = 'relu', name = 'encoder9')(x)

    x = AveragePooling1D(2, name='encoded')(x)

    # x = Conv1D(128, 3, padding = 'same', activation = 'relu', name = 'decoder10')(x)
    # x = UpSampling1D(2)(x)
    # x = Conv1D(128, 3, padding = 'same', activation = 'relu', name = 'decoder9')(x)
    # x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder8')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder7')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder6')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder5')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder4')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder3')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(128, 3, padding='same', activation='relu', name='decoder2')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(64, 3, padding='same', activation='relu', name='decoder1')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(64, 3, padding='same', activation='relu', name='decoder0')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(64, 3, padding='same', name='decoder-1')(x)
    x = Conv1D(12, 3, activation='sigmoid', padding='same', name='recover')(x)
    x = Flatten()(x)

    model = Model(inputs=input_, outputs=x)

    return model
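Hypothetical usage: the encoder pools by 4 * 2**7 = 512 and the decoder upsamples by 2**9 = 512, so lengths round-trip exactly when n_timesteps is a multiple of 512; the trailing Flatten then yields n_timesteps * 12 values:

model = cnn_encoder(n_timesteps=1024, n_features=12)
model.summary()  # final output shape: (None, 12288)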
Example 13
def network_decoder_v2(x, code_size, sound_shape):
    n_upsamplings = 3
    u_best, f_best, smallest_overshoot = get_smaller_overshoot(
        code_size, sound_shape[0], n_upsamplings)

    x = ExpandDims(2)(x)

    for _ in range(n_upsamplings):
        x = keras.layers.Conv1D(filters=32,
                                kernel_size=3,
                                strides=1,
                                padding='same',
                                activation='linear')(x)
        x = keras.layers.BatchNormalization()(x)
        x = UpSampling1D(size=u_best)(x)
        x = keras.layers.LeakyReLU()(x)

    x = keras.layers.Conv1D(filters=f_best,
                            kernel_size=3,
                            strides=1,
                            padding='same',
                            activation='linear')(x)
    x = keras.layers.Flatten()(x)
    x = Slice(axis=1, initial=smallest_overshoot, final=None)(x)
    x = ExpandDims(2)(x)

    return x
Example 14
def apply_upsampling(layer, N):
    n = layer.shape[1]
    factor = int(N//n)
    if factor > 0:
        return UpSampling1D(factor)(layer)
    else:
        return layer
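A quick check (assuming Input is imported as in the other snippets): the helper stretches only by the integer part of N // n, and returns the layer unchanged when it is already at least as long as N:

from tensorflow.keras.layers import Input

x = Input(shape=(10, 8))
y = apply_upsampling(x, 25)  # factor = 25 // 10 = 2
print(y.shape)               # (None, 20, 8), not (None, 25, 8)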
Example 15
    def __init__(self, num_filter, size_filter, sampling_stride, use_regular_uppsampling = False,
                size = 2, rate = 0.1, l1 = 0.01, l2 = 0.01, use_max_pool = False):
        super(UpsampleMod_s_res, self).__init__()
        self.use_max_pool = use_max_pool
        self.bn1   = LayerNormalization()
        self.bn2   = LayerNormalization()
        self.bn3   = LayerNormalization()
        self.bn4   = LayerNormalization()

        self.reg1 = L1L2(l1=l1, l2=l2)
        self.reg2 = L1L2(l1=l1, l2=l2)
        self.reg3 = L1L2(l1=l1, l2=l2)
        self.reg4 = L1L2(l1=l1, l2=l2)
        
        self.add = Add()
        self.concat = Concatenate(axis=2)
        
        self.act = LeakyReLU()

        self.conv1 = Conv1D(num_filter, size_filter, padding='same',
                            kernel_regularizer = self.reg1, use_bias=False)
        self.conv2 = Conv1D(num_filter, size_filter, padding='same',
                            kernel_regularizer = self.reg2, use_bias=False)
        self.conv3 = Conv1D(num_filter, size_filter, padding='same',
                            kernel_regularizer = self.reg3, use_bias=False)
        
        if not self.use_max_pool:
            self.u_sample = Conv1DTranspose(num_filter, size_filter, strides = sampling_stride,
                            kernel_regularizer = self.reg4, use_bias=False)
        else:
            self.u_sample = UpSampling1D(size = size)
        self.dOut = Dropout(rate)
Example 16
def upsample_1D(inputs, residual, filters, kernel_size=3):
    conv = Conv1D(filters, kernel_size, padding='same')(inputs)
    residual = Conv1D(filters, 1)(residual)
    conv = Add()([conv, residual])
    conv = Activation('relu')(conv)
    conv = UpSampling1D()(conv)
    return conv
Example 17
def _upsampler(dimension, pool_x, pool_y, pool_z):
    if dimension == 4:
        return UpSampling3D(size=(pool_x, pool_y, pool_z))
    elif dimension == 3:
        return UpSampling2D(size=(pool_x, pool_y))
    elif dimension == 2:
        return UpSampling1D(size=pool_x)
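Hypothetical usage: `dimension` appears to be the input rank excluding the batch axis, so a (batch, steps, channels) tensor corresponds to dimension=2; note the function implicitly returns None for any other value:

import tensorflow as tf

up = _upsampler(dimension=2, pool_x=3, pool_y=None, pool_z=None)
print(up(tf.zeros((1, 8, 4))).shape)  # (1, 24, 4)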
Example 18
    def __init__(self, num_layers=4, c1=128, c2=192, c3=256, drop_rate=0.1, num_heads=8):
        super().__init__()
        self.input_dense = Dense(c1)
        self.sigma_ffn = ff_network(c1//4, 2048)
        self.enc1 = ConvSubLayer(c1, [1, 2])
        self.enc2 = ConvSubLayer(c2, [1, 2])
        self.enc3 = DecoderLayer(c2, 3, drop_rate, pos_factor=4)
        self.enc4 = ConvSubLayer(c3, [1, 2])
        self.enc5 = DecoderLayer(c3, 4, drop_rate, pos_factor=2)
        self.pool = AveragePooling1D(2)
        self.upsample = UpSampling1D(2)

        self.skip_conv1 = Conv1D(c2, 3, padding='same')
        self.skip_conv2 = Conv1D(c3, 3, padding='same')
        self.skip_conv3 = Conv1D(c2*2, 3, padding='same')
        self.text_style_encoder = Text_Style_Encoder(c2*2, c2*4)
        self.att_dense = Dense(c2*2)
        self.att_layers = [DecoderLayer(c2*2, 6, drop_rate) 
                     for i in range(num_layers)]
                     
        self.dec3 = ConvSubLayer(c3, [1, 2])
        self.dec2 = ConvSubLayer(c2, [1, 1])
        self.dec1 = ConvSubLayer(c1, [1, 1])
        self.output_dense = Dense(2)
        self.pen_lifts_dense = Dense(1, activation='sigmoid')
Example 19
    def build_decoder(self, latent_dim, block_size, k_size):
        model = Sequential()
        model.add(Input(shape=(latent_dim, )))
        # the Dense width must match the Reshape target (latent_dim ** 2 units)
        model.add(Dense(units=latent_dim * latent_dim, activation='relu'))
        model.add(Reshape(target_shape=(latent_dim, latent_dim)))

        current_len = model.layers[-1].output_shape[1]
        filters = 256
        while current_len < block_size:
            model.add(
                Conv1DTranspose(filters,
                                strides=1,
                                kernel_size=k_size,
                                padding='same'))
            model.add(UpSampling1D(size=2))
            model.add(Activation('relu'))
            filters = filters // 2
            current_len = model.layers[-1].output_shape[1]

        model.add(
            Conv1DTranspose(1,
                            kernel_size=k_size,
                            padding='same',
                            activation=None))
        return model
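Hypothetical usage, with `builder` standing in for an instance of the owning class: starting from length latent_dim, each loop iteration doubles the length, so the output length is the first latent_dim * 2**k that reaches block_size:

decoder = builder.build_decoder(latent_dim=8, block_size=128, k_size=5)
decoder.summary()  # final output shape: (None, 128, 1), via 8 -> 16 -> 32 -> 64 -> 128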
Example 20
def auto_encoder_text2(vocab_size=None, filters=None, latentDim=None):

    input_layer = Input(shape=(vocab_size, 1))
    x = input_layer

    for f in filters:
        x = Conv1D(f, 3, activation='relu', padding='same')(x)
        x = MaxPool1D(2, padding='same')(x)

    volumeSize = K.int_shape(x)

    x = Flatten(name="bottleneck")(x)

    latent = Dense(latentDim, activation='tanh', name='encoded')(x)
    x = Dense(np.prod(volumeSize[1:]))(latent)

    x = Reshape((volumeSize[1], volumeSize[2]))(x)

    # decoding architecture
    for f in filters[::-1]:
        x = Conv1D(f, 3, activation='relu', padding='causal')(x)
        x = UpSampling1D(2)(x)

    output_layer = Conv1D(1, 3, padding='same', activation='sigmoid')(x)

    # compile the model
    model = Model(input_layer, output_layer)
    model.compile(optimizer='adam', loss='mse')

    return model
Example 21
def auto_encoder():
    data, labs = data_generator_multiple(ndat, nL, sd)
    data = data.reshape((ndat, nL,1))
    print(data[:n_train].shape)
    #labs = np_utils.to_categorical(labs, 2)

    # create model
    inputs = Input(shape=(nL, 1))
    x = Conv1D(filters=2, kernel_size=4, activation='relu', padding='same')(inputs)

    encoded = MaxPooling1D(pool_size=2, strides=2, padding='same')(x)  # stride is the compression ratio

    x = Conv1D(filters=2, kernel_size=4, activation='relu', padding='same')(encoded)

    x = UpSampling1D(size=2)(x)

    decoded = Conv1D(filters=1, kernel_size=4, activation='linear', padding='same')(x)

    model = Model(inputs, decoded)
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])

    model.fit(data[:n_train], data[:n_train], batch_size=32, epochs=30)

    score = model.evaluate(data[n_train:], data[n_train:], verbose=0)
    print('Test accuracy:', score[1])

    layer_outputs = [layer.output for layer in model.layers]

    activation_model = models.Model(inputs = model.input, outputs = layer_outputs)

    return model, activation_model
Example 22
    def __init__(self, frame_length, **kwargs):
        super(Decoder, self).__init__(**kwargs)
        self.frame_length = frame_length
        self.upsampling = UpSampling1D(size=frame_length // 64)
        self.dense1 = Dense(128, activation='softplus')
        self.dense2 = Dense(64, activation='softplus')
        self.dense3 = Dense(64, activation='softplus')
        self.dense4 = Dense(128, activation='relu')
        self.convtranspose1 = Conv1DTranspose(1, 128, padding='same')
Example 23
    def __init__(self, norm_output, **kwargs):
        super(Decoder_shared_iot, self).__init__(**kwargs)
        self.norm_output = norm_output

        self.upsam1 = UpSampling1D(size=2)
        self.zeroPadding = ZeroPadding1D(padding=(1, 1))
        self.conv3 = Conv1D(filters=64,
                            kernel_size=7,
                            activation='swish',
                            padding='same')
        self.conv4 = Conv1D(filters=128,
                            kernel_size=7,
                            activation='swish',
                            padding='same')
        # 20
        self.upsam2 = UpSampling1D(size=2)
        self.conv5 = Conv1D(filters=16,
                            kernel_size=3,
                            activation='swish',
                            padding='same')
        self.conv6 = Conv1D(filters=32,
                            kernel_size=5,
                            activation='swish',
                            padding='same')
        self.upsam3 = UpSampling1D(size=2)
        self.conv7 = Conv1D(filters=32,
                            kernel_size=3,
                            activation='swish',
                            padding='same')
        # 25
        #         self.upsam4 = UpSampling1D(size=2)
        self.conv8 = Conv1D(filters=8,
                            kernel_size=3,
                            activation='swish',
                            padding='same')
        self.conv9 = Conv1D(filters=2,
                            kernel_size=3,
                            activation='swish',
                            padding='same')
        self.flatten = Flatten()

        self.Dense = Dense(2000, name='outputs_decoder')
Example 24
    def HealpyUpSample(self, current_nside, current_indices, p):
        """
        :param p: boost factor >= 1 of the nside -> the number of nodes
            increases by 4**p. Note that the layer only checks that the input
            dimensionality is evenly divisible by 4**p, not that the ordering
            is correct (it should be nested ordering).
        """
        layer = UpSampling1D(4**p)
        new_nside = int(current_nside * 2**p)
        self.current_indices = self._transform_indices(nside_in=current_nside,
                                                       nside_out=new_nside,
                                                       indices=current_indices)
        self.current_nside = new_nside
        return layer
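A small check of the boost factor, independent of the HEALPix index bookkeeping: p=1 repeats every node 4 times (its four nested-ordering children), which corresponds to doubling nside (a full sky at nside=1 has 12 pixels, at nside=2 it has 48):

import tensorflow as tf
from tensorflow.keras.layers import UpSampling1D

layer = UpSampling1D(4**1)
print(layer(tf.zeros((1, 12, 1))).shape)  # (1, 48, 1)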
Example 25
def segnet(input_shape, n_classes, width=16, n_filters=16, dropout_rate=0.01, 
           input_dropout_rate=0.01, pool_size=4, l2_lambda=1e-30, n_blocks=5, 
           batch_normalization=False, crf=None, smooth=False, tanh=False):
    # starting input and dropout
    X_input=Input(shape=input_shape)
    X=Dropout(input_dropout_rate)(X_input)
    
    # down layers
    pools=[]
    for i in range(n_blocks):
        # double convolutional block for each step  
        for j in range(2 + int(i>1)):
            # Defaults are activation=None and stride=1
            X=Conv1D(filters=n_filters*(2**i), kernel_size=width, 
                     padding='same', kernel_initializer='he_normal', 
                     activity_regularizer=regularizers.l2(l2_lambda),
                     name='conv'+str(i+1)+'_down'+str(j+1))(X)
            # optional batchnorm, then ReLU activation and Dropout
            if batch_normalization:
                X=BatchNormalization(center=False)(X)
            X=Activation('tanh' if tanh else 'relu')(X)
            X=Dropout(dropout_rate)(X)
        # scrunch down, save layer
        X=MaxPooling1D(pool_size=pool_size, name='pool'+str(i))(X)
        pools.append(X)
    
    # up layers
    for i in reversed(range(n_blocks)):
        # link up with previous filters, expand back up
        X=concatenate([pools[i], X], axis = -1)
        X=UpSampling1D(size=pool_size, name='up'+str(i))(X)
        for j in range(2 + int(i>1)): 
            X=Conv1D(filters=n_filters*(2**i), kernel_size=width,
                     padding='same', kernel_initializer='he_normal', 
                     activity_regularizer=regularizers.l2(l2_lambda),
                     name='conv'+str(i)+'_up'+str(j))(X)
            if batch_normalization:
                X=BatchNormalization(center=False)(X)
            X=Activation('tanh' if tanh else 'relu')(X)
            X=Dropout(dropout_rate)(X)
            
    # output layer
    if crf is not None: 
        # this is a passed tf2CRF object 
        # - must have loss=crf.loss, metrics=[crf.accuracy] (or similar)
        #   at compile-time (train.py or sandbox.ipynb)
        Y=CRF(n_classes, learn_mode='marginal', test_mode='marginal', activation='softmax')(X)
    elif smooth:
        Y=Conv1D(filters=n_classes, kernel_size=256, padding='same', activation='softmax', name='output_layer')(X)
    else:
        Y=Dense(n_classes, activation='softmax', name='output_layer')(X)

    # done!
    return Model(inputs=X_input, outputs=Y, name='segnet')
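Hypothetical usage: with the default pool_size=4 and n_blocks=5 the input length must be a multiple of 4**5 = 1024, so that each UpSampling1D output matches the saved pooling layer it is concatenated with:

model = segnet(input_shape=(4096, 1), n_classes=3)
model.summary()  # final output shape: (None, 4096, 3)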
Example 26
def Attention_3(X, filters, base):
    F1, F2, F3 = filters

    name_base = base

    X = res_identity(X, filters, name_base + '/Pre_Residual_id')

    X_Trunk = Trunk_block(X, filters, name_base + '/Trunk')

    X = MaxPooling1D(3,
                     strides=2,
                     padding='same',
                     name=name_base + '/Mask/pool_1')(X)

    X = res_identity(X, filters, name_base + '/Mask/Residual_id_1_Down')

    X = res_identity(X, filters, name_base + '/Mask/Residual_id_1_Up')

    temp_name2 = name_base + "/Mask/Interpool_1"

    # X = Lambda(interpolation, arguments={'ref_tensor': X_Trunk, 'name': temp_name2})(X)
    X = UpSampling1D(size=X_Trunk.shape[1] // X.shape[1], name=temp_name2)(X)

    X = BatchNormalization(name=name_base + '/Mask/Interpool_2/bn_1')(X)

    X = Activation('relu', name=name_base + '/Mask/Interpool_2/relu_1')(X)

    X = Conv1D(F3,
               kernel_size=1,
               strides=1,
               padding='same',
               name=name_base + '/Mask/Interpool_2/conv_1',
               kernel_initializer=glorot_uniform(seed=0))(X)

    X = BatchNormalization(name=name_base + '/Mask/Interpool_2/bn_2')(X)

    X = Activation('relu', name=name_base + '/Mask/Interpool_2/relu_2')(X)

    X = Conv1D(F3,
               kernel_size=1,
               strides=1,
               padding='same',
               name=name_base + '/Mask/Interpool_2/conv_2',
               kernel_initializer=glorot_uniform(seed=0))(X)

    X = Activation('sigmoid', name=name_base + '/Mask/sigmoid')(X)

    X = Multiply(name=name_base + '/Multiply')([X_Trunk, X])

    X = Add(name=name_base + '/Add')([X_Trunk, X])

    X = res_identity(X, filters, name_base + '/Post_Residual_id')

    return X
Example 27
    def pool(self, tensor):
        tensor = Conv1D(self.filters,
                        1,
                        1,
                        padding="same",
                        activation=self.activation,
                        kernel_regularizer=l2(0.00))(tensor)
        shape = tensor.shape.as_list()  # [batch, length, channels]
        scaler = AveragePooling1D(tensor.shape.as_list()[1])(
            tensor)  # [batch, 1, channels]
        scaler = UpSampling1D(shape[1])(scaler)
        return scaler
Example 28
def get_model():
    layers = 4
    units = 6
    force_trainable = False

    x = Input(shape=(num_samples, 1))
    y = x

    def block(y, i, s, trainable):
        s += str(i)
        u = 2**(units + i)
        trainable = (trainable or force_trainable)

        y = Conv1D(u,
                   15,
                   padding='same',
                   activation='relu',
                   name=s + '_conv1d_1',
                   trainable=trainable)(y)
        y = BatchNormalization(name=s + '_norm1', trainable=trainable)(y)
        y = Conv1D(u,
                   15,
                   padding='same',
                   activation='relu',
                   name=s + '_conv1d_2',
                   trainable=trainable)(y)
        y = BatchNormalization(name=s + '_norm2', trainable=trainable)(y)
        return Dropout(0.1)(y)

    # Encoder
    for i in range(layers):
        y = block(y, i, 'enc', i == layers - 1)
        y = MaxPooling1D(4)(y)

    y = block(y, layers, 'enc' + str(layers + 100) + '_', True)

    # Decoder
    for i in reversed(range(layers)):
        y = UpSampling1D(4)(y)
        y = block(y, i, 'dec', i == layers - 1)

    trainable = (layers == 1 or force_trainable)
    y = Dense(256, activation='relu', name='final_relu1',
              trainable=trainable)(y)
    y = Dense(256, activation='relu', name='final_relu2',
              trainable=trainable)(y)
    y = Dense(256,
              activation='softmax',
              dtype='float32',
              name='softmax_final',
              trainable=trainable)(y)

    return Model(inputs=x, outputs=y)
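Hypothetical usage, assuming the global num_samples is a multiple of 4**layers (256 for layers=4) so the four MaxPooling1D(4)/UpSampling1D(4) pairs cancel; the per-timestep softmax head then emits 256 classes per sample:

num_samples = 1024  # assumed module-level global, as used by the Input layer above
model = get_model()
model.summary()     # final output shape: (None, 1024, 256)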
Example 29
def build_model(acti, opti, lr):
    # 2. model
    inputs = Input(shape=(6, 1), name='input')
    x = Conv1D(filters=50,
               kernel_size=4,
               padding='same',
               strides=2,
               activation=acti)(inputs)
    x = Dropout(0.2)(x)
    # x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=150,
               kernel_size=2,
               padding='same',
               strides=2,
               activation=acti)(x)
    x = Dropout(0.3)(x)
    # x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=150,
               kernel_size=2,
               padding='same',
               strides=2,
               activation=acti)(x)
    x = Dropout(0.3)(x)
    x = UpSampling1D(size=2)(x)
    x = Conv1D(filters=50,
               kernel_size=2,
               padding='same',
               strides=2,
               activation=acti)(x)
    x = Dropout(0.2)(x)
    x = UpSampling1D(size=2)(x)
    x = Flatten()(x)
    outputs = Dense(1)(x)
    model = Model(inputs=inputs, outputs=outputs)

    model.compile(loss='mse',
                  optimizer=opti(learning_rate=lr),
                  metrics=['mae'])

    return model
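Hypothetical usage (ReLU activation and the Adam optimizer class, as examples): the strided convs shrink 6 -> 3 -> 2 -> 1, the UpSampling1D/strided-conv pairs end at length 2, so Flatten feeds 2 * 50 = 100 features into the final Dense(1):

from tensorflow.keras.optimizers import Adam

model = build_model('relu', Adam, 1e-3)
model.summary()  # final output shape: (None, 1)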
Example 30
def upsample(filters, apply_dropout=False):
  initializer = tf.random_normal_initializer(0., 0.02)
  result = tf.keras.Sequential()
  result.add(UpSampling1D())
  result.add(tf.keras.layers.Conv1D(filters, kernel_size=25, strides=1, padding='same'))
  result.add(tf.keras.layers.BatchNormalization())

  if apply_dropout:
    result.add(tf.keras.layers.Dropout(0.5))

  result.add(tf.keras.layers.ReLU())

  return result
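Hypothetical usage: each block doubles the sequence length and projects to `filters` channels, for example (2, 128, 32) -> (2, 256, 64):

import tensorflow as tf

up = upsample(64, apply_dropout=True)
y = up(tf.random.normal((2, 128, 32)))
print(y.shape)  # (2, 256, 64)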