Example #1
File: unet.py  Project: dyf/nnspike
# Assumed imports (aliases inferred from usage; not shown in the source file):
from tensorflow.keras import layers as kl, models as km, optimizers as ko

def unet(input_shape, output_cats):
    inputs = kl.Input(input_shape)
    c1 = kl.Conv1D(8, 9, activation='relu', padding='same')(inputs)
    c1 = kl.Conv1D(8, 9, activation='relu', padding='same')(c1)
    p1 = kl.MaxPooling1D(pool_size=2)(c1)

    c2 = kl.Conv1D(16, 9, activation='relu', padding='same')(p1)
    c2 = kl.Conv1D(16, 9, activation='relu', padding='same')(c2)
    p2 = kl.MaxPooling1D(pool_size=2)(c2)

    c3 = kl.Conv1D(32, 9, activation='relu', padding='same')(p2)
    c3 = kl.Conv1D(32, 9, activation='relu', padding='same')(c3)
    p3 = kl.MaxPooling1D(pool_size=2)(c3)

    c4 = kl.Conv1D(64, 9, activation='relu', padding='same')(p3)
    c4 = kl.Conv1D(64, 9, activation='relu', padding='same')(c4)
    d4 = kl.Dropout(0.5)(c4)
    p4 = kl.MaxPooling1D(pool_size=2)(d4)

    c5 = kl.Conv1D(128, 9, activation='relu', padding='same')(p4)
    c5 = kl.Conv1D(128, 9, activation='relu', padding='same')(c5)
    d5 = kl.Dropout(0.5)(c5)

    u6 = kl.Conv1D(64, 4, activation='relu', padding='same')(kl.UpSampling1D(size=2)(d5))
    m6 = kl.Concatenate(axis=2)([d4, u6])
    c6 = kl.Conv1D(64, 9, activation='relu', padding='same')(m6)
    c6 = kl.Conv1D(64, 9, activation='relu', padding='same')(c6)

    u7 = kl.Conv1D(32, 4, activation='relu', padding='same')(kl.UpSampling1D(size=2)(c6))
    m7 = kl.Concatenate(axis=2)([c3, u7])
    c7 = kl.Conv1D(32, 9, activation='relu', padding='same')(m7)
    c7 = kl.Conv1D(32, 9, activation='relu', padding='same')(c7)

    u8 = kl.Conv1D(16, 4, activation='relu', padding='same')(kl.UpSampling1D(size=2)(c7))
    m8 = kl.Concatenate(axis=2)([c2, u8])
    c8 = kl.Conv1D(16, 9, activation='relu', padding='same')(m8)
    c8 = kl.Conv1D(16, 9, activation='relu', padding='same')(c8)

    u9 = kl.Conv1D(8, 4, activation='relu', padding='same')(kl.UpSampling1D(size=2)(c8))
    m9 = kl.Concatenate(axis=2)([c1, u9])
    c9 = kl.Conv1D(8, 9, activation='relu', padding='same')(m9)
    c9 = kl.Conv1D(8, 9, activation='relu', padding='same')(c9)

    c10 = kl.Dense(output_cats, activation='softmax')(c9)

    model = km.Model(inputs=inputs, outputs=c10)

    model.compile(optimizer=ko.Adam(learning_rate=1e-4),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.summary()
    return model
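A minimal usage sketch (assumptions: the input length must be divisible by 16 so the four pool/upsample stages line up, and the data below is invented for illustration):

import numpy as np
from tensorflow.keras.utils import to_categorical

model = unet(input_shape=(1024, 1), output_cats=3)
x = np.random.rand(8, 1024, 1)                             # hypothetical 1D signals
y = to_categorical(np.random.randint(0, 3, (8, 1024)), 3)  # hypothetical per-step labels
model.fit(x, y, batch_size=4, epochs=1)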
Example #2
# Assumed imports (inferred from usage; FunnyFermatCfg and ConstMultiplierLayer are
# defined elsewhere in the project):
# from keras import backend as K, layers, models
def _backwards(n_classes, cfg: FunnyFermatCfg) -> models.Model:
    inputs = layers.Input(shape=(None, n_classes))
    net = inputs
    # walk the blocks in reverse, doubling the length and shrinking channels per step
    for i in reversed(range(cfg.num_blocks)):
        net = layers.UpSampling1D(size=2)(net)
        channels = cfg.block_init_channels * 2**i
        net = layers.Conv1D(channels, cfg.receptive_width,
                            padding='same')(net)
        with K.name_scope(f"rev_block_{i}"):
            for _ in range(cfg.block_elem):
                x = net  # residual branch input
                net = layers.Conv1D(channels,
                                    cfg.receptive_width,
                                    padding='same')(net)
                net = layers.BatchNormalization()(net)
                net = layers.Activation('relu')(net)
                net = layers.Conv1D(channels,
                                    cfg.receptive_width,
                                    padding='same')(net)
                net = layers.BatchNormalization()(net)
                net = layers.Activation('relu')(net)
                net = ConstMultiplierLayer()(net)
                net = layers.add([x, net])  # residual connection
    net = layers.Conv1D(1, cfg.receptive_width, padding="same")(net)
    return models.Model(inputs=[inputs], outputs=[net])
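A hedged usage sketch; FunnyFermatCfg and ConstMultiplierLayer are project-specific, so the stand-in config below only mirrors the fields the function actually reads:

from dataclasses import dataclass

@dataclass
class DemoCfg:  # hypothetical stand-in for FunnyFermatCfg
    num_blocks: int = 3
    block_init_channels: int = 8
    block_elem: int = 2
    receptive_width: int = 9

decoder = _backwards(n_classes=4, cfg=DemoCfg())
# each block doubles the length: (batch, L, 4) -> (batch, L * 2**3, 1)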
Example #3
    # Assumed aliases (not shown in the source): import keras as ke; import keras.layers as kl; import datetime
    def __init__(self,
                 layer_count=10
                 ):
        self.layer_count = layer_count
        self.input_seq = kl.Input(shape=(48000, 1))

        # encoder is developed here: conv + pool pairs, doubling the filter count at each depth
        encoder = kl.Conv1D(filters=1, kernel_size=3, padding='same', data_format='channels_last',
                            dilation_rate=1, activation='relu', use_bias=False)(self.input_seq)
        encoder = kl.MaxPooling1D(pool_size=2)(encoder)
        for i in range(1, self.layer_count // 2):
            encoder = kl.Conv1D(filters=2 ** i, kernel_size=3, padding='same', data_format='channels_last',
                                dilation_rate=1, activation='relu', use_bias=False)(encoder)
            encoder = kl.MaxPooling1D(pool_size=2)(encoder)

        # decoder mirrors the encoder, with upsampling in place of pooling
        decoder = encoder
        for i in reversed(range(self.layer_count // 2)):
            decoder = kl.Conv1D(filters=2 ** i, kernel_size=3, padding='same', data_format='channels_last',
                                dilation_rate=1, activation='relu', use_bias=False)(decoder)
            decoder = kl.UpSampling1D(2)(decoder)

        self.model = ke.Model(self.input_seq, decoder)

        sgd = ke.optimizers.SGD(decay=1e-5, momentum=0.9, nesterov=True)

        self.model.compile(optimizer=sgd, loss='mean_squared_error')

        # Naming the autoencoder to save and read later
        # (assumes self.model_name is a format-string template set elsewhere on the class)
        self.model_name = self.model_name.format('#{}_Layers_{}'.format(layer_count, str(datetime.datetime.now())))
Example #4
# Assumed imports (not shown in the source): from keras import layers; from keras.models import Model
def model(num_variables):

    inputs = layers.Input(shape=(num_variables, ))
    reshape = layers.Reshape((num_variables, 1))(inputs)

    conv = layers.Conv1D(100, 10, padding='same')(reshape)
    pool = layers.MaxPooling1D(2, padding='same')(conv)
    conv = layers.Conv1D(80, 8, padding='same')(pool)
    pool = layers.MaxPooling1D(2, padding='same')(conv)
    conv = layers.Conv1D(60, 6, padding='same')(pool)
    pool = layers.MaxPooling1D(2, padding='same')(conv)
    conv = layers.Conv1D(40, 4, padding='same')(pool)
    pool = layers.MaxPooling1D(2, padding='same')(conv)
    conv = layers.Conv1D(10, 2, padding='same')(pool)
    pool = layers.MaxPooling1D(2, padding='same')(conv)

    flatten = layers.Flatten()(pool)

    dense = layers.Dense(300, activation="relu")(flatten)
    dense = layers.Dense(200, activation="relu")(dense)
    encoded = layers.Dense(100, activation="relu")(dense)
    dense = layers.Dense(200, activation="relu")(encoded)
    dense = layers.Dense(300, activation="relu")(dense)

    dense = layers.Dense(5760)(dense)
    reshape = layers.Reshape((int(5760 / 10), 10))(dense)

    conv = layers.Conv1D(10, 2, padding='same')(reshape)
    upsample = layers.UpSampling1D(2)(conv)
    conv = layers.Conv1D(40, 4, padding='same')(upsample)
    upsample = layers.UpSampling1D(2)(conv)
    conv = layers.Conv1D(60, 6, padding='same')(upsample)
    upsample = layers.UpSampling1D(2)(conv)
    conv = layers.Conv1D(80, 8, padding='same')(upsample)
    upsample = layers.UpSampling1D(2)(conv)
    conv = layers.Conv1D(100, 10, padding='same')(upsample)
    upsample = layers.UpSampling1D(2)(conv)
    conv = layers.Conv1D(1, 250, padding='same')(upsample)

    output = layers.Flatten()(conv)

    autoencoder = Model(inputs=inputs, outputs=output, name='Autoencoder')
    encoder = Model(inputs=inputs, outputs=encoded, name='Encoder')
    autoencoder.compile(optimizer='adam', loss='mse')
    encoder.compile(optimizer='adam', loss='mse')  # optional: the encoder is only used for inference

    return autoencoder, encoder
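The decoder's length is fixed by the Dense(5760) bottleneck: reshaping gives (576, 10), and five UpSampling1D(2) stages yield 576 * 32 = 18432 output samples, so the MSE reconstruction loss only matches the input when num_variables == 18432. A hedged usage sketch under that assumption:

import numpy as np

autoencoder, encoder = model(num_variables=18432)
x = np.random.rand(4, 18432)      # hypothetical batch
autoencoder.fit(x, x, epochs=1)
codes = encoder.predict(x)        # (4, 100) latent codes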
Example #5
    def de_conv_1d(self, in_put, skip_in, filters, f_size, dr, upsampling_rate):
        # upsample, then dilated causal convolution + BN + LeakyReLU
        dc1d = layers.UpSampling1D(size=upsampling_rate)(in_put)
        dc1d = layers.Conv1D(filters, f_size, padding='causal', dilation_rate=dr, use_bias=False)(dc1d)
        dc1d = layers.BatchNormalization(momentum=self.batch_norm_momentum)(dc1d)
        dc1d = layers.LeakyReLU(alpha=self.leaky_relu_alpha)(dc1d)

        # concatenate with the encoder skip connection, then dropout
        # (alternative noted in the source: addupd = WeightedResidual()([prelu, skip_in]))
        dc1d = layers.Concatenate()([dc1d, skip_in])
        dc1d = layers.Dropout(rate=self.dropout_rate)(dc1d)
        return dc1d
Example #6
    # Assumed imports (not shown in the source): from keras import layers, regularizers, optimizers
    # from keras.layers import Input; from keras.models import Model
    def Multi_task_encoder_two(self, encoder_node_size, filter_size, pooling_size, En_L1_reg, En_L2_reg, De_L1_reg, De_L2_reg, Cl_L1_reg, Cl_L2_reg, Input_activ, Hidden_activ, Learning_rate):
        print('under construction')
        En_inputs = Input(shape=(self.data.shape[1] - 1,))
        self.encoder_node_size = encoder_node_size
        # encoder: three conv blocks (32 -> 64 -> 128 filters), each closed by max-pooling
        self.Encoder_layer = layers.Reshape((self.data.shape[1] - 1, 1), input_shape=(self.data.shape[1] - 1,), name='EL1')(En_inputs)
        self.Encoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), padding='same', activation=Input_activ, name='EL2')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), padding='same', activation=Hidden_activ, name='EL3')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), padding='same', activation=Hidden_activ, name='EL4')(self.Encoder_layer)
        self.Encoder_layer = layers.MaxPooling1D(pooling_size, name='EL5')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL6')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL7')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL8')(self.Encoder_layer)
        self.Encoder_layer = layers.MaxPooling1D(pooling_size, name='EL9')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL10')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL11')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL12')(self.Encoder_layer)
        self.Encoder_layer = layers.MaxPooling1D(pooling_size, name='EL13')(self.Encoder_layer)
        self.Encoder_layer = layers.Flatten(name='EL14')(self.Encoder_layer)
        E_out = layers.Dense(encoder_node_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, name='Encoder_output')(self.Encoder_layer)
        # decoder: mirror image of the encoder, rebuilt from the bottleneck
        self.Decoder_layer = layers.Reshape((encoder_node_size, 1), input_shape=(encoder_node_size,), name='DL1')(E_out)
        self.Decoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL2')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL3')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL4')(self.Decoder_layer)
        self.Decoder_layer = layers.UpSampling1D(pooling_size, name='DL5')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL6')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL7')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL8')(self.Decoder_layer)
        self.Decoder_layer = layers.UpSampling1D(pooling_size, name='DL9')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL10')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL11')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL12')(self.Decoder_layer)
        self.Decoder_layer = layers.Flatten(name='DL13')(self.Decoder_layer)
        D_out = layers.Dense(self.data.shape[1] - 1, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Input_activ, name="Decoder_output")(self.Decoder_layer)
        # classifier head: two dense layers on the bottleneck, softmax over the known classes
        self.Classifier_layer = layers.Dense(int(encoder_node_size / 2), kernel_regularizer=regularizers.l1_l2(l1=Cl_L1_reg, l2=Cl_L2_reg), activation=Hidden_activ, name='CL2')(E_out)
        self.Classifier_layer = layers.Dense(int(encoder_node_size / 4), kernel_regularizer=regularizers.l1_l2(l1=Cl_L1_reg, l2=Cl_L2_reg), activation=Hidden_activ, name='CL3')(self.Classifier_layer)
        cl_out = layers.Dense(int(len(self.uniques)), kernel_regularizer=regularizers.l1_l2(l1=Cl_L1_reg, l2=Cl_L2_reg), activation='softmax', name="Classifier_output")(self.Classifier_layer)
        self.mymodel = Model(En_inputs, [D_out, cl_out])
        losses = {"Decoder_output": "mse", "Classifier_output": "categorical_crossentropy"}
        metrics = {"Decoder_output": "mse", "Classifier_output": "accuracy"}
        loss_weights = {"Decoder_output": 1, "Classifier_output": 1}
        self.mymodel.compile(optimizer=optimizers.Adam(learning_rate=Learning_rate), loss=losses, loss_weights=loss_weights, metrics=metrics)
        self.build_multi_Encoder_Decoder_separately_two()
Example #7
    def build_network_vgg(self, encoder_node_size, filter_size, pooling_size, En_L1_reg, En_L2_reg, De_L1_reg, De_L2_reg, Cl_L1_reg, Cl_L2_reg, Input_activ, Hidden_activ, Learning_rate):
        # autoencoder-only variant of Multi_task_encoder_two above: same encoder and
        # decoder, but without the classifier head
        print('under construction')
        En_inputs = Input(shape=(self.data.shape[1] - 1,))
        self.encoder_node_size = encoder_node_size
        self.Encoder_layer = layers.Reshape((self.data.shape[1] - 1, 1), input_shape=(self.data.shape[1] - 1,), name='EL1')(En_inputs)
        self.Encoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), padding='same', activation=Input_activ, name='EL2')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), padding='same', activation=Hidden_activ, name='EL3')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), padding='same', activation=Hidden_activ, name='EL4')(self.Encoder_layer)
        self.Encoder_layer = layers.MaxPooling1D(pooling_size, name='EL5')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL6')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL7')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL8')(self.Encoder_layer)
        self.Encoder_layer = layers.MaxPooling1D(pooling_size, name='EL9')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL10')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL11')(self.Encoder_layer)
        self.Encoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, padding='same', name='EL12')(self.Encoder_layer)
        self.Encoder_layer = layers.MaxPooling1D(pooling_size, name='EL13')(self.Encoder_layer)
        self.Encoder_layer = layers.Flatten(name='EL14')(self.Encoder_layer)
        E_out = layers.Dense(encoder_node_size, kernel_regularizer=regularizers.l1_l2(l1=En_L1_reg, l2=En_L2_reg), activation=Hidden_activ, name='Encoder_output')(self.Encoder_layer)
        self.Decoder_layer = layers.Reshape((encoder_node_size, 1), input_shape=(encoder_node_size,), name='DL1')(E_out)
        self.Decoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL2')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL3')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(128, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL4')(self.Decoder_layer)
        self.Decoder_layer = layers.UpSampling1D(pooling_size, name='DL5')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL6')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL7')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(64, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL8')(self.Decoder_layer)
        self.Decoder_layer = layers.UpSampling1D(pooling_size, name='DL9')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL10')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL11')(self.Decoder_layer)
        self.Decoder_layer = layers.Conv1D(32, filter_size, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Hidden_activ, padding='same', name='DL12')(self.Decoder_layer)
        self.Decoder_layer = layers.Flatten(name='DL13')(self.Decoder_layer)
        D_out = layers.Dense(self.data.shape[1] - 1, kernel_regularizer=regularizers.l1_l2(l1=De_L1_reg, l2=De_L2_reg), activation=Input_activ, name="Decoder_output")(self.Decoder_layer)
        self.mymodel = Model(En_inputs, D_out)
        self.mymodel.compile(optimizer=optimizers.Adam(learning_rate=Learning_rate), loss='mse', metrics=['mse'])
        self.build_multi_Encoder_Decoder_separately_two()
Example #8
# Assumed imports (inferred from usage; 'loss_first' and 'weight.npy' come from
# elsewhere in the project):
# import numpy as np
# import keras
# from keras import layers
# from keras.layers import (Embedding, SpatialDropout1D, Conv1D, BatchNormalization,
#                           Activation, MaxPool1D, Lambda)
# from keras.models import Model
# from keras.utils import plot_model
def cnn_autoencoder(shape=(500, 64),
                    num_classes=2,
                    input_dim=93,
                    n=1,
                    use_attention=False,
                    **kwargs):
    input_array = keras.Input(shape=(shape[0], ), name='input')

    embedding = Embedding(input_dim=input_dim,
                          output_dim=shape[1],
                          weights=[np.load("weight.npy")])
    embedding.trainable = False
    embedding_output = embedding(input_array)

    _embed = SpatialDropout1D(0.25)(embedding_output)
    kernel_size = 3
    dilated_rate = 1  # defined but not used below

    num_filters_in = 64
    conv1d = Conv1D(filters=num_filters_in,
                    kernel_size=kernel_size,
                    padding="same")(_embed)
    b = BatchNormalization()(conv1d)
    r = Activation("elu")(b)
    x = r

    conv1 = Conv1D(filters=num_filters_in, kernel_size=1, padding="same")(x)
    b = BatchNormalization()(conv1)
    r = Activation("elu")(b)
    conv2 = Conv1D(filters=num_filters_in, kernel_size=3, padding="same")(r)
    b = BatchNormalization()(conv2)
    r = Activation("elu")(b)

    x = keras.layers.add([x, r])

    x = BatchNormalization()(x)
    x = Activation('elu')(x)
    x = MaxPool1D(pool_size=128, strides=128, data_format='channels_last')(x)
    fc = Activation("sigmoid", name="feature_output")(x)

    fc = layers.UpSampling1D(size=128)(fc)
    fc = BatchNormalization()(fc)
    fc = Activation('elu')(fc)  # add later
    # mid = Flatten(name="feature_output")(fc)

    conv2 = Conv1D(filters=num_filters_in, kernel_size=3, padding="same")(fc)
    b = BatchNormalization()(conv2)
    r = Activation("elu")(b)

    conv1 = Conv1D(filters=num_filters_in, kernel_size=1, padding="same")(r)
    b = BatchNormalization()(conv1)
    r = Activation("elu")(b)

    x = keras.layers.add([fc, r])
    conv1d = Conv1D(filters=num_filters_in,
                    kernel_size=kernel_size,
                    padding="same")(x)
    b = BatchNormalization()(conv1d)
    r = Activation("elu")(b)

    output = r

    output = Lambda(lambda x: keras.losses.mean_squared_error(x[0], x[1]),
                    name='loss',
                    output_shape=(1, ))([output, embedding_output])

    model = Model(inputs=input_array, outputs=output)

    model.compile(loss=loss_first, optimizer='adam')  # loss_first is defined elsewhere in the project
    model.summary()
    plot_model(model, "attention.png")
    return model
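Note the MaxPool1D(128)/UpSampling1D(128) round trip: with the default shape=(500, 64) the pooled length is 3 and the upsampled length is 384, which no longer matches the 500-step embedding_output compared in the final Lambda layer. A hedged sketch assuming a sequence length that is a multiple of 128, with a hypothetical random embedding matrix standing in for weight.npy (this also presumes the project's loss_first is importable):

import numpy as np

np.save("weight.npy", np.random.rand(93, 64))   # hypothetical embedding weights
model = cnn_autoencoder(shape=(512, 64), input_dim=93)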
Example #9
    def de_conv_1d_end(self, in_put, skip_in, filters, f_size, dr, upsampling_rate):
        # final decoder stage: upsample, then a tanh-activated causal convolution
        # (skip_in is accepted for interface symmetry with de_conv_1d but is not used)
        us = layers.UpSampling1D(size=upsampling_rate)(in_put)
        convupd = layers.Conv1D(filters, f_size, padding='causal', dilation_rate=dr, use_bias=True, activation='tanh')(us)
        return convupd
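Since de_conv_1d (Example #5) and de_conv_1d_end share one signature, they presumably chain inside the same decoder; a hypothetical wiring (tensor names invented for illustration):

# inside the same class, given a bottleneck tensor and encoder skips skip2/skip1:
x = self.de_conv_1d(bottleneck, skip2, filters=64, f_size=9, dr=1, upsampling_rate=2)
x = self.de_conv_1d(x, skip1, filters=32, f_size=9, dr=1, upsampling_rate=2)
out = self.de_conv_1d_end(x, None, filters=1, f_size=9, dr=1, upsampling_rate=2)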
Example #10
# Assumed imports (not shown in the source): from keras import layers, models
def UNet_networkstructure_old(rd_input,
                              conv_window_len,
                              maxpooling_len,
                              BN=True):

    initializer = 'he_normal'  # alternative: 'glorot_uniform'
    # model part
    conv1 = layers.Conv1D(64, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(rd_input)
    if BN: conv1 = layers.BatchNormalization()(conv1)
    #conv1 = layers.Activation('relu')(conv1)
    conv1 = layers.Conv1D(64, conv_window_len,  activation='relu', padding='same', \
        kernel_initializer=initializer)(conv1)
    if BN: conv1 = layers.BatchNormalization()(conv1)
    #conv1 = layers.Activation('relu')(conv1)
    pool1 = layers.MaxPooling1D(maxpooling_len[0])(conv1)

    conv2 = layers.Conv1D(128, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(pool1)
    if BN: conv2 = layers.BatchNormalization()(conv2)
    #conv2 = layers.Activation('relu')(conv2)
    conv2 = layers.Conv1D(128, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(conv2)
    if BN: conv2 = layers.BatchNormalization()(conv2)
    #conv2 = layers.Activation('relu')(conv2)
    pool2 = layers.MaxPooling1D(maxpooling_len[1])(conv2)

    conv3 = layers.Conv1D(256, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(pool2)
    if BN: conv3 = layers.BatchNormalization()(conv3)
    #conv3 = layers.Activation('relu')(conv3)
    conv3 = layers.Conv1D(256, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(conv3)
    if BN: conv3 = layers.BatchNormalization()(conv3)
    #conv3 = layers.Activation('relu')(conv3)
    drop3 = layers.Dropout(0.5)(conv3)
    pool3 = layers.MaxPooling1D(maxpooling_len[2])(drop3)

    conv4 = layers.Conv1D(512, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(pool3)
    if BN: conv4 = layers.BatchNormalization()(conv4)
    #conv4 = layers.Activation('relu')(conv4)
    conv4 = layers.Conv1D(512, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(conv4)
    if BN: conv4 = layers.BatchNormalization()(conv4)
    #conv4 = layers.Activation('relu')(conv4)
    drop4 = layers.Dropout(0.5)(conv4)

    up5 = layers.Conv1D(256, conv_window_len-1, activation='relu', padding='same', \
        kernel_initializer=initializer)(layers.UpSampling1D(maxpooling_len[3])(drop4))
    if BN: up5 = layers.BatchNormalization()(up5)
    #up5 = layers.Activation('relu')(up5)
    merge5 = layers.Concatenate(-1)([drop3, up5])
    conv5 = layers.Conv1D(256, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(merge5)
    if BN: conv5 = layers.BatchNormalization()(conv5)
    #conv5 = layers.Activation('relu')(conv5)

    up6 = layers.Conv1D(128, conv_window_len-1, activation='relu', padding='same', \
        kernel_initializer=initializer)(layers.UpSampling1D(maxpooling_len[4])(conv5))
    if BN: up6 = layers.BatchNormalization()(up6)
    #up6 = layers.Activation('relu')(up6)
    merge6 = layers.Concatenate(-1)([conv2, up6])
    conv6 = layers.Conv1D(128, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(merge6)
    if BN: conv6 = layers.BatchNormalization()(conv6)
    #conv6 = layers.Activation('relu')(conv6)
    conv6 = layers.Conv1D(128, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(conv6)
    if BN: conv6 = layers.BatchNormalization()(conv6)
    #conv6 = layers.Activation('relu')(conv6)

    up7 = layers.Conv1D(64, conv_window_len-1, activation='relu', padding='same', \
        kernel_initializer=initializer)(layers.UpSampling1D(maxpooling_len[5])(conv6))
    if BN: up7 = layers.BatchNormalization()(up7)
    #up7 = layers.Activation('relu')(up7)
    merge7 = layers.Concatenate(-1)([conv1, up7])
    conv7 = layers.Conv1D(64, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(merge7)
    if BN: conv7 = layers.BatchNormalization()(conv7)
    #conv7 = layers.Activation('relu')(conv7)
    conv7 = layers.Conv1D(64, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(conv7)
    if BN: conv7 = layers.BatchNormalization()(conv7)
    #conv7 = layers.Activation('relu')(conv7)

    conv8 = layers.Conv1D(2, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(conv7)
    if BN: conv8 = layers.BatchNormalization()(conv8)
    #conv8 = layers.Activation('relu')(conv8)
    conv8 = layers.Dropout(0.5)(conv8)
    conv9 = layers.Conv1D(1, 1, activation='sigmoid')(conv8)

    model = models.Model(rd_input, conv9)

    return model
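For the skip concatenations to align, the upsampling rates must mirror the pooling rates (maxpooling_len[3] == maxpooling_len[2], [4] == [1], [5] == [0]) and the input length must be divisible by their product. A hedged sketch with invented shapes:

rd_input = layers.Input(shape=(1024, 1))   # 1024 = 4 * 4 * 4 * 16
model = UNet_networkstructure_old(rd_input, conv_window_len=7,
                                  maxpooling_len=[4, 4, 4, 4, 4, 4])
model.compile(optimizer='adam', loss='binary_crossentropy')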
Example #11
# Assumed imports (not shown in the source): from keras import layers, models
# CRF is assumed to be keras_contrib.layers.CRF
def UNet_networkstructure_crf(rd_input,
                              conv_window_len,
                              maxpooling_len,
                              BN=True,
                              DropoutRate=0.5):

    initializer = 'he_normal'  # alternative: 'glorot_uniform'

    ##################### Conv1 #########################
    conv1 = layers.Conv1D(64, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(rd_input)
    if BN: conv1 = layers.BatchNormalization()(conv1)
    #conv1 = layers.Activation('relu')(conv1)

    conv1 = layers.Conv1D(64, conv_window_len,  activation='relu', padding='same', \
        kernel_initializer=initializer)(conv1)
    if BN: conv1 = layers.BatchNormalization()(conv1)

    #conv1 = layers.Activation('relu')(conv1)
    pool1 = layers.MaxPooling1D(maxpooling_len[0])(conv1)

    ##################### Conv2 ##########################
    conv2 = layers.Conv1D(128, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(pool1)
    if BN: conv2 = layers.BatchNormalization()(conv2)
    #conv2 = layers.Activation('relu')(conv2)

    conv2 = layers.Conv1D(128, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(conv2)
    if BN: conv2 = layers.BatchNormalization()(conv2)

    #conv2 = layers.Activation('relu')(conv2)
    pool2 = layers.MaxPooling1D(maxpooling_len[1])(conv2)

    ##################### conv3 ###########################
    conv3 = layers.Conv1D(256, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(pool2)
    if BN: conv3 = layers.BatchNormalization()(conv3)
    #conv3 = layers.Activation('relu')(conv3)

    conv3 = layers.Conv1D(256, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(conv3)
    if BN: conv3 = layers.BatchNormalization()(conv3)
    #conv3 = layers.Activation('relu')(conv3)
    if DropoutRate > 0:
        drop3 = layers.Dropout(DropoutRate)(conv3)
    else:
        drop3 = conv3
    pool3 = layers.MaxPooling1D(maxpooling_len[2])(drop3)

    ####################  conv4 (U bottle) #####################
    conv4 = layers.Conv1D(512, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(pool3)
    if BN: conv4 = layers.BatchNormalization()(conv4)
    #conv4 = layers.Activation('relu')(conv4)

    conv4 = layers.Conv1D(512, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(conv4)
    if BN: conv4 = layers.BatchNormalization()(conv4)
    #conv4 = layers.Activation('relu')(conv4)
    if DropoutRate > 0:
        drop4 = layers.Dropout(DropoutRate)(conv4)
    else:
        drop4 = conv4

    ################### upSampling, upConv5 ##########################
    up5 = layers.UpSampling1D(maxpooling_len[3])(drop4)
    merge5 = layers.Concatenate(-1)([drop3, up5])

    conv5 = layers.Conv1D(256, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(merge5)
    if BN: conv5 = layers.BatchNormalization()(conv5)
    #conv5 = layers.Activation('relu')(conv5)

    conv5 = layers.Conv1D(256, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(conv5)
    if BN: conv5 = layers.BatchNormalization()(conv5)

    ################### upConv 6 ##############################
    up6 = layers.UpSampling1D(maxpooling_len[4])(conv5)
    merge6 = layers.Concatenate(-1)([conv2, up6])

    conv6 = layers.Conv1D(128, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(merge6)
    if BN: conv6 = layers.BatchNormalization()(conv6)
    #conv6 = layers.Activation('relu')(conv6)

    conv6 = layers.Conv1D(128, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(conv6)
    if BN: conv6 = layers.BatchNormalization()(conv6)
    #conv6 = layers.Activation('relu')(conv6)

    ################### upConv 7 #########################
    up7 = layers.UpSampling1D(maxpooling_len[5])(conv6)
    merge7 = layers.Concatenate(-1)([conv1, up7])

    conv7 = layers.Conv1D(64, conv_window_len, activation='relu', padding='same',\
        kernel_initializer=initializer)(merge7)
    if BN: conv7 = layers.BatchNormalization()(conv7)
    #conv7 = layers.Activation('relu')(conv7)

    conv7 = layers.Conv1D(64, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(conv7)
    if BN: conv7 = layers.BatchNormalization()(conv7)
    #conv7 = layers.Activation('relu')(conv7)

    ################## final output ######################
    conv8 = layers.Conv1D(2, conv_window_len, activation='relu', padding='same', \
        kernel_initializer=initializer)(conv7)
    if BN: conv8 = layers.BatchNormalization()(conv8)
    #conv8 = layers.Activation('relu')(conv8)

    if DropoutRate > 0:
        conv8 = layers.Dropout(DropoutRate)(conv8)

    conv9 = layers.Conv1D(1, 1, activation='sigmoid')(conv8)
    crf = CRF(2, sparse_target=True)
    conv9 = crf(conv9)

    model = models.Model(rd_input, conv9)

    return (model, crf)
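Returning the CRF layer alongside the model presumably lets the caller compile with the CRF's own loss and metric; a hedged sketch assuming the keras-contrib CRF API:

rd_input = layers.Input(shape=(1024, 1))
model, crf = UNet_networkstructure_crf(rd_input, conv_window_len=7,
                                       maxpooling_len=[4, 4, 4, 4, 4, 4])
model.compile(optimizer='adam', loss=crf.loss_function, metrics=[crf.accuracy])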
Example #12
# Assumed imports (not shown in the source):
# import keras
# from keras import layers, optimizers
# from keras import backend as K
# from keras.models import Model
def build_vae_model(n_samples, n_ch):
    n_ch = 1  # the n_ch argument is overridden; the model is built single-channel
    ir_shape = (n_samples, n_ch)
    batch_size = 10  # defined but not used below
    latent_dim = 2

    input_ir = layers.Input(shape=ir_shape)

    # build the encoder
    x = layers.Conv1D(32, 16, padding='same', activation='relu')(input_ir)
    x = layers.MaxPool1D(2, padding='same')(x)
    x = layers.Conv1D(32, 16, padding='same', activation='relu')(x)
    x = layers.MaxPool1D(2, padding='same')(x)
    x = layers.Conv1D(32, 16, padding='same', activation='relu')(x)
    x = layers.MaxPool1D(2, padding='same')(x)
    x = layers.Flatten()(x)
    x = layers.Dense(8, activation='relu')(x)

    z_mean = layers.Dense(latent_dim)(x)
    z_log_var = layers.Dense(latent_dim)(x)

    def sampling(args):
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                                  mean=0.,
                                  stddev=1.)
        # reparameterization trick; note the conventional form scales by the standard
        # deviation K.exp(0.5 * z_log_var), whereas this code uses K.exp(z_log_var)
        return z_mean + K.exp(z_log_var) * epsilon

    z = layers.Lambda(sampling)([z_mean, z_log_var])

    encoder = Model(input_ir, z)
    encoder.summary()

    # build the decoder
    input_z = layers.Input(shape=K.int_shape(z)[1:])

    x = layers.Dense(8, activation='relu')(input_z)
    x = layers.Dense(n_samples * 4, activation='relu')(x)
    x = layers.Reshape((int(n_samples / 8), 32))(x)
    x = layers.Conv1D(32, 16, padding='same', activation='relu')(x)
    x = layers.UpSampling1D(2)(x)
    x = layers.Conv1D(32, 16, padding='same', activation='relu')(x)
    x = layers.UpSampling1D(2)(x)
    x = layers.Conv1D(32, 16, padding='same', activation='relu')(x)
    x = layers.UpSampling1D(2)(x)
    x = layers.Conv1D(n_ch, 16, padding='same', activation='tanh')(x)

    decoder = Model(input_z, x)
    decoder.summary()

    z_decoded = decoder(z)

    class CustomVariationalLayer(keras.layers.Layer):
        def vae_loss(self, x, z_decoded):
            x = K.flatten(x)
            z_decoded = K.flatten(z_decoded)
            xent_loss = keras.metrics.mean_squared_error(x, z_decoded)
            kl_loss = -5e-4 * K.mean(
                1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
            #kl_loss = 0.5 * K.sum(K.exp(z_log_var) + K.square(z_mean) - 1. - z_log_var, axis=1)

            return xent_loss + kl_loss

        def call(self, inputs):
            x = inputs[0]
            z_decoded = inputs[1]
            loss = self.vae_loss(x, z_decoded)
            self.add_loss(loss, inputs=inputs)
            return x

    y = CustomVariationalLayer()([input_ir, z_decoded])

    vae = Model(input_ir, y)
    vae.compile(optimizer=optimizers.Adam(), loss=None)
    vae.summary()

    return encoder, decoder, vae
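A hedged usage sketch (n_samples must be divisible by 8, since the encoder pools by 2 three times; the data is invented for illustration):

import numpy as np

encoder, decoder, vae = build_vae_model(n_samples=1024, n_ch=1)
x = np.random.rand(10, 1024, 1).astype('float32')
vae.fit(x, None, epochs=1, batch_size=10)   # the loss is added inside CustomVariationalLayer
z = encoder.predict(x)                      # (10, 2) latent codes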
Example #13
File: vae.py  Project: tedinburgh/deepclean
# (Excerpt: 'sampling', 'latent_dim', 'shape', 'shape_mp1' and 'shape_mp2' are defined
# earlier in vae.py.)
latent_mean = layers.Input(shape=(latent_dim, ))
latent_log_var = layers.Input(shape=(latent_dim, ))
z = layers.Lambda(sampling,
                  output_shape=(latent_dim, ))([latent_mean, latent_log_var])
sampler = models.Model([latent_mean, latent_log_var], z)

# ---------------------------------------------------------------------------
# DECODER

latent_input = layers.Input(shape=(latent_dim, ))
x = layers.Dense(16, activation='relu')(latent_input)
x = layers.Dense(np.prod(shape[1:]), activation='relu')(x)
x = layers.Dropout(rate=0.1)(x)
x = layers.Reshape(shape[1:])(x)
x = layers.Conv1D(16, 15, padding='same', activation='relu')(x)
x = layers.UpSampling1D(5)(x)
cropping_up1 = K.int_shape(x)[1] - shape_mp2[1]
x = layers.Cropping1D((0, cropping_up1))(x)
x = layers.Dropout(rate=0.1)(x)
x = layers.Conv1D(8, 15, padding='same', activation='relu')(x)
x = layers.UpSampling1D(5)(x)
cropping_up2 = K.int_shape(x)[1] - shape_mp1[1]
x = layers.Cropping1D((0, cropping_up2))(x)
output = layers.Conv1D(1, 15, padding='same', activation=None)(x)

decoder = models.Model(latent_input, output)
decoder.summary()

# ---------------------------------------------------------------------------
# VAE