Example #1
def createModel(_convSize, _maxPoolingSize, _activation):
    inputWindow = Input(shape=(maxLength, 1))  # maxLength is assumed to be defined at module scope

    layer = Conv1D(_convSize, 2, activation='relu',
                   padding='same')(inputWindow)
    layer = MaxPooling1D(_maxPoolingSize, padding='same')(layer)
    layer = Conv1D(1, 2, activation='relu', padding='same')(layer)
    encoded = MaxPooling1D(1, padding='same')(layer)

    encoder = Model(inputWindow, encoded)

    layer = Conv1D(1, 2, activation='relu', padding='same')(encoded)
    layer = UpSampling1D(1)(layer)
    layer = Conv1D(_convSize, 1, activation='relu')(layer)
    layer = UpSampling1D(_maxPoolingSize)(layer)
    decoded = Conv1D(1, 2, activation=_activation, padding='same')(layer)

    autoencoder = Model(inputWindow, decoded)
    autoencoder.summary()

    autoencoder.compile(optimizer='adam',
                        loss='mean_squared_error',
                        metrics=['mean_squared_error', 'mean_absolute_error'])

    return encoder, autoencoder, encoded
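A minimal usage sketch for the function above, assuming `maxLength` is defined at module scope and `x_train` is an array shaped `(n, maxLength, 1)` (both names are assumptions):

# Hypothetical usage of createModel
encoder, autoencoder, encoded = createModel(_convSize=16, _maxPoolingSize=2,
                                            _activation='sigmoid')
autoencoder.fit(x_train, x_train, epochs=10, batch_size=32)
codes = encoder.predict(x_train)  # compressed windows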
Example #2
def build_model(input_shape):
    l0 = Input(shape=input_shape)

    #l11 = Lambda(lambda x: x[:, :5])(l0)
    #l12 = Lambda(lambda x: x[:, 5:])(l0)

    x = Dense(96, activation='relu')(l0)
    x = Reshape((96, 1))(x)
    x = Conv1D(48, 5, activation='relu', padding='same')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(48, 3, activation='relu', padding='same')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(1, 5, activation='relu', padding='same')(x)
    #x = Flatten()(x)
    #x = Concatenate()([x, l11])
    #x = Dense(24, activation='relu')(x)
    #x = Reshape((24,1))(x)
    x = UpSampling1D(size=2)(x)
    x = Conv1D(48, 3, activation='relu', padding='same')(x)
    x = UpSampling1D(size=2)(x)
    x = Conv1D(48, 5, activation='relu', padding='same')(x)
    x = Conv1D(1, 5, padding='same')(x)
    x = Flatten()(x)

    model = Model(inputs=l0, outputs=x)
    model.compile(loss='mean_squared_error', optimizer='Adam')
    model.summary()  # summary() prints directly and returns None
    return model
Example #3
    def build(self):
        model = Sequential()

        model.add(InputLayer(input_shape=(self.n_inputs, 1), name="in"))

        # encoder = Sequential(name="encoder")
        model.add(Conv1D(16, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPool1D(2))
        model.add(Conv1D(32, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPool1D(2))
        model.add(Conv1D(32, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPool1D(2))
        model.add(Conv1D(64, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(MaxPool1D(2))
        model.add(Conv1D(128, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))

        model.add(MaxPool1D(2, name="latent"))
        # model.add(encoder)

        # decoder = Sequential(name="decoder")
        model.add(Conv1D(128, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(UpSampling1D(2))
        model.add(Conv1D(64, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(UpSampling1D(2))
        model.add(Conv1D(32, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(UpSampling1D(2))
        model.add(Conv1D(32, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(UpSampling1D(2))
        model.add(Conv1D(16, 3, padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        model.add(UpSampling1D(2))
        model.add(Conv1D(1, 3, name="out", padding="same", use_bias=False))
        model.add(BatchNormalization())
        model.add(Activation("relu"))
        # model.add(decoder)

        model.summary()
        model.compile(optimizer="adam", loss="mse", metrics=["acc"])
        self.model = model

        self.build_encoder()
Example #4
def deep_1d_autoencoder():

	# ENCODER
	input_sig = Input(batch_shape=(None,800,1))
	x = Conv1D(256,32, activation='relu', padding='same')(input_sig)
	x1 = MaxPooling1D(2)(x)
	x2 = Conv1D(256,32, activation='relu', padding='same')(x1)
	x3 = MaxPooling1D(2)(x2)
	x4 = Conv1D(128,16, activation='relu', padding='same')(x3)
	x5 = MaxPooling1D(2)(x4)
	x6 = Conv1D(128,16, activation='relu', padding='same')(x5)
	x7 = MaxPooling1D(2)(x6)
	x8 = Conv1D(64,8, activation='relu', padding='same')(x7)
	x9 = MaxPooling1D(2)(x8)
	flat = Flatten()(x9)
	encoded = Dense(32, activation='relu')(flat)

	# DECODER: expand the 32-dim code back to the (25, 64) feature map before upsampling
	d = Dense(25 * 64, activation='relu')(encoded)
	d = Reshape((25, 64))(d)
	x8_ = Conv1D(64, 8, activation='relu', padding='same')(d)
	x7_ = UpSampling1D(2)(x8_)
	x6_ = Conv1D(128, 16, activation='relu', padding='same')(x7_)
	x5_ = UpSampling1D(2)(x6_)
	x4_ = Conv1D(128, 16, activation='relu', padding='same')(x5_)
	x3_ = UpSampling1D(2)(x4_)
	x2_ = Conv1D(256, 32, activation='relu', padding='same')(x3_)
	x1_ = UpSampling1D(2)(x2_)
	x_ = Conv1D(256, 32, activation='relu', padding='same')(x1_)
	upsamp = UpSampling1D(2)(x_)
	flat = Flatten()(upsamp)
	decoded = Dense(800,activation = 'relu')(flat)
	decoded = Reshape((800,1))(decoded)
	
	#print("shape of decoded {}".format(keras.int_shape(decoded)))

	return input_sig, decoded
Example #5
def define_generator(latent_dim):
    # image generator input
    in_lat = Input(shape=(latent_dim, ))
    # project the latent vector to 100 units, then reshape to a length-1 sequence
    n_nodes = 100 * 1
    gen = Dense(n_nodes)(in_lat)
    gen = LeakyReLU(alpha=0.2)(gen)
    gen = Reshape((1, 100))(gen)

    gen = UpSampling1D(size=12)(gen)
    gen = Conv1D(64, 3, strides=2, padding='same')(gen)
    gen = LeakyReLU(alpha=0.2)(gen)

    gen = UpSampling1D(size=8)(gen)
    gen = Conv1D(64, 3, strides=2, padding='same')(gen)
    gen = LeakyReLU(alpha=0.2)(gen)
    # output
    out_layer = Conv1D(1, 2, activation='tanh', padding='same')(gen)
    # define model
    model = Model(in_lat, out_layer)
    return model


##### plot the Generator
# g_model = define_generator(100)
# plot_model(g_model, to_file='./images/generator_hybrid.png', show_shapes=True, show_layer_names=True)
Example #6
def make_model():
    num_leads_signal = 12
    model = Sequential()

    model.add(
        Conv1D(32,
               kernel_size=8,
               activation=K.elu,
               input_shape=(None, num_leads_signal),
               padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))

    model.add(Bidirectional(LSTM(30, return_sequences=True)))

    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(Dense(4, activation='softmax'))

    metric = Metrics()
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', metric.Se, metric.PPV])
    return model
Example #7
def make_model(num_leads_signal):
    model = Sequential()

    model.add(
        Conv1D(32,
               kernel_size=8,
               activation=K.elu,
               input_shape=(None, num_leads_signal),
               padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))

    model.add(Bidirectional(LSTM(50, return_sequences=True)))

    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(Dense(4, activation='softmax'))

    return model
Example #8
def autoencoder_Conv2(X):
    inputs = Input(shape=(X.shape[1], X.shape[2]))
    L1 = Conv1D(16, 4, activation="relu", dilation_rate=1,
                padding="same")(inputs)
    L2 = MaxPooling1D(2)(L1)
    L3 = Conv1D(32, 4, activation="relu", dilation_rate=2, padding="same")(L2)
    L4 = MaxPooling1D(2)(L3)
    L5 = Conv1D(64, 4, activation="relu", dilation_rate=2, padding="same")(L4)
    L6 = MaxPooling1D(4)(L5)
    L7 = Conv1D(128, 8, activation="relu", dilation_rate=2, padding="same")(L6)
    encoded = MaxPooling1D(4)(L7)
    L7 = Conv1D(128, 8, activation="relu", dilation_rate=2,
                padding="same")(encoded)
    L8 = UpSampling1D(4)(L7)
    L9 = Conv1D(64, 4, activation="relu", dilation_rate=2, padding="same")(L8)
    L10 = UpSampling1D(4)(L9)
    L11 = Conv1D(32, 4, activation="relu", dilation_rate=2,
                 padding="same")(L10)
    L12 = UpSampling1D(2)(L11)
    L13 = Conv1D(16, 3, activation="relu", dilation_rate=1,
                 padding="same")(L12)
    L14 = UpSampling1D(2)(L13)
    # mirror the encoder's pooling (2, 2, 4, 4) so the output length matches the input
    output = Conv1D(1, 4, activation="relu", dilation_rate=1,
                    padding="same")(L14)
    model = Model(inputs=inputs, outputs=output)
    return model
Example #9
def define_generator(latent_dim):
    # image generator input
    in_lat = Input(shape=(latent_dim, ))
    n_nodes = latent_dim * 1
    gen = Dense(n_nodes)(in_lat)
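    # Note: UpSampling1D expects a 3D (batch, steps, channels) tensor, so a
    # reshape such as Reshape((n_nodes, 1)) would be needed here for the
    # upsampling calls below to run (hypothetical fix).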
    gen = UpSampling1D(size=2)(gen)
    gen = Dense(200, activation='relu')(gen)
    gen = Dense(300, activation='relu')(gen)
    gen = Dense(600, activation='relu')(gen)
    gen = UpSampling1D(size=2)(gen)
    gen = Dense(1184, activation='relu')(gen)
    gen = Reshape((592, 2))(gen)
    gen = Dense(1200, activation='relu')(gen)
    gen = Conv1D(100, (3), strides=(2), padding='same')(gen)
    gen = UpSampling1D(size=2)(gen)
    gen = Dense(2400, activation='relu')(gen)
    gen = UpSampling1D(size=2)(gen)
    gen = Dense(1200, activation='relu')(gen)
    gen = Conv1D(600, (3), strides=(2), padding='same')(gen)
    gen = Dense(600, activation='relu')(gen)
    gen = Conv1D(600, (3), strides=(2), padding='same')(gen)
    gen = Dense(296, activation='sigmoid')(gen)
    # gen = Reshape((600, 2))(gen)
    out_layer = Dense(1, activation='sigmoid')(gen)

    # output
    # define model
    model = Model(in_lat, out_layer)
    return model
Example #10
    def build(self):
        model = Sequential()

        model.add(InputLayer(input_shape=(self.n_inputs, 1), name="in"))

        encoder = Sequential(name="encoder")
        encoder.add(Conv1D(512, 3, activation="relu", padding="same"))
        encoder.add(MaxPool1D(2))
        encoder.add(Conv1D(256, 3, activation="relu", padding="same"))
        encoder.add(MaxPool1D(2))
        encoder.add(Conv1D(128, 3, activation="relu", padding="same"))
        encoder.add(MaxPool1D(2))
        encoder.add(Conv1D(64, 3, activation="relu", padding="same"))
        model.add(encoder)

        model.add(MaxPool1D(2, name="latent"))

        decoder = Sequential(name="decoder")
        decoder.add(Conv1D(64, 2, activation="relu", padding="same"))
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(128, 2, activation="relu", padding="same"))
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(256, 2, activation="relu", padding="same"))
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(512, 2, activation="relu", padding="same"))
        decoder.add(UpSampling1D(2))
        model.add(decoder)

        model.add(Conv1D(1, 2, name="out", padding="same"))

        model.summary()
        model.compile(optimizer="adam", loss="mse")
        self.model = model
Example #11
def BuildCNNNetRaw(input_size, lost):  # 'lost' is the loss function; the decoder's first Dense layer has 126 units
    
    input_signal = Input(shape=(input_size,2))

    # The signal is 1-D with two channels, so the input shape is (input_size, 2).
    x = ZeroPadding1D(3)(input_signal)  # pad three zeros at each end of the signal
    x = Conv1D(10,5, activation='linear', padding='same')(x)  # each layer is applied to the previous output, and so on below
    x = Conv1D(20,5, activation='relu',padding='same')(x)
    x = MaxPooling1D((2))(x)
    x = Conv1D(5,5, activation='relu', padding='same')(x)
    x = MaxPooling1D((4))(x)
    x = Conv1D(2,5, activation='relu', padding='same')(x)
    x = MaxPooling1D((2))(x)
    x = Conv1D(5,5, activation='relu', padding='same')(x)
    x = MaxPooling1D((2))(x)
    x = Flatten()(x)
    encoded = Dense(32, activation='relu')(x)

    x = Dense(126, activation='relu')(encoded)
    x = Reshape((63,2))(x)
    x = UpSampling1D((2))(x)
    x = Conv1D(5,5, activation='relu',padding='same')(x)
    x = UpSampling1D((2))(x)
    decoded = Conv1D(2,3, activation='linear')(x)    
    
    autoencoder = Model(input_signal, decoded)
    #Adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, decay=0.0)
    autoencoder.compile(optimizer='adam', loss=lost)
    
    encoder = Model(inputs=input_signal, outputs=encoded)
    encoder.compile(optimizer='adam', loss=lost)
    
    return autoencoder, encoder  # returns the two different models, already compiled.
Example #12
    def get_unit(self,
                 name,
                 prev,
                 n_out,
                 dropout=False,
                 activate=False,
                 skip=False,
                 bn=False,
                 nl_activ=False):

        inp = prev

        kernel_reg = None
        if self.do_weight_reg:
            kernel_reg = l2(self.do_weight_reg)
        dense_layer = Dense(n_out,
                            use_bias=True,
                            name="%s_dense" % name,
                            kernel_regularizer=kernel_reg)
        dense = dense_layer(prev)

        if bn:
            dense = BatchNormalization(name="%s_bn" % name, momentum=.5)(dense)

        if dropout:
            dense = Dropout(dropout, name="%s_dropout" % name)(dense)

        if nl_activ != False:
            typ = "sigmoid" if type(nl_activ) != str else nl_activ
            output_layer = Activation(typ, name="%s_activ" % name)
        else:
            ## output_layer = Activation("relu",name="%s_activ"%name)
            ## output_layer = LeakyReLU(name="%s_activ"%name)
            output_layer = PReLU(name="%s_activ" % name)
        output = output_layer(dense)

        if skip:
            if dense_layer.output_shape[2] > dense_layer.input_shape[2]:
                # integer division: UpSampling1D expects an int size
                up_layer = UpSampling1D(dense_layer.output_shape[2] //
                                        dense_layer.input_shape[2],
                                        name="%s_up" % name)
                up = up_layer(inp)
                up = Reshape(dense_layer.output_shape[1:],
                             name="%s_up_reshape" % name)(up)
                print(up_layer.input_shape, up_layer.output_shape)
            else:
                up = inp
                if dense_layer.output_shape[2] < dense_layer.input_shape[2]:
                    up_dense_layer = UpSampling1D(dense_layer.input_shape[2] //
                                                  dense_layer.output_shape[2],
                                                  name="%s_up" % name)
                    output = up_dense_layer(output)
                    output = Reshape(dense_layer.input_shape[1:],
                                     name="%s_up_reshape" % name)(output)
                    print(up_dense_layer.input_shape,
                          up_dense_layer.output_shape)

            output = Add(name="%s_skip" % name)([output, up])

        return output
Example #13
def make_z2x_upsampling():
    encoded = Input(shape=(6, 1), name='encoder_input')
    x = Conv1D(8, 3, activation='relu', padding='same')(encoded)
    x = UpSampling1D(3)(x)
    x = Conv1D(8, 3, activation='relu')(x)
    x = UpSampling1D(3)(x)
    x = Conv1D(16, 3, activation='relu')(x)
    x = UpSampling1D(3)(x)
    x = Conv1D(16, 3, activation='relu')(x)
    x = UpSampling1D(3)(x)
    x = Conv1D(16, 3, activation='relu')(x)
    x = UpSampling1D(3)(x)
    # Using conv only, the results are 48 dim
    decoded = Conv1D(1, 3, activation='sigmoid', padding='same')(x)
    # Thus, I'm using Dense to convert it to 784
    # decoded = Dense(784, activation='sigmoid')(Flatten()(x))
    z2x_model = Model(encoded, Flatten()(decoded), name='z2x')

    def z2x(z):
        z = np.expand_dims(z, 2)
        z = tf.convert_to_tensor(z, dtype='float32')
        val = K.eval(z2x_model(z))
        # return np.squeeze(val)
        return val

    return z2x
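A hedged usage sketch: `z2x` maps a batch of 6-dimensional codes to flattened decoded signals (the batch size here is arbitrary):

# Hypothetical usage of make_z2x_upsampling
z2x = make_z2x_upsampling()
decoded = z2x(np.random.randn(2, 6))  # (2, 6) codes -> (2, N) flattened outputs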
Example #14
def autoencoder_ConvDNN(X):
    inputs = Input(shape=(X.shape[1], X.shape[2]))
    L1 = Conv1D(32, 3, activation="relu", padding="same")(inputs)  # (T, 32)
    # x = BatchNormalization()(x)
    L2 = MaxPooling1D(2, padding="same")(L1)  # (ceil(T/2), 32)
    L3 = Conv1D(10, 3, activation="relu", padding="same")(L2)  # (ceil(T/2), 10)
    # x = BatchNormalization()(x)
    encoded = MaxPooling1D(4, padding="same")(L3)  # (ceil(T/8), 10)
    x = Flatten()(encoded)
    x = Dense(64, activation='relu')(x)
    x = Dense(20, activation='relu')(x)
    x = Dense(10, activation='relu')(x)
    x = Dense(20, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    x = Dense(130, activation='relu')(x)
    x = Reshape((13, 10))(x)  # the fixed decoder output is (102, 1), so T = X.shape[1] should be 102
    L4 = Conv1D(10, 3, activation="relu", padding="same")(x)  # (13, 10)
    # x = BatchNormalization()(x)
    L5 = UpSampling1D(4)(L4)  # (52, 10)
    L6 = Conv1D(32, 2, activation='relu')(L5)  # (51, 32) -- 'valid' convolution
    # x = BatchNormalization()(x)
    L7 = UpSampling1D(2)(L6)  # (102, 32)
    output = Conv1D(1, 3, activation='sigmoid', padding='same')(L7)  # (102, 1)
    model = Model(inputs=inputs, outputs=output)
    return model
Example #15
File: extractor.py  Project: pa1511/REPD
    def __init_model(self, vecs_shape):
        print(vecs_shape)
        input_vec = Input(shape=(vecs_shape[1], 1))
        x = Convolution1D(16, 3, activation='relu', padding='same')(input_vec)
        x = MaxPooling1D(2, padding='same')(x)
        x = Convolution1D(8, 3, activation='relu', padding='same')(x)
        x = MaxPooling1D(2, padding='same')(x)
        x = Convolution1D(8, 3, activation='relu', padding='same')(x)
        encoder = MaxPooling1D(2, padding='same')(x)

        y = Flatten()(x)
        y = Dense(100, activation='softmax')(y)

        x = Convolution1D(8, 3, activation='relu', padding='same')(encoder)
        x = UpSampling1D(2)(x)
        x = Convolution1D(8, 3, activation='relu', padding='same')(x)
        x = UpSampling1D(2)(x)
        x = Convolution1D(16, 3, activation='relu', padding='same')(x)
        x = UpSampling1D(2)(x)
        decoder = Convolution1D(1, 3, activation='sigmoid', padding='same')(x)

        encoder_model = Model(input_vec, y)
        autoencoder = Model(input_vec, decoder)

        return autoencoder, encoder_model
Example #16
def autoencoder_ConvLSTM(X):
    inputs = Input(shape=(X.shape[1], X.shape[2]))
    L1 = Conv1D(16, 3, activation="relu", padding="same")(inputs)  # (T, 16)
    # x = BatchNormalization()(x)
    L2 = MaxPooling1D(4, padding="same")(L1)  # (ceil(T/4), 16)
    L3 = Conv1D(10, 3, activation="relu", padding="same")(L2)  # (ceil(T/4), 10)
    # x = BatchNormalization()(x)
    encoded = MaxPooling1D(4, padding="same")(L3)  # (ceil(T/16), 10)
    x = Reshape((70, 1))(encoded)  # requires ceil(T/16) == 7, e.g. T == 108

    x = LSTM(32,
             activation='relu',
             return_sequences=False,
             kernel_regularizer=regularizers.l2(0.00))(x)
    x = RepeatVector(70)(x)
    x = LSTM(32, activation='relu', return_sequences=True)(x)
    out = TimeDistributed(Dense(1))(x)

    x = Reshape((7, 10))(out)
    L4 = Conv1D(10, 3, activation="relu", padding="same")(x)  # (7, 10)
    # x = BatchNormalization()(x)
    L5 = UpSampling1D(4)(L4)  # (28, 10)
    L6 = Conv1D(32, 2, activation='relu')(L5)  # (27, 32) -- 'valid' convolution
    # x = BatchNormalization()(x)
    L7 = UpSampling1D(4)(L6)  # (108, 32)
    output = Conv1D(1, 3, activation='sigmoid', padding='same')(L7)  # (108, 1)
    model = Model(inputs=inputs, outputs=output)
    return model
Example #17
def _create_model_CNN():
    model = Sequential()

    # _IN_X (timesteps) and _IN_Y (channels) are assumed module-level constants
    model.add(
        Conv1D(filters=32,
               kernel_size=3,
               padding='same',
               activation='tanh',
               input_shape=(_IN_X, _IN_Y)))
    model.add(MaxPooling1D(2))
    model.add(
        Conv1D(filters=64, kernel_size=3, padding='same', activation='tanh'))
    model.add(MaxPooling1D(2))
    model.add(
        Conv1D(filters=128, kernel_size=3, padding='same', activation='tanh'))

    model.add(
        Conv1D(filters=128, kernel_size=3, padding='same', activation='tanh'))
    model.add(UpSampling1D(2))
    model.add(
        Conv1D(filters=64, kernel_size=3, padding='same', activation='tanh'))
    model.add(UpSampling1D(2))
    model.add(
        Conv1D(filters=1, kernel_size=3, padding='same', activation='tanh'))

    return model
Example #18
def main(input_file, model_file):
    assert os.path.exists(input_file)
    print("Loading data")
    data = pd.read_csv(input_file, header=None)
    data = data.values
    sdata = data.shape
    data = np.reshape(data, [sdata[0], sdata[1], 1])
    print("Loaded data")

    # Autoencoder parameters
    inputDim = sdata[1]
    latentDim = 64
    filters = 128
    dictionarySize = 22
    lr = 1e-4
    epochs = 50
    batch_size = 512
    validation_split = 0.1

    # Encoder
    x0 = Input(shape=(inputDim, 1), name='encoder_input')
    x1 = Conv1D(filters, 7, padding='same', activation='relu',
                name='conv1')(x0)
    x2 = MaxPooling1D(2, name='max_pool1')(x1)
    x3 = Conv1D(filters, 5, padding='same', activation='relu',
                name='conv2')(x2)
    x4 = MaxPooling1D(2, name='max_pool2')(x3)
    sx4 = K.int_shape(x4)
    x5 = Flatten()(x4)
    sx5 = K.int_shape(x5)
    latent = Dense(latentDim, name='latent_vector')(x5)

    # Decoder
    y1 = Dense(sx5[1])(latent)
    y2 = Reshape((sx4[1], sx4[2]))(y1)
    y3 = UpSampling1D(2, name='up_sample1')(y2)
    y4 = Conv1D(filters, 5, padding='same', activation='relu',
                name='decon1')(y3)
    y5 = UpSampling1D(2, name='up_sample2')(y4)
    output = Conv1D(dictionarySize,
                    7,
                    padding='same',
                    activation='softmax',
                    name='decon2')(y5)

    # Autoencoder
    autoencoder = Model(x0, output, name='autoencoder')
    autoencoder.summary()
    autoencoder.compile(loss='categorical_crossentropy', optimizer=Adam(lr=lr))
    hist = autoencoder.fit(data,
                           to_categorical(data),
                           epochs=epochs,
                           batch_size=batch_size,
                           validation_split=validation_split)

    autoencoder.save(model_file)
    hist = pd.DataFrame(hist.history)
    with open(model_file + '.loss', mode='w') as f:
        hist.to_csv(f)
Example #19
def make_timeseries_regressor(window_size,
                              filter_length,
                              nb_input_series=1,
                              nb_outputs=1,
                              nb_filter=4):
    # Conv1D input shape:  (batch_size, steps, input_dim) -- steps is the
    # sequence length, input_dim the per-step vector length.
    # Conv1D output shape: (batch_size, new_steps, filters) -- with 'same'
    # padding new_steps == steps; filters can be any value.
    # filter_length is the kernel size; nb_outputs typically equals
    # nb_input_series for sequence-to-sequence regression.
    # See the shape-check sketch after this function.
    model = Sequential((
        Conv1D(filters=nb_filter,
               kernel_size=filter_length,
               padding='same',
               activation='relu',
               input_shape=(window_size, nb_input_series)),
        MaxPooling1D(
            padding='same'),  # Downsample the output of convolution by 2X.
        BatchNormalization(epsilon=1e-5),
        Conv1D(filters=int(nb_filter / 2),
               kernel_size=filter_length,
               padding='same',
               activation='relu'),
        MaxPooling1D(padding='same'),  # seq_len/4
        BatchNormalization(epsilon=1e-5),

        # add another layer but not helpful
        # results: loss=1.5e-5, val_loss=2.2e-5
        #Conv1D(filters=int(nb_filter/2), kernel_size=filter_length, padding='same', activation='relu'),
        #AveragePooling1D(padding='same'), # seq_len/4
        #BatchNormalization(epsilon=1e-5),
        #UpSampling1D(),
        #Conv1D(filters=int(nb_filter/2), kernel_size=filter_length, padding='same', activation='relu'),
        #BatchNormalization(epsilon=1e-5),
        ## 3rd layer end
        UpSampling1D(),
        Conv1D(filters=int(nb_filter / 2),
               kernel_size=filter_length,
               padding='same',
               activation='relu'),
        BatchNormalization(epsilon=1e-5),
        UpSampling1D(),
        Conv1D(filters=nb_filter,
               kernel_size=filter_length,
               padding='same',
               activation='relu'),
        BatchNormalization(epsilon=1e-5),
        Dense(
            nb_outputs, activation='linear'
        ),  # For binary classification, change the activation to 'sigmoid'
        # output: nb_filter*seq_len/4, nb_outputs
    ))
    #model.compile(loss='mse', optimizer='sgd', metrics=['mae', 'acc'])
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    # To perform (binary) classification instead:
    # model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['binary_accuracy'])
    return model
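To verify the shape flow described in the comments, a small hedged check (the argument values are arbitrary):

# Hypothetical shape check for make_timeseries_regressor
m = make_timeseries_regressor(window_size=32, filter_length=3,
                              nb_input_series=1, nb_outputs=1, nb_filter=4)
for layer in m.layers:
    print(layer.name, layer.output_shape)  # steps: 32 -> 16 -> 8 -> 16 -> 32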
Example #20
def build_model(X_train, X_val=None):
    epochs = 145
    batch_size = 8
    dim_X = X_train[0].shape
    input_data = Input(shape=dim_X)  # dim_X is already a shape tuple
    # Encoder-------------------------------------------
    x = Conv1D(64, kernel_size=5, activation='relu',
               padding='same')(input_data)
    x = MaxPooling1D(5)(x)
    x = Conv1D(32, kernel_size=5, activation='relu', padding='same')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(1, kernel_size=5, activation='relu', padding='same')(x)
    x = MaxPooling1D(5)(x)
    x = Flatten()(x)
    x = Dense(19, activation='relu')(x)
    # -------------------------------------------------------------------

    # latent space--------------------------------------
    encoded = Dense(10, activation='relu')(x)
    encoder = Model(input_data, encoded)
    # -----------------------------------------------------

    # Decoder-------------------------------------------------------
    x = Dense(19, activation='relu')(encoded)
    x = Reshape((19, 1))(x)
    x = UpSampling1D(3)(x)
    x = Conv1D(64, kernel_size=3, activation='relu', padding='same')(x)
    x = UpSampling1D(5)(x)
    x = Conv1D(32, 3, activation='relu', padding='same')(x)
    x = UpSampling1D(5)(x)
    x = Conv1D(64, kernel_size=3, padding='same', activation='relu')(x)
    x = UpSampling1D(5)(x)
    decoded = Conv1D(248, 3, activation='sigmoid', padding='same')(x)
    # ---------------------------------------------------------------

    autoencoder = Model(input_data, decoded)
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

    history = autoencoder.fit(X_train,
                              X_train,
                              epochs=epochs,
                              batch_size=batch_size,
                              validation_data=(X_val, X_val) if X_val is not None else None,
                              verbose=1)  # autoencoder: the target is the input itself

    # summarize loss & val_loss
    plt.plot(history.history['loss'])
    try:
        plt.plot(history.history['val_loss'])
    except KeyError:
        print("No validation set provided")

    plt.title('loss')
    plt.ylabel('Loss & val_loss')
    plt.xlabel('epoch')
    plt.legend(['loss', 'val_loss'], loc='upper left')
    plt.show()
    return encoder
Example #21
def getConvAutoEncoderModel(input_length, x_train=None, x_test=None):
    #x_train = np.resize(x_train, (x_train.shape[0], input_length, 1)).astype(np.float32)
    #x_test = np.resize(x_test, (x_test.shape[0], input_length, 1)).astype(np.float32)
    input_sample = Input(shape=(input_length, 1))
    x = Convolution1D(32, 11, border_mode='same',
                      activation="tanh")(input_sample)
    #x = MaxPooling1D(pool_length=2, stride=None, border_mode="valid")(x)
    #x = GaussianDropout(0.1)(x)
    x = Convolution1D(32, 5, border_mode='same', activation="tanh")(x)
    x = MaxPooling1D(pool_length=2, stride=None, border_mode="valid")(x)
    x = Convolution1D(64, 3, border_mode='same', activation="tanh")(x)
    x = MaxPooling1D(pool_length=2, stride=None, border_mode="valid")(x)
    x = Convolution1D(64, 3, border_mode='same', activation="tanh")(x)
    encoded = MaxPooling1D(pool_length=2, stride=None, border_mode="valid")(x)

    x = UpSampling1D(length=2)(encoded)
    x = Convolution1D(64, 3, border_mode='same', activation="tanh")(x)
    x = UpSampling1D(length=2)(x)
    x = Convolution1D(64, 3, border_mode='same', activation="tanh")(x)
    x = UpSampling1D(length=2)(x)
    #x = GaussianDropout(0.25)(x)
    x = Convolution1D(32, 5, border_mode='same', activation="tanh")(x)
    #x = UpSampling1D(length=2)(x)
    #x = GaussianDropout(0.1)(x)
    decoded = Convolution1D(32, 11, border_mode='same', activation="tanh")(x)

    autoencoder = Model(input_sample, decoded)
    #autoencoder.summary()
    #encoder = Model(input=input_sample, output=encoded)
    #encoder.summary()
    #encoded_input = Input(shape=(128,))
    #decoder_layers = autoencoder.layers[9:]
    #decoder = Model(input=encoded_input, output=decoder_layers(encoded_input))
    #decoder.summary()

    autoencoder.compile(optimizer='adadelta', loss='mean_squared_error')
    autoencoder.summary()
    #plot(autoencoder, to_file='conv_autoencoder.png', show_shapes=True)
    weights_filename = 'weights_conv_maxpooling_exp1.dat'
    if os.path.isfile(weights_filename):
        print('Loading the model...')
        autoencoder.load_weights(weights_filename)
    else:
        print('Training the model...')
        trainStart = time.time()
        autoencoder.fit(x_train,
                        x_train,
                        nb_epoch=100,
                        batch_size=100,
                        shuffle=True)  #,
        #validation_data=(x_test, x_test))
        trainEnd = time.time()
        print('Trained the model in', trainEnd - trainStart, 'seconds')
        print('Saving the model...')
        autoencoder.save_weights(weights_filename, True)
    return autoencoder
Example #22
    def build_model(lags, filter_size, prediction_steps=None):
        """
        return: Keras model instance
        """
        input_dim = lags + 1

        def sampling(args):
            """Reparameterization trick: sample from an isotropic unit Gaussian.
            # Arguments:
                args (tensor): mean and log of variance of Q(z|X)
            # Returns:
                z (tensor): sampled latent vector, z = z_mean + exp(0.5 * z_log_var) * epsilon
            """

            z_mean, z_log_var = args
            # by default, random_normal has mean=0 and std=1.0
            epsilon = K.random_normal(shape=K.shape(z_mean))
            return z_mean + K.exp(0.5 * z_log_var) * epsilon

        def vae_loss(x, x_decoded_mean, z_log_var, z_mean):
            mse_loss = K.sum(mse(x, x_decoded_mean), axis=1)
            kl_loss = -0.5 * K.sum(
                1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                axis=[1, 2])
            return K.mean(mse_loss + kl_loss)

        input_segment = Input(shape=(input_dim, 1))

        # Define encoder part
        x = Conv1D(32, filter_size, activation='relu',
                   padding='same')(input_segment)
        x = MaxPooling1D(2, padding='same')(x)
        x = Conv1D(16, filter_size, activation='relu', padding='same')(x)
        x = MaxPooling1D(2, padding='same')(x)
        x = Conv1D(4, filter_size, activation='relu', padding='same')(x)
        x = MaxPooling1D(2, padding='same')(x)
        z_mean = Dense(8)(x)
        z_log_sigma = Dense(8)(x)
        # Remark: this layer will cause the training to fail on the last batch, if the last batch is shorter
        # so a padding trick must be applied
        encoded = Lambda(sampling)([z_mean, z_log_sigma])

        # Define decoder part (decode from the sampled latent tensor)
        x = Conv1D(4, filter_size, activation='relu', padding='same')(encoded)
        x = UpSampling1D(2)(x)
        x = Conv1D(16, filter_size, activation='relu', padding='same')(x)
        x = UpSampling1D(2)(x)
        x = Conv1D(32, filter_size, activation='relu', padding='same')(x)
        x = UpSampling1D(2)(x)
        decoded = Conv1D(1, filter_size, activation='linear',
                         padding='same')(x)
        autoencoder = Model(input_segment, decoded)
        vae_losses = vae_loss(input_segment, decoded, z_log_sigma, z_mean)
        autoencoder.add_loss(vae_losses)
        encoder = Model(input_segment, encoded)
        return (autoencoder, encoder)
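Because the VAE loss is attached with `add_loss`, the returned autoencoder compiles without a `loss` argument. A hedged usage sketch (`lags=7`, `filter_size=3`, and the array `x` shaped `(batch, lags + 1, 1)` are assumptions):

# Hypothetical usage of build_model; the VAE loss is already attached via add_loss
autoencoder, encoder = build_model(lags=7, filter_size=3)
autoencoder.compile(optimizer='adam')
autoencoder.fit(x, epochs=10, batch_size=32)  # no targets needed with add_loss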
Example #23
def create_decoder(array_of_upsampling, input_for_decoder):
    '''create a decoder'''
    x = UpSampling1D(array_of_upsampling[0])(input_for_decoder)
    x = Conv1D(5, 30, activation=LeakyReLU(alpha=0.2), padding='same')(x)
    x = UpSampling1D(array_of_upsampling[1])(x)
    x = Conv1D(10, 80, activation=LeakyReLU(alpha=0.2), padding='same')(x)
    x = UpSampling1D(array_of_upsampling[2])(x)
    decoded = Conv1D(1, 100, activation=LeakyReLU(alpha=0.2),
                     padding='same')(x)
    return Model(input_for_decoder, decoded)  # create a model
Example #24
def autoencoder_model():
    """ design the autoencoder structure.
   """
    # the dimension of latent variable
    encoding_dim = 32
    # input placeholder
    input_data = Input(shape=(164, 4))

    # add noise

    # coding layers
    encoded = Conv1D(32, 2, activation='relu', padding='same')(input_data)
    encoded = MaxPooling1D(pool_size=2)(encoded)
    encoded = Conv1D(64, 3, activation='relu', padding='same')(encoded)
    encoded = MaxPooling1D(pool_size=2)(encoded)
    encoded = Conv1D(128, 4, activation='relu', padding='same')(encoded)

    encoded = Flatten()(encoded)

    # encoder = Model(inputs=input_data, outputs=encoded)
    # print(encoder.summary())

    encoded = Dense(128, activation='relu',
                    kernel_regularizer=regularizers.l2(0.01))(encoded)
    encoder_output = Dense(encoding_dim, activation='relu')(encoded)

    # encoder = Model(inputs=input_data, outputs=encoder_output)
    # print(encoder.summary())

    # decoding layers
    decoded = Dense(128, activation='relu')(encoder_output)
    decoded = Dense(5248, activation='relu',
                    kernel_regularizer=regularizers.l2(0.01))(decoded)
    decoded = Reshape((-1, 128))(decoded)

    decoded = Conv1D(64, 3, activation='relu', padding='same')(decoded)
    decoded = UpSampling1D(size=2)(decoded)

    decoded = Conv1D(32, 2, activation='relu', padding='same')(decoded)
    decoded = UpSampling1D(size=2)(decoded)
    decoded_output = Conv1D(4, 2, activation='softmax',
                            padding='same')(decoded)

    # encoder model
    encoder = Model(inputs=input_data, outputs=encoder_output)
    print(encoder.summary())

    # autoencoder model
    autoencoder = Model(inputs=input_data, outputs=decoded_output)
    print(autoencoder.summary())
    autoencoder_loss = binary_crossentropy(
        K.flatten(input_data), K.flatten(decoded_output))
    autoencoder.add_loss(autoencoder_loss)  # add the loss to the model

    return encoder, autoencoder
Example #25
def train_deep_encoder_decoder():
    # (600, 1)
    inp = Input(shape=(time_series_size, feature_size))

    # (600, 64)
    x = Conv1D(filters=64,
               kernel_size=3,
               activation='relu',
               padding='same',
               input_shape=(time_series_size, feature_size))(inp)
    # (300, 64)
    x = MaxPooling1D(pool_size=2)(x)
    # (300, 32)
    x = Conv1D(filters=32, kernel_size=3, activation='relu', padding='same')(x)
    # (150, 32)
    x = MaxPooling1D(pool_size=2)(x)
    # (150, 1)
    encoded = Conv1D(filters=1,
                     kernel_size=3,
                     activation='relu',
                     padding='same')(x)

    # (150)
    x = Flatten()(encoded)
    # (150, 1)
    x = Reshape((encoding_dim, 1))(x)
    # (150, 32)
    x = Conv1D(filters=32, kernel_size=3, activation='relu', padding='same')(x)
    # (300, 32)
    x = UpSampling1D(2)(x)
    # (300, 64)
    x = Conv1D(filters=64, kernel_size=3, activation='relu', padding='same')(x)
    # (600, 64)
    x = UpSampling1D(2)(x)
    # (600, 1)
    decoded = Conv1D(filters=1,
                     kernel_size=3,
                     activation='sigmoid',
                     padding='same')(x)

    autoencoder = Model(inputs=inp, outputs=decoded)

    opt = Adam(0.001)
    autoencoder.compile(optimizer=opt, loss='binary_crossentropy')

    autoencoder.fit(train_blink,
                    train_blink,
                    epochs=3,
                    batch_size=BATCH_SIZE,
                    shuffle=True,
                    validation_data=(test_blink, test_blink))

    autoencoder.save(autoencoder_model_file)
Example #26
def getDecoderModel(encoded_length, ae_weights=0):
    encoded = Input(shape=(encoded_length, 1))
    x = UpSampling1D(length=2)(encoded)
    x = Convolution1D(64, 3, border_mode='same', activation="tanh")(x)
    x = UpSampling1D(length=2)(x)
    x = Convolution1D(64, 3, border_mode='same', activation="tanh")(x)
    x = UpSampling1D(length=2)(x)
    x = Convolution1D(32, 5, border_mode='same', activation="tanh")(x)
    #x = UpSampling1D(length=2)(x)
    decoded = Convolution1D(32, 11, border_mode='same', activation="tanh")(x)
    decoder = Model(encoded, decoded)
    #plot(decoder, to_file='conv_decoder.png', show_shapes=True)
    return decoder
Example #27
def create_decoder(input_for_decoder, shape):
    '''create a decoder'''
    x = Dense(shape[1] * shape[2])(input_for_decoder)
    x = Reshape((shape[1], shape[2]))(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(15, 20, activation='relu', padding='same')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(20, 30, activation='relu', padding='same')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(20, 100, activation='relu', padding='same')(x)
    x = UpSampling1D(2)(x)
    decoded = Conv1D(1, 100, padding='same')(x)
    return Model(input_for_decoder, decoded)  # create a model
Example #28
    def define_model(input_image: Input) -> Model:
        '''
        Defines the compiled, pre-fit Keras model

        :param input_image: the Input layer of the Keras model (requires analyzing the input shape)
        :return: the compiled, pre-fit Keras model
        '''

        # encoder
        conv1 = Conv1D(28, (5),
                       activation='relu',
                       padding='same',
                       name='conv1')(input_image)
        pool1 = MaxPool1D(pool_size=(4))(conv1)
        conv2 = Conv1D(64, (5),
                       activation='relu',
                       padding='same',
                       name='conv2')(pool1)
        pool2 = MaxPool1D(pool_size=(4))(conv2)
        conv3 = Conv1D(128, (5),
                       activation='relu',
                       padding='same',
                       name='conv3')(pool2)

        # decoder
        conv4 = Conv1D(128, (5),
                       activation='relu',
                       padding='same',
                       name='conv4')(conv3)
        up1 = UpSampling1D((4))(conv4)
        conv5 = Conv1D(64, (5),
                       activation='relu',
                       padding='same',
                       name='conv5')(up1)
        up2 = UpSampling1D((4))(conv5)
        decoded = Conv1D(28, (5),
                         activation='sigmoid',
                         padding='same',
                         name='conv6')(up2)

        autoencoder = Model(input_image, decoded)

        loss = keras.losses.mse
        optimizer = keras.optimizers.Adam()

        autoencoder.compile(loss=loss, optimizer=optimizer)
        autoencoder.summary()  # prints the summary; summary() returns None
        return autoencoder
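A hedged usage sketch for `define_model`. The `(64, 28)` input shape is an assumption: the length must be divisible by 16 (two pool-4 stages), and 28 channels make the decoder's 28-filter output match the input:

# Hypothetical usage of define_model
input_image = Input(shape=(64, 28))
autoencoder = define_model(input_image)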
Example #29
def wave_AE(x_shape):
    # model: raw-wave to raw-wave

    inputs = Input(x_shape)  # e.g. x_shape == (16384, 1)

    # Encoder
    conv = Conv1D(filters=64, kernel_size=3, padding='same',
                  activation='relu')(inputs)  # 16384x64
    conv = MaxPooling1D(pool_size=2, padding='same')(conv)

    conv = Conv1D(filters=64, kernel_size=3, padding='same',
                  activation='relu')(conv)  # 8192x64
    conv = MaxPooling1D(pool_size=2, padding='same')(conv)

    conv = Conv1D(filters=128,
                  kernel_size=3,
                  padding='same',
                  activation='relu')(conv)  # 4096x128
    encoded = MaxPooling1D(pool_size=2, padding='same')(conv)  # 2048x128

    # Decoder
    conv = Conv1D(filters=128,
                  kernel_size=3,
                  padding='same',
                  activation='relu')(encoded)  # 2048x128
    conv = UpSampling1D(size=2)(conv)

    conv = Conv1D(filters=64, kernel_size=3, padding='same',
                  activation='relu')(conv)  # 4096x64
    conv = UpSampling1D(size=2)(conv)

    conv = Conv1D(filters=64, kernel_size=3, padding='same',
                  activation='relu')(conv)  # 8192x64
    conv = UpSampling1D(size=2)(conv)

    outputs = Conv1D(filters=1,
                     kernel_size=3,
                     padding='same',
                     activation='sigmoid')(conv)  # 16384x1

    #     print(outputs.shape)
    #     outputs = Reshape(x_shape)(outputs)
    #     print(outputs.shape)

    model = Model(inputs, outputs)
    encoder = Model(inputs, encoded)

    return model, encoder
Example #30
    def test_UpSampling1D_layer(self):
        x_input = Input(shape=(28, ))
        x = UpSampling1D(size=2)(x_input)
        model = Model(inputs=x_input, outputs=x)
        self.assertEqual(model.get_layer(index=1).size, (2, ))
        self.assertEqual(model.get_layer(index=1).input_shape, (None, 28))
        self.assertEqual(model.get_layer(index=1).output_shape, (None, 56))