# Exemple #1
def basic_lstm_l1_act(batch_size=8, n_features=1, n_stations=21, seq_len_train=7 * 24,
                      seq_len_pred=6, n_features_pred=1):
    """
    Creates a training model with an LSTM. Inspired by the network of Meijer. We input the hourly
    temperature for 7 days and we predict the mean temperature at day and at night for the
    following days.
    :param batch_size: Size of one batch
    :param n_features: Number of features per station
    :param n_stations: Number of stations
    :param seq_len_train: Input sequence length in hours (default: 7 days of hourly data)
    :param seq_len_pred: Sequence length to predict
    :param n_features_pred: Number of predicted features (currently unused)
    :return: training model
    """
    # BUG FIX: the original body re-assigned seq_len_train and seq_len_pred to
    # hard-coded values, silently ignoring the caller's arguments. The defaults
    # are unchanged, so existing callers get the exact same model.
    inputs = Input(shape=(seq_len_train, n_features * n_stations),
                   batch_shape=(batch_size, seq_len_train, n_features * n_stations))

    # We have two dense units to preprocess the input. The idea is that the network abstracts
    # from the high resolution to a per day representation.
    dense1 = Dense(units=14, activation="elu", activity_regularizer=regularizers.l1(0.01))(inputs)
    dense2 = Dense(units=14, activation="elu", activity_regularizer=regularizers.l1(0.01))(dense1)
    # LSTM keeps track of time dependencies
    lstm = LSTM(units=1, batch_input_shape=(batch_size, seq_len_train, n_stations * n_features))(dense2)
    dense3 = Dense(units=6, activation="tanh", activity_regularizer=regularizers.l1(0.01))(lstm)
    out = Dense(units=seq_len_pred, activation="linear")(dense3)

    model = Model(inputs, out)
    return model
 def encoder(self, data, encoding_dim=2):
     """Build a 3-layer sigmoid encoder ending in an ``encoding_dim``-wide code.

     Every hidden layer carries an L1 activity regularizer to encourage
     sparse activations.

     :param data: training samples; data[0].shape fixes the input shape.
     :param encoding_dim: width of the final encoded representation.
     :return: uncompiled Keras Model mapping inputs to the encoding.
     """
     inputs = Input(shape=(data[0].shape))
     x = inputs
     # Funnel: 30 -> 10 -> encoding_dim, all sparse sigmoid layers.
     for units in (30, 10, encoding_dim):
         x = Dense(units,
                   activation='sigmoid',
                   activity_regularizer=regularizers.l1(10e-5))(x)
     return Model(inputs, x)
# Exemple #3
    def train(self, sentences_vector: SentencesVector):
        """Build and fit the LSTM relation classifier on pre-vectorized sentences.

        :param sentences_vector: holds the sentence, position and POS feature
            matrices plus the one-hot classification targets.
        :return: the trained Keras Model.
        """
        inputer = sentences_vector.inputer
        config = inputer.config
        # Word-index input: up to MAX_SEQUENCE_LENGTH tokens per sentence.
        sequence_input = Input(shape=(config.MAX_SEQUENCE_LENGTH, ),
                               dtype='int32',
                               name="sequence_input")
        # Map word indices to embedding vectors.
        embedded_sequences = inputer.getWordEmbedding()(sequence_input)
        # model test2
        posi_input = Input(shape=(config.MAX_SEQUENCE_LENGTH,
                                  sentences_vector.position_vec.shape[2]),
                           name="posi_input")
        pos_input = Input(shape=(config.MAX_SEQUENCE_LENGTH,
                                 sentences_vector.pos_vec.shape[2]),
                          name="pos_input")
        embedded_sequences = keras.layers.concatenate(
            [embedded_sequences, posi_input, pos_input])
        # NOTE(review): `input_dtype` is a legacy Keras kwarg and [100, 182]
        # looks like a shape, not a dtype; the LSTM input shape is inferred
        # from the concatenated tensor anyway — confirm and drop.
        c1 = LSTM(100, input_dtype=[100, 182])(embedded_sequences)
        preds = Dense(len(inputer.types),
                      activation='softmax',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.001))(
                          c1)  # softmax classification
        model = Model(inputs=[sequence_input, posi_input, pos_input],
                      outputs=preds)
        print(model.summary())
        adam = optimizers.Adam(lr=0.001, decay=0.0001)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=["categorical_accuracy"])

        # Reduce `epochs` for a shorter run.

        # ModelCheckpoint saves the model after every epoch (no save_best_only,
        # so each epoch overwrites the file).
        checkpoint = ModelCheckpoint(config.model_file_path,
                                     monitor='val_loss',
                                     verbose=1,
                                     mode='min')
        # BUG FIX: accuracy must be maximized. The original used mode="min",
        # which stops training as soon as accuracy stops *decreasing*.
        early = EarlyStopping(monitor="categorical_accuracy",
                              mode="max",
                              patience=50)
        metrics = Metrics(sentences_vector)
        # Start training.
        callbacks_list = [checkpoint, early, metrics]  # early
        # And trained it via:
        model.fit(
            {
                'sequence_input': sentences_vector.sentence_vec,
                'posi_input': sentences_vector.position_vec,
                'pos_input': sentences_vector.pos_vec
            },
            sentences_vector.classifications_vec,
            # NOTE(review): batch_size taken from shape[1] (the sequence
            # length) is unusual — presumably intentional, but verify.
            batch_size=sentences_vector.sentence_vec.shape[1],
            epochs=100,
            # validation_split=0.2,
            # validation_data=({'sequence_input': x_test, 'posi_input': x_test_posi}, y_test),
            callbacks=callbacks_list)
        return model
# Exemple #4
 def fit(self, X):
     """Fit a one-hidden-layer autoencoder to X (reconstruction target = X).

     self.constraint selects the activity regularizer on the encoding layer:
     'L1', 'L2', or anything else for no regularization.

     :param X: 2-D array of shape (n_samples, n_features).
     :return: self, with the trained model stored in self.model.
     """
     # Map the constraint name to a regularizer once, instead of duplicating
     # the whole layer construction in three copy/pasted branches.
     if self.constraint == 'L1':
         regularizer = regularizers.l1(1e-6)
     elif self.constraint == 'L2':
         regularizer = regularizers.l2(1e-6)
     else:
         regularizer = None  # no activity regularization
     model = Sequential()
     model.add(
         Dense(self.n_hidden,
               input_shape=(X.shape[1], ),
               activation='sigmoid',
               name='encode',
               activity_regularizer=regularizer))
     # Decoder: reconstruct the input.
     model.add(Dense(X.shape[1], activation='sigmoid'))
     sgd = SGD(lr=self.learn_rate)
     model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])
     model.fit(X, X, batch_size=32, epochs=self.max_iter, verbose=0)
     self.model = model
     return self
# Exemple #5
    def get_model(self):
        """Build the stacked CuDNNGRU demand-forecasting network (linear output)."""
        timesteps = int(cfg.DEMAND_SEQUENCE_LENGTH / cfg.DEMAND_SAMPLING_RATE)

        model = Sequential()
        # Input layer: one GRU unit per datapoint, L1-regularized kernel.
        model.add(
            CuDNNGRU(
                input_shape=(timesteps, cfg.DEMAND_DATAPOINTS_PER_TS),
                units=cfg.DEMAND_DATAPOINTS_PER_TS,
                kernel_regularizer=regularizers.l1(0.01),
                return_sequences=True,
            ))
        # Two further GRU layers, each followed by dropout; only the last
        # collapses the sequence dimension.
        for units, keep_sequence in ((200, True), (100, False)):
            model.add(CuDNNGRU(units=units, return_sequences=keep_sequence))
            model.add(Dropout(0.2))
        model.add(Dense(units=100))
        model.add(Dense(units=1))
        model.add(Activation('linear'))

        start = time.time()
        model.compile(loss='mae', optimizer=rmsprop(lr=0.01))
        log.info('compilation time : {}'.format(time.time() - start))
        return model
# Exemple #6
def tr_net_b3(input_shape_speed,
              word_to_vec_map_speed,
              word_to_index_speed,
              input_shape_bearing,
              word_to_vec_map_bearing,
              word_to_index_bearing,
              no_classes=11):
    """Two-branch (speed + bearing) BiLSTM classifier over pre-trained embeddings.

    Each branch embeds its token sequence, applies two BiLSTM layers and two
    small L1/L2-regularized Dense layers; the two feature vectors are then
    concatenated and classified with a softmax head.

    :param input_shape_speed: input sequence shape for the speed branch.
    :param word_to_vec_map_speed: embedding map for the speed branch.
    :param word_to_index_speed: word index for the speed branch.
    :param input_shape_bearing: input sequence shape for the bearing branch.
    :param word_to_vec_map_bearing: embedding map for the bearing branch.
    :param word_to_index_bearing: word index for the bearing branch.
    :param no_classes: number of output classes.
    :return: uncompiled Keras Model taking [speed, bearing] inputs.
    """

    def _branch(input_shape, word_to_vec_map, word_to_index):
        # One BiLSTM feature-extraction branch (deduplicated from the
        # original copy/pasted speed and bearing stacks).
        inp = Input(shape=input_shape, dtype=np.int32)
        x = pre_trained_embedding_layer(word_to_vec_map, word_to_index)(inp)
        x = Bidirectional(LSTM(128, return_sequences=True))(x)
        x = Dropout(0.5)(x)
        x = Bidirectional(LSTM(128))(x)
        x = Dropout(0.5)(x)
        x = Dense(32,
                  kernel_regularizer=regularizers.l2(0.001),
                  activity_regularizer=regularizers.l1(0.001))(x)
        x = Dropout(0.5)(x)
        x = Dense(16,
                  kernel_regularizer=regularizers.l2(0.001),
                  activity_regularizer=regularizers.l1(0.001))(x)
        return inp, x

    speed, X = _branch(input_shape_speed, word_to_vec_map_speed,
                       word_to_index_speed)
    bearing, B = _branch(input_shape_bearing, word_to_vec_map_bearing,
                         word_to_index_bearing)

    X = Concatenate()([X, B])

    X = Dense(no_classes)(X)
    X = Activation('softmax')(X)
    model = Model(inputs=[speed, bearing], outputs=X)
    return model
# Exemple #7
def train(sentences_vector: SentencesVector):
    """Build and fit the LSTM relation classifier on pre-vectorized sentences.

    :param sentences_vector: holds the sentence, position and POS feature
        matrices plus the one-hot classification targets.
    :return: the trained Keras Model.
    """
    # Word-index input: up to MAX_SEQUENCE_LENGTH tokens per sentence.
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ),
                           dtype='int32',
                           name="sequence_input")
    # Map word indices to embedding vectors.
    embedded_sequences = embedding_layer(sequence_input)
    # model test2
    posi_input = Input(shape=(MAX_SEQUENCE_LENGTH, 40), name="posi_input")
    pos_input = Input(shape=(MAX_SEQUENCE_LENGTH, len(all_pos_list)),
                      name="pos_input")
    embedded_sequences = keras.layers.concatenate(
        [embedded_sequences, posi_input, pos_input])
    # conv1d_1s = MultiConv1D(filters=[90, 80, 70, 50, 30, 10], kernel_size=[3, 4, 5], activation='relu')
    # NOTE(review): `input_dtype` is a legacy Keras kwarg and [100, 182] looks
    # like a shape, not a dtype; the LSTM input shape is inferred from the
    # concatenated tensor anyway — confirm and drop.
    c1 = LSTM(100, input_dtype=[100, 182])(embedded_sequences)
    # c1 = MaxPooling1D(pool_size=3)(c1)
    # c1 = Dropout(rate=0.7)(c1)
    # c1 = Flatten()(c1)
    # c1 = Dense(128, activation='relu')(c1)  # 128-unit fully connected
    # c1 = Dense(64, activation='relu')(c1)  # 64-unit fully connected
    preds = Dense(len(types),
                  activation='softmax',
                  kernel_regularizer=regularizers.l2(0.01),
                  activity_regularizer=regularizers.l1(0.001))(c1)  # softmax classification
    model = Model(inputs=[sequence_input, posi_input, pos_input],
                  outputs=preds)
    print(model.summary())
    adam = optimizers.Adam(lr=0.001, decay=0.0001)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=["accuracy"])

    # BUG FIX: fit() below receives no validation data (validation_split is
    # commented out), so 'val_loss' is never produced: the checkpoint would
    # never save and early stopping would never fire. Monitor the training
    # loss instead.
    checkpoint = ModelCheckpoint(model_path,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    # Stop training once the monitored value no longer improves.
    early = EarlyStopping(monitor="loss", mode="min", patience=50)

    # Start training.
    callbacks_list = [checkpoint, early]  # early
    # And trained it via:
    model.fit(
        {
            'sequence_input': sentences_vector.sentence_vec,
            'posi_input': sentences_vector.position_vec,
            'pos_input': sentences_vector.pos_vec
        },
        sentences_vector.classifications_vec,
        batch_size=128,
        epochs=500,
        # validation_split=0.2,
        # validation_data=({'sequence_input': x_test, 'posi_input': x_test_posi}, y_test),
        callbacks=callbacks_list)
    return model
# Exemple #8
def reg_model():
    """Two-hidden-layer MLP regressor with L1 kernel regularization.

    :return: compiled Keras Sequential model (MAE loss, Adam optimizer).
    """
    model = Sequential()
    model.add(
        Dense(64,
              kernel_initializer='normal',
              kernel_regularizer=regularizers.l1(0.001),
              input_shape=STATE_SHAPE,
              activation="relu"))
    # FIX: dropped the redundant input_shape on this layer — Keras ignores
    # input_shape on any layer but the first, so it was merely misleading.
    model.add(
        Dense(256,
              kernel_initializer='normal',
              kernel_regularizer=regularizers.l1(0.001),
              activation="relu"))
    model.add(Dense(1))  # linear scalar output
    model.compile(loss='mean_absolute_error',
                  optimizer=Adam(lr=LEARNING_RATE),
                  metrics=['mae'])
    return model
# Exemple #9
    def __autoencoder(size, encoded_size, l1, activation, dropout_rate,
                      use_batch_norm, coefficients):
        """Build and compile a symmetric dense autoencoder.

        Encoder widths are ``size // c`` for each c in ``coefficients``; the
        decoder uses the same widths in reverse order. The bottleneck
        ("encoded") layer carries an L1 activity regularizer to encourage
        sparse codes.

        :param size: width of the input (and reconstructed output) vector.
        :param encoded_size: width of the bottleneck layer.
        :param l1: L1 activity-regularization factor on the bottleneck.
        :param activation: hidden-layer activation; 'selu' switches dropout to
            AlphaDropout (preserves self-normalization).
        :param dropout_rate: dropout rate; 0 disables dropout entirely.
        :param use_batch_norm: add BatchNormalization after each hidden Dense.
        :param coefficients: divisors defining the hidden-layer widths.
        :return: compiled Keras Model (Adam lr=0.001, MSE loss, MAE metric).
        """
        # TODO activation fns: relu, elu, selu (AlphaDropout instead of dropout for selu)
        # TODO dropout rate: 0 (no dropout), 0.1, 0.2, 0.3
        # TODO batch norm: YES/NO
        # TODO l1: 0 (no regularization), 1e-7, 1e-6
        # TODO optimizers: rmsprop, adam, nadam
        # TODO coefficients: [4, 8, 12], [4, 8], [2, 4] etc.
        input_layer = Input(shape=(size, ), name='input_conformation')

        x = input_layer
        # Encoder: progressively narrower layers named enc_1, enc_2, ... (1-based).
        for i, c in enumerate(coefficients):
            idx = i + 1
            x = Dense(size // c, activation=activation, name="enc_%d" % idx)(x)

            if use_batch_norm:
                x = BatchNormalization(name="enc_%d_batch_norm" % idx)(x)

            if dropout_rate > 0:
                if activation == 'selu':
                    x = AlphaDropout(dropout_rate,
                                     name="enc_%d_dropout" % idx)(x)
                else:
                    x = Dropout(dropout_rate, name="enc_%d_dropout" % idx)(x)

        # Sparse linear bottleneck.
        x = Dense(encoded_size,
                  activation="linear",
                  name="encoded",
                  activity_regularizer=regularizers.l1(l1))(x)

        # Decoder: widths in reverse; idx counts down so dec_k pairs with enc_k.
        for i, c in enumerate(reversed(coefficients)):
            idx = len(coefficients) - i
            x = Dense(size // c, activation=activation, name="dec_%d" % idx)(x)

            if use_batch_norm:
                x = BatchNormalization(name="dec_%d_batch_norm" % idx)(x)

            if dropout_rate > 0:
                if activation == 'selu':
                    x = AlphaDropout(dropout_rate,
                                     name="dec_%d_dropout" % idx)(x)
                else:
                    x = Dropout(dropout_rate, name="dec_%d_dropout" % idx)(x)

        # Linear reconstruction back to the input width.
        decoded = Dense(size, activation="linear",
                        name="decoded_conformation")(x)

        autoencoder = Model(input_layer, decoded)
        autoencoder.compile(optimizer=Adam(lr=0.001),
                            loss='mse',
                            metrics=['mae'])
        autoencoder.summary()

        return autoencoder
# Exemple #10
    def buildModel(self,
                   seqLength=100,
                   embeddingDim=300,
                   postionShape2=20,
                   types=19,
                   filters=150,
                   kernel_size=3):
        """Build an attention-weighted Conv1D relation classifier.

        :param seqLength: max tokens per sentence.
        :param embeddingDim: word-embedding width.
        :param postionShape2: width of the per-token position features.
        :param types: number of relation classes.
        :param filters: number of Conv1D filters.
        :param kernel_size: Conv1D kernel width.
        :return: uncompiled Keras Model over [sequence, position, type] inputs.
        """
        sequence_input = Input(shape=(seqLength, embeddingDim),
                               dtype='float32',
                               name="sequence_input")  # up to seqLength embedded tokens per sentence
        # embedded_sequences = inputer.getWordEmbedding()(sequence_input)  # sentence -> vector matrix (batch, 100, 300)
        # model test2
        posi_input = Input(shape=(seqLength, postionShape2), name="posi_input")

        typeInput = Input(shape=(types, ), name="typeInput")
        # Let d = word-vector width in embedded_sequences, s = sentence length,
        # r = width of the relation-type vector.
        embedded_sequences = keras.layers.concatenate(
            [sequence_input, posi_input])
        embedded_sequences = Dropout(0.5)(embedded_sequences)
        # Apply attention to the input. (Original author's note: unsure how to
        # express the three-matrix product directly, mainly because of the bias.)
        # Adding the bias here should be equivalent: R is one-hot when it is
        # multiplied in later, so the bias only adds a few extra trainable
        # parameters; the key positions stay the same.
        # probs has shape s * r.
        probs = Dense(types, use_bias=True)(embedded_sequences)
        # Dot each row of probs with the sentence's relation vector -> shape s * 1.
        probsWithRela = Dot(axes=[2, 1])([probs, typeInput])
        # Normalize probs; the shape stays s * 1.
        probs_softmax = Activation(activation="softmax")(probsWithRela)
        input_permute = Permute((2, 1))(embedded_sequences)
        # Repeat probs d times; multiplying the transposed input element-wise is
        # equivalent to multiplying by a diagonal matrix -> shape d * s.
        probs_repeated = RepeatVector(embeddingDim +
                                      postionShape2)(probs_softmax)
        realInput = keras.layers.Multiply()([input_permute, probs_repeated])
        # Transpose back to s * d before the convolution.
        realInput = Permute((2, 1))(realInput)
        input_attention = Dropout(0.5)(realInput)
        # Output is (s - kernel_size) * filters; each kernel's weights are kernel_size * d.
        c1 = Conv1D(filters, kernel_size, use_bias=True,
                    activation='tanh')(input_attention)
        # After max pooling the output reduces to a single vector of length `filters`.
        c1 = MaxPooling1D(pool_size=seqLength - kernel_size)(c1)
        c1 = Permute((2, 1))(c1)
        c1 = Reshape((filters, ))(c1)
        preds = Dense(types,
                      activation='softmax',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.001))(
                          c1)  # softmax classification
        model = Model(inputs=[sequence_input, posi_input, typeInput],
                      outputs=preds)

        print(model.summary())
        return model
# Exemple #11
def leNet_cnn_l1reg(l1reg, type):
    """LeNet-style CNN with L1 kernel regularization on every trainable layer.

    :param l1reg: L1 regularization factor applied to all conv/dense kernels.
    :param type: dataset name; 'cifar10' selects 3 input channels (RGB),
        anything else 1 (grayscale, e.g. MNIST padded to 32x32).
    :return: compiled Keras Sequential model (categorical cross-entropy, adam).
    """
    # The two original branches differed only in the channel count — compute it once.
    channels = 3 if type == 'cifar10' else 1

    model = Sequential()
    model.add(
        Conv2D(6, (5, 5),
               activation='relu',
               input_shape=(32, 32, channels),
               kernel_regularizer=regularizers.l1(l1reg)))
    model.add(AveragePooling2D())

    model.add(
        Conv2D(16, (5, 5),
               activation='relu',
               kernel_regularizer=regularizers.l1(l1reg)))
    model.add(AveragePooling2D())
    model.add(Flatten())

    model.add(
        Dense(units=120,
              activation='relu',
              kernel_regularizer=regularizers.l1(l1reg)))
    model.add(
        Dense(units=84,
              activation='relu',
              kernel_regularizer=regularizers.l1(l1reg)))
    model.add(Dense(units=10, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
def create_base_network(input_shape):
    '''Base network to be shared (eq. to feature extraction).

    Two stacked LSTMs followed by three identical sparse Dense+Dropout blocks.

    :param input_shape: shape of one input sequence (timesteps, features).
    :return: uncompiled Keras Model mapping the input to a 64-d feature vector.
    '''
    inputs = Input(shape=input_shape)
    # stateful=True would correlate consecutive frame groups; remember to pass
    # shuffle=True to fit() in that case (translated from the original note).
    x = LSTM(64,
             activation='softsign',
             dropout=0.001,
             recurrent_dropout=0.001,
             stateful=False,
             return_sequences=True)(inputs)
    x = LSTM(64,
             activation='softsign',
             dropout=0.001,
             recurrent_dropout=0.001,
             stateful=False,
             return_sequences=False)(x)

    # Three identical sparse dense blocks (deduplicated from the original
    # copy/pasted sequence of Dense + Dropout layers).
    for _ in range(3):
        x = Dense(64,
                  activation='relu',
                  activity_regularizer=regularizers.l1(1e-7))(x)
        x = Dropout(0.4)(x)
    #x = Flatten()(x)

    return Model(inputs, x)
# Exemple #13
def tr_net_r(input_shape, word_to_vec_map, word_to_index, no_classes=11):
    """Two-layer bidirectional-LSTM sentence classifier over pre-trained embeddings.

    :param input_shape: shape of the token-index input sequence.
    :param word_to_vec_map: word -> vector embedding table.
    :param word_to_index: word -> index lookup.
    :param no_classes: number of softmax output classes.
    :return: uncompiled Keras Model.
    """
    sentence_indices = Input(shape=input_shape, dtype=np.int32)
    embeddings = pre_trained_embedding_layer(word_to_vec_map,
                                             word_to_index)(sentence_indices)

    # BiLSTM stack with dropout between layers.
    x = Bidirectional(LSTM(128, return_sequences=True))(embeddings)
    x = Dropout(0.5)(x)
    x = Bidirectional(LSTM(256))(x)
    x = Dropout(0.5)(x)

    # Regularized classification head.
    x = Dense(no_classes,
              kernel_regularizer=regularizers.l2(0.01),
              activity_regularizer=regularizers.l1(0.01))(x)
    probabilities = Activation('softmax')(x)
    return Model(sentence_indices, probabilities)
def train(x_train):
    """Train a sparse autoencoder on x_train.

    The first encoding layer carries an L1 activity regularizer to make the
    hidden representation sparse (this is the only change versus a plain
    autoencoder).

    :param x_train: training samples of width ENCODING_DIM_INPUT.
    :return: (encoder, autoencoder) Keras models sharing the same weights.
    """
    input_image = Input(shape=(ENCODING_DIM_INPUT, ))

    # Encoder: sparse first layer, then two plain ReLU layers, then a linear code.
    h = Dense(ENCODING_DIM_LAYER1,
              activation='relu',
              activity_regularizer=regularizers.l1(10e-6))(input_image)
    h = Dense(ENCODING_DIM_LAYER2, activation='relu')(h)
    h = Dense(ENCODING_DIM_LAYER3, activation='relu')(h)
    encode_output = Dense(ENCODING_DIM_OUTPUT)(h)

    # Decoder: mirror the encoder widths, ending in a tanh reconstruction.
    d = Dense(ENCODING_DIM_LAYER3, activation='relu')(encode_output)
    d = Dense(ENCODING_DIM_LAYER2, activation='relu')(d)
    d = Dense(ENCODING_DIM_LAYER1, activation='relu')(d)
    decode_output = Dense(ENCODING_DIM_INPUT, activation='tanh')(d)

    # The encoder shares layers (and weights) with the full autoencoder.
    autoencoder = Model(inputs=input_image, outputs=decode_output)
    encoder = Model(inputs=input_image, outputs=encode_output)

    autoencoder.compile(optimizer='adam', loss='mse')

    autoencoder.fit(x_train,
                    x_train,
                    epochs=EPOCHS,
                    batch_size=BATCH_SIZE,
                    shuffle=True)

    return encoder, autoencoder
# Exemple #15
def mini_VGG_l1reg(l1reg, type):
    """Mini-VGG CNN with L1 kernel regularization on every trainable layer.

    Three conv blocks (32, 64, 128 filters, two convs + max-pool each) followed
    by two regularized dense layers and a 10-way softmax.

    :param l1reg: L1 regularization factor applied to all conv/dense kernels.
    :param type: dataset name; 'cifar10' selects 3 input channels (RGB),
        anything else 1 (grayscale MNIST resized/padded to 32x32).
    :return: compiled Keras Sequential model (categorical cross-entropy, adam).
    """

    def _conv(filters, **extra):
        # 3x3 same-padded ReLU conv sharing the L1 kernel regularizer.
        return Conv2D(filters, (3, 3),
                      activation='relu',
                      padding='same',
                      kernel_regularizer=regularizers.l1(l1reg),
                      **extra)

    # The two original branches differed only in the channel count.
    channels = 3 if type == 'cifar10' else 1

    model = Sequential()
    model.add(_conv(32, input_shape=(32, 32, channels)))
    model.add(_conv(32))
    model.add(MaxPooling2D((2, 2)))

    # Two more double-conv blocks, doubling the filter count each time.
    for filters in (64, 128):
        model.add(_conv(filters))
        model.add(_conv(filters))
        model.add(MaxPooling2D((2, 2)))

    model.add(Flatten())
    for units in (512, 200):
        model.add(
            Dense(units,
                  activation='relu',
                  kernel_initializer='he_uniform',
                  kernel_regularizer=regularizers.l1(l1reg)))
    model.add(Dense(10, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
# Exemple #16
def main():
    """Train a small per-pixel CNN on image patches and visually check results.

    Extracts (3, k, k) kernel patches (grey / MOG / flow channels), trains a
    tiny CNN to predict the ground-truth value at the patch centre, plots the
    accuracy/loss curves, then slides the model over fresh sequences and shows
    the resulting prediction maps with OpenCV.
    """
    sequence_count = 10
    image_count = 5
    input_count = 4  # 4 input image; 0: Grey, 1: GT, 2: MOG, 3: Optical flow
    height = 160
    width = 240
    kernelshape = 7  # 7x7 kernel size (original comment said 5x5; the value is 7)

    teach_size = 30000
    test_size = 8000

    training_data_kernels = get_kernel_data(sequence_count, image_count,
                                            kernelshape, input_count, height,
                                            width)

    # Inputs: channels 0 (grey), 2 (MOG), 3 (flow); label: one GT pixel per patch.
    # NOTE(review): int(7/2) + 1 = 4, but the centre of a 7-wide patch is
    # index 3 — confirm this offset is intended.
    network_input = training_data_kernels[:, (0, 2, 3), :, :]
    network_label = training_data_kernels[:, 1,
                                          int(kernelshape / 2) + 1,
                                          int(kernelshape / 2) + 1]

    #------------------
    #      keras
    #------------------

    input_img = Input(shape=(3, kernelshape, kernelshape))

    # NOTE(review): Reshape reorders the buffer, it does not transpose axes —
    # verify channels-first -> channels-last was really intended here.
    input1 = Reshape((kernelshape, kernelshape, 3))(input_img)
    layer1 = keras.layers.Conv2D(20, 3)(input1)
    layer2 = keras.layers.Conv2D(20, 3)(layer1)
    layer3 = Flatten()(layer2)
    layer4 = Dense(10,
                   activation="relu",
                   activity_regularizer=regularizers.l2(10e-5))(layer3)
    layer5 = Dense(1,
                   activation="sigmoid",
                   activity_regularizer=regularizers.l1(10e-5))(layer4)
    autoencoder = Model(input_img, layer5)

    #build
    autoencoder.compile(optimizer='rmsprop',
                        loss='binary_crossentropy',
                        metrics=['accuracy'])

    # Normalize pixel values to [0, 1].
    x_train = network_input[:teach_size].astype('float32') / 255.
    x_label = network_label[:teach_size].astype('float32') / 255.
    x_test = network_input[teach_size:teach_size +
                           test_size].astype('float32') / 255.
    x_test_label = network_label[teach_size:teach_size +
                                 test_size].astype('float32') / 255.

    history = autoencoder.fit(x_train,
                              x_label,
                              epochs=5,
                              batch_size=500,
                              shuffle=True,
                              validation_data=(x_test, x_test_label))

    # NOTE(review): newer Keras stores these under 'accuracy'/'val_accuracy';
    # 'acc'/'val_acc' only exist on older versions — confirm the version used.
    acc = history.history['acc']
    val_acc = history.history['val_acc']

    loss = history.history['loss']
    val_loss = history.history['val_loss']

    # Plot training curves: accuracy on top, loss below.
    plt.figure(figsize=(8, 8))
    plt.subplot(2, 1, 1)
    plt.plot(acc, label='Training Accuracy')
    plt.plot(val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.ylabel('Accuracy')
    plt.ylim([min(plt.ylim()), 1])
    plt.title('Training and Validation Accuracy')

    plt.subplot(2, 1, 2)
    plt.plot(loss, label='Training Loss')
    plt.plot(val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.ylabel('Cross Entropy')
    plt.ylim([0, max(plt.ylim())])
    plt.title('Training and Validation Loss')
    plt.show()

    # ------------------
    #      keras - end
    # ------------------

    # Visual sanity check: fill channels 2/3 with previous grey frames, then
    # slide the model over every pixel of each image and display the result.
    check_result_data = np.zeros(
        (10, 10, input_count, height, width)).astype(np.uint8)
    check_result_data[:, :, :2, :, :] = get_data(10, 10, height, width)
    for seqi in check_result_data:
        # NOTE(review): the MOG subtractor is created but never applied below.
        fn_mog = cv.bgsegm.createBackgroundSubtractorMOG()
        i = 0
        for imi in seqi:
            if i < 2:
                # First two frames: no history yet, reuse the current grey frame.
                imi[2] = imi[0]
                imi[3] = imi[0]
                # flow = cv.calcOpticalFlowFarneback(im[0], im[0], None, 0.5, 3, 15, 3, 5, 1.2, 0)
            else:
                # Channels 2/3 hold the grey frames from t-1 and t-2.
                imi[2] = seqi[(i - 1), 0]
                imi[3] = seqi[(i - 2), 0]
            i = i + 1

            # Per-pixel prediction: one model call per interior pixel (slow).
            result_im = np.zeros((height, width))
            kernel_i = int(kernelshape / 2)
            for k in range(kernel_i, height - kernel_i):
                for j in range(kernel_i, width - kernel_i):
                    pred_im = np.array([
                        imi[(0, 2, 3), (k - kernel_i):(k + kernel_i + 1),
                            (j - kernel_i):(j + kernel_i + 1)]
                    ])
                    result_im[k, j] = autoencoder.predict(pred_im)

            print(np.max(result_im))
            print(np.sum(result_im))
            # Stretch the prediction map to 0-255 for display.
            res_im = cv.normalize(result_im, None, 0, 255, cv.NORM_MINMAX)
            cv.imshow("NN 0", res_im.astype(np.uint8))
            cv.imshow("im1", imi[0])
            cv.imshow("im2", imi[2])
            cv.imshow("im3", imi[3])
            cv.imshow("gt", imi[1])
            cv.waitKey()
# Exemple #17
def train():
    """Train one model per MultiConv1D configuration and return the last one.

    NOTE(review): despite its name, ``best_model`` is simply the most recently
    trained model — no metric comparison between configurations is performed.
    Each configuration's best weights are checkpointed to its own .hdf5 file.
    """
    # Word-index input: up to MAX_SEQUENCE_LENGTH tokens per sentence.
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ),
                           dtype='int32',
                           name="sequence_input")  # up to MAX_SEQUENCE_LENGTH word indices
    embedded_sequences = embedding_layer(
        sequence_input)  # map word indices to vectors: (batch, 100, 300)
    # model test2
    posi_input = Input(shape=(MAX_SEQUENCE_LENGTH, 40), name="posi_input")
    embedded_sequences = keras.layers.concatenate(
        [embedded_sequences, posi_input])
    conv1d_1s = MultiConv1D(filters=[90, 80, 70, 50, 30, 10],
                            kernel_size=[3, 4, 5],
                            activation='relu')
    best_model = None
    count = 0
    # One full build/compile/fit cycle per convolution configuration.
    for conv1d in conv1d_1s:
        c1 = conv1d(embedded_sequences)
        c1 = MaxPooling1D(pool_size=3)(c1)
        c1 = Dropout(rate=0.7)(c1)
        c1 = Flatten()(c1)
        # c1 = Dense(128, activation='relu')(c1)  # 128-unit fully connected
        # c1 = Dense(64, activation='relu')(c1)  # 64-unit fully connected
        preds = Dense(len(types),
                      activation='softmax',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.001))(
                          c1)  # softmax classification
        model = Model(inputs=[sequence_input, posi_input], outputs=preds)
        print(model.summary())
        adam = optimizers.Adam(lr=0.001, decay=0.0001)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=["categorical_accuracy"])

        # Reduce `epochs` for a shorter run.

        # ModelCheckpoint saves to file_path after each epoch; with
        # save_best_only=True only the lowest-val_loss weights are kept.
        file_path = "../data/model/weights_base.temp" + str(count) + ".hdf5"
        checkpoint = ModelCheckpoint(file_path,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min')
        # Stop training once the monitored value no longer improves.
        early = EarlyStopping(monitor="val_loss", mode="min", patience=50)

        # Start training.
        callbacks_list = [checkpoint, early]  # early
        x_train, x_train_posi, y_train = get_xy("../data/train.txt")

        # And trained it via:
        model.fit(
            {
                'sequence_input': x_train,
                'posi_input': x_train_posi
            },
            y_train,
            batch_size=128,
            epochs=500,
            validation_split=0.2,
            # validation_data=({'sequence_input': x_test, 'posi_input': x_test_posi}, y_test),
            callbacks=callbacks_list)
        print(model)
        count += 1
        best_model = model
    return best_model
def action():  # Drives the whole pipeline: EEG sampling, training, and match testing.
    """GUI callback implementing the record -> train -> match state machine.

    The global ``step`` selects the phase:
      1 = record EEG frames from the OpenBCI GUI socket into ``<filename>EEG.txt``,
      2 = train the autoencoder "brainprint" key and save it as ``<filename>_key.h5``,
      3 = run the match test against a freshly sampled EEG stream,
      0 = reset the UI back to step 1.
    Globals carry state between successive button presses.
    """
    global filename, total_EEG_number_for_test
    global step, directly_load_model_flag, openfile_name, directly_load_filename, time_steps, directly_load_EEG_flag
    filename = ui.lineEdit.text()  # File-name prefix entered by the user.
    time_steps = 1
    if step == 3:  # Match-test phase.
        disable_Gui()
        ui.lcdNumber.display('---')
        QApplication.processEvents()
        ui.label.setText("匹配测试中...")
        ui.pushButton.setText("匹配测试中...")

        # Use either the model trained earlier in this session or one picked from disk.
        if directly_load_model_flag == False:
            #autoencoder=load_model(filename+'_key.h5')
            encoder = load_model(filename + '_key.h5')
            print("使用的是刚才训练好的模型:", filename)
        if directly_load_model_flag == True:
            directly_load_EEG_flag = False
            #autoencoder=load_model(directly_load_filename)
            encoder = load_model(directly_load_filename)
            print("使用的是直接读取的已经训练好的模型:", directly_load_filename)


############################# Baseline (reference) score setup -- begin ##############################################
        if directly_load_EEG_flag == True:
            f = open(directly_load_filename, 'r')  # Read the pre-recorded EEG data file.
            print("使用的是预先录取好的EEG文件:", directly_load_filename)
        if directly_load_EEG_flag == False:
            f = open(filename + 'EEG.txt', 'r')  # Read the EEG file recorded earlier in this session.
            print("使用的是刚才录取的EEG文件:", filename)
        All_EEG_data_lines = f.readlines(
        )  # NOTE(review): original author flagged a BUG here; f.close() below sits inside the loop -- TODO confirm intent.
        EEG_data_original = np.zeros(
            [total_EEG_number_for_test, total_EEG_Features])
        final_score = 0
        max_score = 0
        original_test_result = np.zeros(
            int(total_EEG_data_number / total_EEG_number_for_test))
        original_test_result_array = np.zeros(
            [total_EEG_number_for_test, encoding_dim])
        for times in range(
                int(total_EEG_data_number /
                    total_EEG_number_for_test)):  # 0, 1, 2, 3, ...
            for k in range(total_EEG_number_for_test):
                EEG_data_one_line = (
                    All_EEG_data_lines[k + times *
                                       total_EEG_number_for_test].split('A')
                )  # Each EEG line is 'A'-delimited into its 24 feature fields.
                for i in range(total_EEG_Features):
                    if len(
                            EEG_data_one_line
                    ) == total_EEG_Features + 1:  # Guards against lines whose trailing newline was occasionally lost in the stream.
                        EEG_data_original[k][i] = float(EEG_data_one_line[i])
                    else:
                        EEG_data_original[k][i] = EEG_data_original[k - 1][i]
                        print("发现一处错行!!")
            f.close()
            EEG_data_original = EEG_data_original.astype(
                'float32') / normal_number  # minmax_normalized
            # EEG_data_original -=EEG_data_original.mean(axis=0)
            # EEG_data_original/=EEG_data_original.std(axis=0)

            EEG_data_original = EEG_data_original.reshape(
                int(total_EEG_number_for_test / time_steps),
                int(time_steps * total_EEG_Features))
            x = encoder.predict(EEG_data_original)
            original_test_result[times] = sum(abs(sum(abs((x)))))
            original_test_result_array += x  # Accumulate encoded frames for the per-dimension average below.
            final_score += sum(abs(sum(abs((x)))))
            max_score = max(max_score, sum(abs(sum(abs((x))))))
        matched_average_score = (sum(original_test_result)) / (int(
            total_EEG_data_number / total_EEG_number_for_test))
        matched_median_score = np.median(original_test_result)
        print('==================设置的Median Score================',
              matched_median_score)
        print('==================设置的Average Score================',
              matched_average_score)
        original_test_result_array = original_test_result_array / (int(
            total_EEG_data_number / total_EEG_number_for_test))
        # print('Original_test_result_array.shape==',original_test_result_array.shape)
        # print('Original_test_result_array==',original_test_result_array)
        ############################# Baseline (reference) score setup -- end ##############################################

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((server_address, 5204))  # 5204 is the default outgoing port of the OpenBCI GUI server.
        EEG_data_for_test = np.zeros(
            [total_EEG_number_for_test,
             total_EEG_Features])  # Buffer for the sampled test EEG frames.
        match_counter = 0  # Counts the EEG frames that pass the test.
        max_score = 0
        final_score = 0
        test_times = 10
        test_result = np.zeros(test_times)
        test_result_array = np.zeros([total_EEG_number_for_test, encoding_dim])
        QApplication.processEvents()
        for times in range(test_times):  # Run the test ten times.
            for k in range(total_EEG_number_for_test):  # Read one EEG frame per socket message.
                EEG_data_one_line = (s.recv(1024).decode('utf-8')).split(
                    'A')  # Each EEG line is 'A'-delimited into its 24 feature fields.
                for i in range(total_EEG_Features):
                    if len(
                            EEG_data_one_line
                    ) == total_EEG_Features + 1:  # Guards against lines whose trailing newline was occasionally lost in the stream.
                        EEG_data_for_test[k][i] = float(EEG_data_one_line[i])
                    else:
                        EEG_data_for_test[k][i] = EEG_data_for_test[k - 1][i]
                        print("发现一处错行!!")

            EEG_data_for_test = EEG_data_for_test.astype(
                'float32') / normal_number  # Normalise to the same scale as training.
            # EEG_data_for_test -=EEG_data_for_test.mean(axis=0)
            # EEG_data_for_test/=EEG_data_for_test.std(axis=0)

            test = encoder.predict(EEG_data_for_test, verbose=1)
            test_result_array += test
            test_result[times] = sum(abs(sum(abs((test)))))
            final_score += test_result[times]
            max_score = max(max_score, test_result[times])

            ui.progressBar.setProperty("value", (times + 1) * 10)
            QApplication.processEvents()  # Keep the PyQt UI responsive.
        test_median = np.median(test_result)
        test_average = (final_score) / (test_times)
        print('==================现在测试出来的Median Score================',
              test_median)
        print('==================现在测试出来的Average Score=================',
              test_average)
        print('test_result_array.shape==', test_result_array.shape)
        test_score_for_display = (
            1 - abs((test_median + test_average) -
                    (matched_median_score + matched_average_score)) /
            (matched_median_score + matched_average_score)) * 100
        if test_score_for_display < 89:
            test_score_for_display -= 20
        test_result_array = test_result_array / (test_times)
        # print('test_result_array.shape==',test_result_array.shape)
        # print('test_result_array==',test_result_array)

        # print('标准欧几里得距离为:', moreBZOSdis(original_test_result_array,test_result_array))
        # print('标准欧几里得距离之和为:', sum(moreBZOSdis(original_test_result_array,test_result_array)))
        original_average_zip_features = sum(
            original_test_result_array) / total_EEG_number_for_test
        tester_average_zip_features = sum(
            test_result_array) / total_EEG_number_for_test
        print('看一下原始加密者的压缩层的平均值:', original_average_zip_features)
        print('看一下测试者的压缩层的平均值:', tester_average_zip_features)
        print(
            '看一下原始人VS测试者的压缩层的平均值的标准欧几里得距离为:',
            moreBZOSdis(original_average_zip_features,
                        tester_average_zip_features))
        test_score_for_display_2 = 0
        progressBar_dim_value = np.zeros(encoding_dim)
        for i in range(encoding_dim):
            # Per-dimension similarity: 100% when the tester's average bottleneck
            # activation equals the enrolled user's; can go negative for far-off values.
            test_score_for_display_2 += (
                1 - abs(original_average_zip_features[i] -
                        tester_average_zip_features[i]) /
                abs(original_average_zip_features[i])) / encoding_dim * 100
            progressBar_dim_value[i] = (
                1 - abs(original_average_zip_features[i] -
                        tester_average_zip_features[i]) /
                abs(original_average_zip_features[i])) * 100
            # if progressBar_dim_value[i]<=0:
            #     progressBar_dim_value[i]=0
            print("每一个维度的分数", progressBar_dim_value[i])

        ######################## Draw the per-dimension match scores ################################
        ui.progressBar_dim_0.setProperty("value",
                                         max(int(progressBar_dim_value[0]), 0))
        ui.progressBar_dim_1.setProperty("value",
                                         max(int(progressBar_dim_value[1]), 0))
        ui.progressBar_dim_2.setProperty("value",
                                         max(int(progressBar_dim_value[2]), 0))
        if encoding_dim >= 4:
            ui.progressBar_dim_3.setProperty(
                "value", max(int(progressBar_dim_value[3]), 0))
        if encoding_dim >= 5:
            ui.progressBar_dim_4.setProperty(
                "value", max(int(progressBar_dim_value[4]), 0))
        if encoding_dim >= 6:
            ui.progressBar_dim_5.setProperty(
                "value", max(int(progressBar_dim_value[5]), 0))
        if encoding_dim >= 7:
            ui.progressBar_dim_6.setProperty(
                "value", max(int(progressBar_dim_value[6]), 0))
        if encoding_dim >= 8:
            ui.progressBar_dim_7.setProperty(
                "value", max(int(progressBar_dim_value[7]), 0))
        if encoding_dim >= 9:
            ui.progressBar_dim_8.setProperty(
                "value", max(int(progressBar_dim_value[8]), 0))

        # for i in range(total_EEG_number_for_test):
        #     print('每一帧的标准欧几里得距离为:',moreBZOSdis(test_result_array[i],sum(original_test_result_array)/total_EEG_number_for_test))

        print('test_score_for_display=', test_score_for_display_2)
        ui.lcdNumber.display(test_score_for_display_2)
        result_text = "测试结束。最终匹配结果为" + str(test_score_for_display_2) + "%"
        ui.label.setText(result_text)
        ui.label.repaint()
        QApplication.processEvents()

        ui.pushButton.setText("重新开始匹配测试")
        ui.pushButton.repaint()

        enable_Gui()

    if step == 2:  # Machine-learning (training) phase.
        disable_Gui()
        rest_process_bar()
        ui.lcdNumber.display('---')
        QApplication.processEvents()  # Keep the PyQt UI responsive.
        ui.label.setText("开始机器学习你的脑纹。")
        ui.label.repaint()
        ui.pushButton.setText("2-机器学习中...")
        ui.pushButton.repaint()

        ######################################### Training -- begin ######################################

        ####################### Load the EEG data to train on ############################
        if directly_load_EEG_flag == True:
            f = open(directly_load_filename, 'r')  # Read the pre-recorded EEG data file.
            print("step-2:使用的是预先录取好的EEG文件:", directly_load_filename)
        if directly_load_EEG_flag == False:
            f = open(filename + 'EEG.txt', 'r')  # Read the EEG file recorded earlier in this session.
            print("使用的是刚才录取的EEG文件:", filename)

        print(2)
        All_EEG_data_lines = f.readlines()
        EEG_data = np.zeros([total_EEG_data_number, total_EEG_Features])
        time_steps = 1  # Force time_steps = 1.
        for k in range(total_EEG_data_number):
            EEG_data_one_line = (All_EEG_data_lines[k].split('A')
                                 )  # Each EEG line is 'A'-delimited into its 24 feature fields.
            for i in range(total_EEG_Features):
                if len(
                        EEG_data_one_line
                ) == total_EEG_Features + 1:  # Guards against lines whose trailing newline was occasionally lost in the stream.
                    EEG_data[k][i] = float(EEG_data_one_line[i])
                else:
                    EEG_data[k][i] = EEG_data[k - 1][i]
                    print("发现一处错行!!")
        f.close()
        EEG_data = EEG_data.astype(
            'float32') / normal_number  # minmax_normalized
        # EEG_data -=EEG_data.mean(axis=0)
        # EEG_data/=EEG_data.std(axis=0)

        EEG_data = EEG_data.reshape(int(total_EEG_data_number / time_steps),
                                    int(time_steps * total_EEG_Features))
        x_train = EEG_data

        ######################## Build the neural network ############################
        # 120-60-30-15-6-3 also seemed to work well.

        # this is our input placeholder
        input_img = Input(shape=(int(time_steps * total_EEG_Features), ))

        # Encoder layers.
        encoded = Dense(96,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(
                            input_img)  # 10e-7 was also tried for the l1 factor.
        encoded = Dense(48,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(encoded)
        encoded = Dense(24,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(encoded)
        encoded = Dense(12,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(encoded)
        encoded = Dense(6,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(encoded)
        encoder_output = Dense(encoding_dim)(encoded)

        # Decoder layers (mirror of the encoder).
        decoded = Dense(
            6, activation='relu',
            activity_regularizer=regularizers.l1(0))(encoder_output)
        decoded = Dense(12,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(decoded)
        decoded = Dense(24,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(decoded)
        decoded = Dense(48,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(decoded)
        decoded = Dense(96,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(decoded)
        decoded = Dense(int(time_steps * total_EEG_Features),
                        activation='softsign')(decoded)

        # Full autoencoder (trained), ...
        autoencoder = Model(inputs=input_img, outputs=decoded)
        autoencoder.summary()
        # ... and the encoder half (saved as the "key").
        encoder = Model(inputs=input_img, outputs=encoder_output)

        # compile autoencoder
        #adam=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        autoencoder.compile(optimizer='adam', loss='mse')
        encoder.compile(optimizer='adam', loss='mse')

        # NOTE(review): duplicate of the compile calls above -- redundant but harmless.
        #adam=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        autoencoder.compile(optimizer='adam', loss='mse')
        encoder.compile(optimizer='adam', loss='mse')
        # training
        print(x_train.shape)

        ######################## Network construction done ############################
        # Manually shuffle the training order.
        # p = np.random.permutation(range(len(x_train)))
        # x_train = x_train[p]
        np.random.shuffle(x_train)

        ################ This tb object enables TensorBoard logging ########################
        tensorBoard = TensorBoard(
            log_dir='./logs',  # log directory
            histogram_freq=
            1,  # Epoch frequency for histograms, 0 disables them. fit() must get validation_split/validation data, otherwise the program hangs (then only 0 works).
            batch_size=32,  # amount of data used to compute the histograms
            write_graph=True,  # whether to store the network graph
            write_grads=False,  # whether to visualize gradient histograms
            write_images=False,  # whether to visualize the weights
            embeddings_freq=0,
            embeddings_layer_names=None,
            embeddings_metadata=None)

        # From a shell: activate the conda env, cd into this code's directory, then run `tensorboard --logdir=logs/`
        # and open http://localhost:6006/ in Chrome to inspect the logs.
        ######################## Start training ############################
        early_stopping = EarlyStopping(monitor='val_loss',
                                       patience=int(training_times * 0.1),
                                       verbose=1,
                                       mode='min')
        training_process_bar = Training_process_bar()
        lossHistory = LossHistory()
        # NOTE: known quirk -- if this process trains twice, tensorBoard must be left out of fit()'s callbacks!
        autoencoder.fit(x_train,
                        x_train,
                        validation_split=0.33,
                        epochs=training_times,
                        batch_size=training_batch_size,
                        shuffle=True,
                        callbacks=[early_stopping, training_process_bar])
        #history.loss_plot('epoch')  # matplotlib training-curve plot; seems broken.

        encoder.save(filename + '_key.h5')
        print("把训练好的神经网络保存在:", filename, '_key.h5')

        directly_load_model_flag = False

        x = encoder.predict(
            EEG_data.reshape(int(total_EEG_data_number / time_steps),
                             int(time_steps * total_EEG_Features)))

        print('encoder (x)=', x)
        print('shape (x)=', x.shape)
        ######################## Training done; trained network saved ############################
        ui.label.setText("你的脑纹锁设置成功!")
        #ui.label.setText("你的脑纹锁设置成功!最终的损失率为"+final_result_loss+"  最终的准确率为"+final_result_acc)
        ui.label.repaint()
        ui.pushButton.setText("开始匹配测试")
        ui.pushButton.repaint()
        ui.progressBar.setProperty("value", 0)
        step = 3
        ui.label_2.setText("目前载入的是" + filename + "的已经训练好的脑纹")
        ui.label_2.repaint()
        enable_Gui()
    if step == 1:  #### EEG recording phase.
        ui.lcdNumber.display('---')
        disable_Gui()
        ui.label.setText("开始录制你的脑纹信息,请保持不动。")
        ui.label.update()
        rest_process_bar()
        ui.pushButton.setText("1-录制脑纹中...")
        ui.pushButton.repaint()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((server_address, 5204))
        f = open(filename + 'EEG.txt', 'w')
        for k in range(total_EEG_data_number):
            EEG_data = (s.recv(1024).decode('utf-8'))
            EEG_data_to_write = EEG_data + '\n'
            f.write(EEG_data_to_write)
            ui.progressBar.setProperty("value",
                                       k / total_EEG_data_number * 100 + 5)
            QApplication.processEvents()  # Keep the PyQt UI responsive.
        f.close()
        directly_load_EEG_flag = False
        step = 2
        ui.label_2.update()
        ui.label.setText("接下来神经网络开始学习并设置你的脑纹。")
        ui.label.update()
        ui.pushButton.setText("2-开始机器学习")
        ui.pushButton.repaint()
        enable_Gui()
        directly_load_EEG_flag = False
    if step == 0:
        ui.lcdNumber.display('---')
        ui.label.setText("返回第一步,请输入你的名字。")
        ui.label.update()
        ui.pushButton.setText("1-重新开始录制脑纹")
        ui.pushButton.repaint()
        ui.label_2.setText("目前没有载入任何已经训练好的脑纹。")
        ui.label_2.repaint()
        step = 1
Exemple #19
0
    loc=0.0, scale=1.0, size=df_train_output.shape)
# Corrupt the test set with additive Gaussian noise (denoising-autoencoder setup).
df_test = df_test_output + 0.5 * np.random.normal(
    loc=0.0, scale=1.0, size=df_test_output.shape)

# Hyper-parameters.
nb_epoch = 100
batch_size = 12
input_dim = df_train.shape[1]  # number of input features
encoding_dim = 32  # width of the first encoder layer
hidden_dim = int(encoding_dim / 2)  # bottleneck width
learning_rate = 1e-3  # NOTE(review): used below as the L1 activity-regularization factor, not as an optimizer learning rate -- confirm.

# Shallow symmetric autoencoder: input -> 32 -> 16 -> 16 -> input.
input_layer = Input(shape=(input_dim, ))
encoder = Dense(
    encoding_dim,
    activation="tanh",
    activity_regularizer=regularizers.l1(learning_rate))(input_layer)
encoder = Dense(hidden_dim, activation="relu")(encoder)
decoder = Dense(hidden_dim, activation='tanh')(encoder)
decoder = Dense(input_dim, activation='relu')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)

autoencoder.compile(metrics=['accuracy'],
                    loss='mean_squared_error',
                    optimizer='adam')

# Keep only the weights with the best validation performance.
cp = ModelCheckpoint(filepath="autoencoder_classifier.h5",
                     save_best_only=True,
                     verbose=0)

tb = TensorBoard(log_dir='./logs',
                 histogram_freq=0,
Exemple #20
0
#endregion
print('max=',EEG_data_B.max())
EEG_data_B = EEG_data_B.astype('float32') / 20      # minmax_normalized -- assumes 20 is the maximum amplitude; TODO confirm.

# Collapse (time_steps x features) into one flat vector per sample.
EEG_data_B=EEG_data_B.reshape(int(total_EEG_data_number/time_steps),int(time_steps*total_EEG_Features))
# x_train=EEG_data_B[:int(total_EEG_data_number/time_steps*0.75)]
# x_test=EEG_data_B[int(total_EEG_data_number/time_steps*0.75):]
x_train=EEG_data_B
# Compress the features down to ``encoding_dim`` dimensions (original comment said 2; the value used here is 10).
encoding_dim = 10

# this is our input placeholder
input_img = Input(shape=(int(time_steps*total_EEG_Features),))

# Encoder layers (the l1(0) activity regularizers are effectively disabled).
encoded = Dense(160, activation='relu',activity_regularizer=regularizers.l1(0))(input_img)  # 10e-7 was also tried for the l1 factor.
encoded = Dense(80, activation='relu',activity_regularizer=regularizers.l1(0))(encoded)
encoded = Dense(40, activation='relu',activity_regularizer=regularizers.l1(0))(encoded)

encoded = Dense(20, activation='relu',activity_regularizer=regularizers.l1(0))(encoded)

encoder_output = Dense(encoding_dim)(encoded)

# Decoder layers (mirror of the encoder).
decoded = Dense(20, activation='relu',activity_regularizer=regularizers.l1(0))(encoder_output)

decoded = Dense(40, activation='relu',activity_regularizer=regularizers.l1(0))(decoded)

decoded = Dense(80, activation='relu',activity_regularizer=regularizers.l1(0))(decoded)
decoded = Dense(160, activation='relu',activity_regularizer=regularizers.l1(0))(decoded)
decoded = Dense(int(time_steps*total_EEG_Features), activation='tanh')(decoded)
def main():
    """Train a tiny per-pixel background-subtraction network on image patches.

    Builds 7x7 multi-channel kernels (grey/GT/MOG/optical-flow planes) from the
    training sequences, fits a small Dense network that predicts the ground-truth
    value of the centre pixel, then slides the network over fresh sequences and
    displays the resulting foreground map next to MOG and optical-flow baselines.
    """
    sequence_count = 20
    image_count = 5
    input_count = 6  # 4 input image; 0: Grey, 1: GT, 2: MOG, 3: Optical flow
    height = 160
    width = 240
    kernelshape = 7  # 7x7 patch fed to the network

    teach_size = 300000
    test_size = 80000

    training_data_kernels = get_kernel_data(sequence_count, image_count,
                                            kernelshape, input_count, height,
                                            width)

    # Channels (4, 2, 3) = diff/MOG/flow planes as input; channel 1 (GT) centre pixel as label.
    network_input = training_data_kernels[:, (4, 2, 3), :, :]
    network_label = training_data_kernels[:, 1,
                                          int(kernelshape / 2) + 1,
                                          int(kernelshape / 2) + 1]

    #------------------
    #      keras
    #------------------

    input_img = Input(shape=(3, kernelshape, kernelshape))

    encoded1 = Dense(50,
                     activation='relu',
                     activity_regularizer=regularizers.l2(10e-3))(input_img)
    encoded2 = Dense(20,
                     activation='relu',
                     activity_regularizer=regularizers.l2(10e-3))(encoded1)
    encoded3 = Dense(15,
                     activation='relu',
                     activity_regularizer=regularizers.l1(10e-3))(encoded2)

    flatten = Flatten()(encoded3)

    decoded = Dense(1, activation='relu')(flatten)

    # this model maps an input to its reconstruction
    autoencoder = Model(input_img, decoded)

    #build
    autoencoder.compile(optimizer='rmsprop',
                        loss='binary_crossentropy',
                        metrics=['accuracy'])

    # Scale 8-bit pixel values into [0, 1].
    x_train = network_input[:teach_size].astype('float32') / 255.
    x_label = network_label[:teach_size].astype('float32') / 255.
    x_test = network_input[teach_size:teach_size +
                           test_size].astype('float32') / 255.
    x_test_label = network_label[teach_size:teach_size +
                                 test_size].astype('float32') / 255.

    autoencoder.fit(x_train,
                    x_label,
                    epochs=5,
                    batch_size=500,
                    shuffle=True,
                    validation_data=(x_test, x_test_label))

    # ------------------
    #      keras - end
    # ------------------

    # Build fresh evaluation sequences and derive the MOG / optical-flow planes per frame.
    check_result_data = np.zeros(
        (10, 10, input_count, height, width)).astype(np.uint8)
    check_result_data[:, :, :2, :, :] = get_data(10, 10, height, width)
    for seqi in check_result_data:
        fn_mog = cv.bgsegm.createBackgroundSubtractorMOG()
        i = 0
        for imi in seqi:
            imi[2] = fn_mog.apply(imi[0])
            if i <= 0:
                # First frame: no previous frame, so flow/diff are computed against itself (zero motion).
                flow = cv.calcOpticalFlowFarneback(imi[0], imi[0], None, 0.5,
                                                   2, 5, 3, 5, 1.2, 0)
                diff = imi[0] - imi[0]
                imi[5] = imi[2]
            else:
                flow = cv.calcOpticalFlowFarneback(seqi[(i - 1),
                                                        0], imi[0], None, 0.5,
                                                   2, 5, 3, 5, 1.2, 0)
                diff = np.abs(imi[0] - seqi[(i - 1), 0])
                imi[5] = (seqi[(i - 1), 2] / 2 + imi[2] / 2).astype(np.uint8)
            # im[4] = diff.astype(np.uint8)
            # Flow magnitude sqrt(fx^2 + fy^2), normalised into 0..255.
            a = np.einsum("ij,ij->ij", flow[:, :, 0], flow[:, :, 0])
            b = np.einsum("ij,ij->ij", flow[:, :, 1], flow[:, :, 1])
            imi[3] = (cv.normalize(np.sqrt(a + b), None, 0, 255,
                                   cv.NORM_MINMAX)).astype(np.uint8)
            i = i + 1

            # Slide the trained network over every interior pixel (one predict() per pixel -- slow).
            result_im = np.zeros((height, width))
            kernel_i = int(kernelshape / 2)
            for k in range(kernel_i, height - kernelshape):
                for j in range(kernel_i, width - kernelshape):
                    pred_im = np.array([
                        imi[(4, 2, 3), (k - kernel_i):(k + kernel_i + 1),
                            (j - kernel_i):(j + kernel_i + 1)]
                    ])
                    result_im[k, j] = autoencoder.predict(pred_im)

            print(np.max(result_im))
            print(np.sum(result_im))
            res_im = cv.normalize(result_im, None, 0, 255, cv.NORM_MINMAX)
            cv.imshow("NN 0", res_im.astype(np.uint8))
            cv.imshow("flow", imi[3])
            cv.imshow("mog", imi[5])
            cv.imshow("image", imi[0])
            cv.imshow("gt", imi[1])
            cv.waitKey()
### Configure the model
# Small CNN classifier: two conv/pool stages, a 128-unit L1-regularised Dense
# layer, then a 10-way softmax (MNIST-style digits).
model = Sequential()
model.add(
    Conv2D(32,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(X_train.shape[1], X_train.shape[2], 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
print(model.output_shape)
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(AveragePooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(
    Dense(128, activation='relu', kernel_regularizer=regularizers.l1(0.01)))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
hist = model.fit(X_train, y_train, batch_size=32, epochs=10)
print("The model has successfully trained")

# NOTE(review): the file saved is 'mnist_CNN.h5' but the message says 'mnist.h5'.
model.save('mnist_CNN.h5')
print("Saving the model as mnist.h5")

# Evaluate on the held-out test set; score = [loss, accuracy].
score = model.evaluate(X_test, y_test, verbose=1)
print(score)
"""
New Things to learn in CNN
# Map the token-id input through the (pre-built) embedding layer.
embedded_sequences = embedding_layer(sequence_input)

# In[9]:

print('Training model.')
# TensorBoard logging, one run directory per timestamp ``st``.
tbCallBack = TensorBoard(log_dir='./Graph/{}/'.format(st),
                         histogram_freq=0,
                         write_graph=True,
                         write_images=True)

# train a 1D convnet with global maxpooling
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = GaussianNoise(0.2)(x)  # noise injection for regularisation
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu',
           kernel_regularizer=regularizers.l1(0.05))(x)
x = GlobalMaxPooling1D()(x)
x = Dropout(0.5)(x)
x = Dense(128, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)

model = Model(sequence_input, preds)
model.compile(
    loss='categorical_crossentropy',
    optimizer='adagrad',
    metrics=['acc'],
)

model.fit(x_train,
          y_train,
          batch_size=BATCH_SIZE,
Exemple #24
0
# NOTE(review): ``regularizers`` is conventionally imported from ``keras``
# itself (``from keras import regularizers``), not ``keras.layers`` -- verify
# this works with the pinned Keras version.
from keras.layers import Input, Dense, regularizers
from keras.models import Model
from time import time
from keras.callbacks import TensorBoard
import matplotlib.pyplot as plt

# this is the size of our encoded representations
encoding_dim = 32  # 32 floats -> compression of factor 24.5, assuming the input is 784 floats

# this is our input placeholder
input_img = Input(shape=(784, ))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim,
                activation='relu',
                activity_regularizer=regularizers.l1(10e-9))(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)

# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim, ))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])
Exemple #25
0
    def train(self, sentences_vector: SentencesVector):
        """Build, compile and fit the 1D-CNN relation classifier.

        :param sentences_vector: pre-vectorised sentences carrying the embedded
            sequences, position vectors and one-hot classifications.
        :return: the trained Keras ``Model``.
        """
        inputer = sentences_vector.inputer
        config = inputer.config
        # BUG FIX: the original reassigned ``embedded_sequences`` (the Input
        # layer) to the concatenated tensor and then passed that intermediate
        # tensor to ``Model(inputs=...)``, which Keras rejects -- model inputs
        # must be Input layers.  Keep the Input in its own variable instead.
        sequence_input = Input(shape=(config.MAX_SEQUENCE_LENGTH,
                                      config.EMBEDDING_DIM * 3),
                               dtype='float32',
                               name="sequence_input")  # up to MAX_SEQUENCE_LENGTH tokens per sentence
        # embedded_sequences = inputer.getWordEmbedding()(sequence_input)  # sentence -> matrix, n_samples*100*300
        posi_input = Input(shape=(config.MAX_SEQUENCE_LENGTH,
                                  sentences_vector.position_vec.shape[2]),
                           name="posi_input")
        # pos_input = Input(shape=(config.MAX_SEQUENCE_LENGTH,sentences_vector.pos_vec.shape[2]), name="pos_input")
        merged = keras.layers.concatenate([sequence_input, posi_input])
        c1 = Dropout(rate=0.25)(merged)
        c1 = Conv1D(filters=150, kernel_size=3, activation='relu')(c1)
        c1 = MaxPooling1D(pool_size=98)(c1)
        c1 = Dropout(rate=0.25)(c1)
        c1 = Flatten()(c1)
        c1 = Dense(128, activation='relu')(c1)  # 128-unit fully-connected layer
        preds = Dense(len(inputer.types),
                      activation='softmax',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.001))(
                          c1)  # softmax classifier over relation types
        model = Model(inputs=[sequence_input, posi_input], outputs=preds)
        print(model.summary())
        adam = optimizers.Adam(lr=0.001, decay=0.0001)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=["categorical_accuracy"])

        # Lower ``epochs`` for a shorter run.

        # ModelCheckpoint saves the weights with the lowest validation loss
        # after each epoch (save_best_only=True).
        checkpoint = ModelCheckpoint(config.model_file_path,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min')
        # Stop once val_loss has not improved for ``patience`` epochs.
        early = EarlyStopping(monitor="val_loss", mode="min", patience=500)

        callbacks_list = [checkpoint, early]  # early
        # BUG FIX: the original fit() dict also fed 'pos_input', which has no
        # matching Input layer; only tensors for actual model inputs are passed.
        model.fit(
            {
                'sequence_input': sentences_vector.embedded_sequences,
                'posi_input': sentences_vector.position_vec
            },
            sentences_vector.classifications_vec,
            batch_size=50,
            epochs=50000,
            validation_split=0.2,
            # validation_data=({'sequence_input': x_test, 'posi_input': x_test_posi}, y_test),
            callbacks=callbacks_list)
        return model
Exemple #26
0
    def create(self,
               feature_size,
               layer_nodes,
               dropouts,
               no_of_output_class,
               activation='softmax',
               alpha=0.1,
               l2_norm=0,
               l1_norm=0):
        """Build a fully-connected classifier and store it in ``self.model``.

        :param feature_size: shape of the input features
        :param layer_nodes: 1-D numpy array with the number of nodes per hidden layer
        :param dropouts: per-layer dropout rates; 0 disables dropout for that layer
        :param no_of_output_class: number of classes in the output
        :param activation: hidden-layer activation; the string 'LeakyRelu'
            selects a LeakyReLU(alpha) layer instead of a built-in activation
        :param alpha: negative-slope coefficient for LeakyReLU
        :param l2_norm: L2 kernel-regularization constant
        :param l1_norm: L1 activity-regularization constant
        :return: None -- the finished model is assigned to ``self.model``
        """
        input_layer = Input(shape=feature_size)

        # Tracks the most recently inserted layer.
        last_layer = input_layer

        # The original four near-identical branches (first vs. later layer,
        # string activation vs. LeakyReLU) are collapsed into one
        # parameterised Dense call with identical behaviour.
        for i in range(layer_nodes.shape[0]):
            dense_kwargs = {
                'kernel_initializer': keras.initializers.glorot_normal(
                    seed=None),
                'kernel_regularizer': regularizers.l2(l2_norm),
                'activity_regularizer': regularizers.l1(l1_norm),
            }
            if i == 0:
                # NOTE(review): kept from the original; input_dim is redundant
                # with the functional-API Input layer above.
                dense_kwargs['input_dim'] = feature_size
            if activation != 'LeakyRelu':
                dense_kwargs['activation'] = activation
            last_layer = Dense(layer_nodes[i], **dense_kwargs)(last_layer)
            if activation == 'LeakyRelu':
                # LeakyReLU is a layer, not a string activation.
                last_layer = LeakyReLU(alpha=alpha)(last_layer)

            if dropouts[i] > 0:
                last_layer = Dropout(dropouts[i])(last_layer)

        # NOTE(review): the original unconditionally forces a sigmoid output,
        # ignoring ``activation`` for the final layer; behaviour kept as-is.
        activation = 'sigmoid'
        output_layer = Dense(
            no_of_output_class,
            activation=activation,
            kernel_initializer=keras.initializers.glorot_normal(seed=None),
            kernel_regularizer=regularizers.l2(l2_norm),
            activity_regularizer=regularizers.l1(l1_norm))(last_layer)
        self.model = Model(inputs=input_layer, outputs=output_layer)
Exemple #27
0
def action():  # Drives the whole workflow: recording, training, and match testing.
    """GUI button callback implementing a four-state workflow machine.

    Dispatches on the global ``step``:
      3 -> match-test: score live EEG frames against the trained autoencoder;
      2 -> train: fit an autoencoder on the recorded EEG file and save it;
      1 -> record: capture raw EEG frames from the OpenBCI GUI socket to a file;
      0 -> reset the UI back to the beginning of the workflow.

    Communicates entirely through module-level globals and the PyQt ``ui``
    object. User-visible strings are Chinese UI text and are left untouched.
    """
    global filename, total_EEG_number_for_test
    global step, directly_load_model_flag, openfile_name, directly_load_filename, time_steps, directly_load_EEG_flag
    filename = ui.lineEdit.text()  # Base name for all data/model files, taken from the text box.
    time_steps = 1
    if step == 3:  # Match-test phase
        disable_Gui()
        ui.lcdNumber.display('---')
        QApplication.processEvents()
        ui.label.setText("匹配测试中...")
        ui.pushButton.setText("匹配测试中...")

        # Pick the autoencoder: either the one just trained, or one loaded from disk.
        if directly_load_model_flag == False:
            autoencoder = load_model(filename + '_key.h5')
            print("使用的是刚才训练好的模型:", filename)
        if directly_load_model_flag == True:
            autoencoder = load_model(directly_load_filename)
            print("使用的是直接读取的已经训练好的模型:", directly_load_filename)


############################# Baseline-score calibration: begin ##############################################
        if directly_load_EEG_flag == True:
            f = open(directly_load_filename, 'r')  # EEG data recorded in advance
            print("使用的是预先录取好的EEG文件:", directly_load_filename)
        if directly_load_EEG_flag == False:
            # NOTE(review): strips the last 4 chars of filename here, unlike the
            # training branch which uses filename unmodified — confirm intended.
            f = open(filename[:-4] + 'EEG.txt', 'r')  # EEG data recorded just now
            print("使用的是刚才录取的EEG文件:", filename)
        All_EEG_data_lines = f.readlines()
        EEG_data_original = np.zeros(
            [total_EEG_number_for_test, total_EEG_Features])
        final_score = 0
        max_score = 0
        test_result = np.zeros(
            int(total_EEG_data_number / total_EEG_number_for_test))
        for test_times in range(
                int(total_EEG_data_number /
                    total_EEG_number_for_test)):  # 0,1,2,3....
            for k in range(total_EEG_number_for_test):
                EEG_data_one_line = (
                    All_EEG_data_lines[k + test_times *
                                       total_EEG_number_for_test].split('A')
                )  # Split one EEG record on the 'A' delimiter into its feature fields
                for i in range(total_EEG_Features):
                    if len(
                            EEG_data_one_line
                    ) == total_EEG_Features + 1:  # Guards against records whose trailing newline was lost during capture; reuse the previous row otherwise.
                        EEG_data_original[k][i] = float(EEG_data_one_line[i])
                    else:
                        EEG_data_original[k][i] = EEG_data_original[k - 1][i]
            # NOTE(review): close() sits inside the outer loop so it runs every
            # iteration; double-close is a no-op but it likely belongs outside.
            f.close()
            EEG_data_original = EEG_data_original.astype(
                'float32') / 10  # min-max normalization (raw values assumed in [0, 10] — TODO confirm)

            EEG_data_original = EEG_data_original.reshape(
                int(total_EEG_number_for_test / time_steps),
                int(time_steps * total_EEG_Features))
            x = autoencoder.predict(EEG_data_original)
            # NOTE(review): this calibration scores sum(|x|) of the raw network
            # output, while the live-test loop below scores the reconstruction
            # error sum(|input - x|) — confirm the asymmetry is intended.
            print('Total original-X=', sum(abs(sum(abs((x))))))
            test_result[test_times] = sum(abs(sum(abs((x)))))
            final_score += sum(abs(sum(abs((x)))))
            max_score = max(max_score, sum(abs(sum(abs((x))))))
        # result= model.predict(x,verbose = 1)
        # Baseline average: drop the single worst round, average the rest.
        matched_average_score = (final_score - max_score) / (
            int(total_EEG_data_number / total_EEG_number_for_test) - 1)
        matched_median_score = np.median(test_result)
        print('==================Average Score================',
              matched_average_score)
        print('==================Median Score================',
              matched_median_score)
        match_triger = matched_median_score * 1.2  # Acceptance threshold: 20% above baseline median.
        ############################# Baseline-score calibration: end ##############################################

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((server_address, 5204))  # 5204 is the OpenBCI GUI server's default send port.
        EEG_data_for_test = np.zeros(
            [total_EEG_number_for_test,
             total_EEG_Features])  # Holds the freshly sampled EEG test frames.
        match_counter = 0  # Counts frame groups that pass the match threshold.
        max_score = 0
        final_score = 0
        test_times = 10
        test_result = np.zeros(test_times)
        QApplication.processEvents()
        for times in range(test_times):  # Run ten live-test rounds.
            for k in range(total_EEG_number_for_test):  # Fill one frame group from the live socket stream.
                EEG_data_one_line = (s.recv(1024).decode('utf-8')).split(
                    'A')  # Split one EEG record on the 'A' delimiter into its feature fields
                for i in range(total_EEG_Features):
                    if len(
                            EEG_data_one_line
                    ) == total_EEG_Features + 1:  # Guards against records whose trailing newline was lost during capture; reuse the previous row otherwise.
                        EEG_data_for_test[k][i] = float(EEG_data_one_line[i])
                    else:
                        EEG_data_for_test[k][i] = EEG_data_for_test[k - 1][i]

            #print("EEG_data_for_test shape==",EEG_data_for_test.shape)
            EEG_data_for_test = EEG_data_for_test.astype('float32') / 10  # normalize, same scale as training
            test = autoencoder.predict(EEG_data_for_test, verbose=1)
            # Reconstruction error of this round's frames.
            test_result[times] = sum(abs(sum(abs((EEG_data_for_test - test)))))

            print('Total original-test=', test_result[times])
            final_score += test_result[times]
            max_score = max(max_score, test_result[times])
            #print('Total final_score=',final_score)
            #print('max_score=',max_score)
            ######################################
            if test_result[times] <= match_triger:
                match_counter = match_counter + 1
            ui.progressBar.setProperty("value", (times + 1) * 10)
            QApplication.processEvents()  # Keep the PyQt UI responsive.
            # result_text="当前帧组的匹配率为:"+str(test[0][0]*100)[:6]+"%"+"  整体匹配率已经达到"+str(match_counter/temp_total_EEG_number_for_test*100)+"%"
            # ui.label.setText(result_text)
            # ui.label.repaint()
            # ui.lcdNumber.display(match_counter/test_times*100)
            # QApplication.processEvents()  #用于PyQt界面的刷新,保证流畅程度。

        # total_EEG_number_for_test=temp_total_EEG_number_for_test #重新恢复全局变量total_EEG_number_for_test的值。
        print('==================Median Score================',
              np.median(test_result))
        print('==================Average Score=================',
              (final_score - max_score) / (test_times - 1))

        # Combine the median deviation and the worst-round-excluded average
        # deviation from the baseline into a 0-100 display score (50 points
        # each), capped at 100.
        test_score_for_display = min(
            ((1 - (np.median(test_result) - matched_median_score) /
              matched_median_score) * 50 +
             (1 - ((final_score - max_score) /
                   (test_times - 1) - matched_average_score) /
              matched_average_score) * 50), 100)
        print('test_score_for_display=', test_score_for_display)
        ui.lcdNumber.display(test_score_for_display)
        result_text = "测试结束。最终匹配结果为" + str(test_score_for_display) + "%"
        ui.label.setText(result_text)
        ui.label.repaint()
        QApplication.processEvents()

        ui.pushButton.setText("重新开始匹配测试")
        ui.pushButton.repaint()

        enable_Gui()

    if step == 2:  # Training phase
        disable_Gui()
        QApplication.processEvents()  # Keep the PyQt UI responsive.
        ui.label.setText("开始机器学习你的脑纹。")
        ui.label.repaint()
        ui.pushButton.setText("2-机器学习中...")
        ui.pushButton.repaint()

        ######################################### Training: begin ######################################

        ####################### Load the EEG data to train on ############################
        if directly_load_EEG_flag == True:
            f = open(directly_load_filename, 'r')  # EEG data recorded in advance
            print("使用的是预先录取好的EEG文件:", directly_load_filename)
        if directly_load_EEG_flag == False:
            f = open(filename + 'EEG.txt', 'r')  # EEG data recorded just now
            print("使用的是刚才录取的EEG文件:", filename)

        All_EEG_data_lines = f.readlines()
        EEG_data_B = np.zeros([total_EEG_data_number, total_EEG_Features])
        time_steps = 1  # time_steps is forced to 1.
        for k in range(total_EEG_data_number):
            EEG_data_one_line = (All_EEG_data_lines[k].split('A')
                                 )  # Split one EEG record on the 'A' delimiter into its feature fields
            for i in range(total_EEG_Features):
                if len(
                        EEG_data_one_line
                ) == total_EEG_Features + 1:  # Guards against records whose trailing newline was lost during capture; reuse the previous row otherwise.
                    EEG_data_B[k][i] = float(EEG_data_one_line[i])
                else:
                    EEG_data_B[k][i] = EEG_data_B[k - 1][i]
        f.close()
        EEG_data_B = EEG_data_B.astype('float32') / 10  # min-max normalization (raw values assumed in [0, 10] — TODO confirm)
        EEG_data_B = EEG_data_B.reshape(
            int(total_EEG_data_number / time_steps),
            int(time_steps * total_EEG_Features))
        x_train = EEG_data_B

        ######################## Build the autoencoder network #############################
        # Bottleneck width. (The original comment said "compress to 2 dims",
        # but the code uses 1 — the code is authoritative.)
        encoding_dim = 1

        # this is our input placeholder
        input_img = Input(shape=(int(time_steps * total_EEG_Features), ))

        # Encoder layers
        encoded = Dense(12,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(
                            input_img)  # l1(0) disables the penalty; 10e-7 was tried previously
        encoded = Dense(8,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(encoded)
        encoded = Dense(4,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(encoded)
        encoded = Dense(2,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(encoded)
        # encoded = Dense(20, activation='relu',activity_regularizer=regularizers.l1(0))(encoded)

        encoder_output = Dense(encoding_dim)(encoded)

        # Decoder layers (mirror of the encoder)
        decoded = Dense(
            2, activation='relu',
            activity_regularizer=regularizers.l1(0))(encoder_output)

        decoded = Dense(4,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(decoded)

        decoded = Dense(8,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(decoded)
        decoded = Dense(12,
                        activation='relu',
                        activity_regularizer=regularizers.l1(0))(decoded)
        decoded = Dense(int(time_steps * total_EEG_Features),
                        activation='softsign')(decoded)

        # Full autoencoder model (input -> reconstruction)
        autoencoder = Model(inputs=input_img, outputs=decoded)

        # Encoder-only model (input -> bottleneck code)
        encoder = Model(inputs=input_img, outputs=encoder_output)

        # compile autoencoder
        #adam=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        autoencoder.compile(optimizer='adam', loss='mse')
        encoder.compile(optimizer='adam', loss='mse')

        # NOTE(review): the two compile calls below duplicate the ones above —
        # redundant but harmless.
        #adam=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        autoencoder.compile(optimizer='adam', loss='mse')
        encoder.compile(optimizer='adam', loss='mse')
        # training
        print(x_train.shape)
        # Stop early once the loss stalls for 1% of the requested epochs.
        early_stopping = EarlyStopping(monitor='loss',
                                       patience=int(training_times * 0.01),
                                       verbose=1,
                                       mode='auto')
        ######################## Network construction complete #############################

        ################ TensorBoard logging callback ########################
        tb = TensorBoard(
            log_dir='./logs',  # log directory
            histogram_freq=0,  # epoch frequency for histograms; 0 disables (non-zero reportedly hangs the program)
            batch_size=32,  # batch size used to compute the histograms
            write_graph=True,  # store the network graph
            write_grads=False,  # visualize gradient histograms
            write_images=False,  # visualize parameters as images
            embeddings_freq=0,
            embeddings_layer_names=None,
            embeddings_metadata=None)

        # To inspect the logs: activate the conda env, cd to this directory,
        # run `tensorboard --logdir=logs/`, then open http://localhost:6006/
        # in Chrome. If tensorboard errors out, patch
        # lib/site-packages/tensorboard/manager.py (already done in the keras
        # env used here).
        ######################## Train #############################
        training_loop_times = 100  # Training is chunked so the progress bar can advance between chunks.
        for i in range(training_loop_times):  # This loop exists only to drive the progress bar.
            per_step_result = autoencoder.fit(
                x_train,
                x_train,
                epochs=int(max(training_times / training_loop_times, 1)),
                batch_size=training_batch_size,
                shuffle=True,
                callbacks=[early_stopping])
            per_step_result_for_display = str(
                per_step_result.history['loss'][0])
            print("Training loop times:", i, "/100")
            ui.label.setText("开始机器学习你的脑纹,目前的损失率为:" +
                             per_step_result_for_display)
            ui.progressBar.setProperty("value",
                                       (i + 1) * 100 / training_loop_times)
            QApplication.processEvents()  # Keep the PyQt UI responsive.

        #per_step_result=autoencoder.fit(x_train, x_train, epochs=int(max(training_times,1)), batch_size=training_batch_size, shuffle=True,callbacks=[early_stopping])
        #autoencoder.fit(x_train, x_train, epochs=training_times, batch_size=100, shuffle=True,callbacks=[early_stopping])

        autoencoder.save(filename + '_key.h5')
        print("把训练好的神经网络保存在:", filename, '_key.h5')
        x = autoencoder.predict(
            EEG_data_B.reshape(int(total_EEG_data_number / time_steps),
                               int(time_steps * total_EEG_Features)))
        print('original-X=', sum(abs(EEG_data_B - x)))
        print('Total original-X=', sum(abs(sum(abs((EEG_data_B - x))))))
        directly_load_model_flag = False

        x = encoder.predict(
            EEG_data_B.reshape(int(total_EEG_data_number / time_steps),
                               int(time_steps * total_EEG_Features)))

        print('encoder (x)=', x)
        ######################## Training done; trained network saved #############################
        ui.label.setText("你的脑纹锁设置成功!")
        #ui.label.setText("你的脑纹锁设置成功!最终的损失率为"+final_result_loss+"  最终的准确率为"+final_result_acc)
        ui.label.repaint()
        ui.pushButton.setText("开始匹配测试")
        ui.pushButton.repaint()
        ui.progressBar.setProperty("value", 0)
        step = 3  # Advance the state machine to match testing.
        ui.label_2.setText("目前载入的是" + filename + "的已经训练好的脑纹")
        ui.label_2.repaint()
        enable_Gui()
    if step == 1:  #### Record EEG
        disable_Gui()
        ui.label.setText("开始录制你的脑纹信息,请保持不动。")
        ui.label.update()
        ui.pushButton.setText("1-录制脑纹中...")
        ui.pushButton.repaint()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((server_address, 5204))  # OpenBCI GUI default send port.
        f = open(filename + 'EEG.txt', 'w')
        for k in range(total_EEG_data_number):
            EEG_data = (s.recv(1024).decode('utf-8'))
            EEG_data_to_write = EEG_data + '\r\n'
            f.write(EEG_data_to_write)
            ui.progressBar.setProperty("value",
                                       k / total_EEG_data_number * 100 + 5)
            QApplication.processEvents()  # Keep the PyQt UI responsive.
        f.close()
        step = 2  # Advance the state machine to training.
        ui.label_2.update()
        ui.label.setText("接下来神经网络开始学习并设置你的脑纹。")
        ui.label.update()
        ui.pushButton.setText("2-开始机器学习")
        ui.pushButton.repaint()
        enable_Gui()
        directly_load_EEG_flag = False
    if step == 0:  # Reset: send the user back to the first step.
        ui.label.setText("返回第一步,请输入你的名字。")
        ui.label.update()
        ui.pushButton.setText("1-重新开始录制脑纹")
        ui.pushButton.repaint()
        ui.label_2.setText("目前没有载入任何已经训练好的脑纹。")
        ui.label_2.repaint()
        step = 1
Exemple #28
0
    minimizer = "nadam"
    cost = "mean_squared_error"
elif architecture == 5:
    #Surprisingly good underfit
    inputs = Input(shape=(9, ))
    layer1 = Dense(64, activation="relu")(inputs)
    outputs = Dense(1, activation="sigmoid")(layer1)
    epochnum = 1
    minimizer = "rmsprop"
    cost = "mean_squared_error"
elif architecture == 6:
    #Underfit
    inputs = Input(shape=(9, ))
    layer1 = Dense(64,
                   activation="relu",
                   activity_regularizer=regularizers.l1(0.0001))(inputs)
    drop1 = Dropout(0.25)(layer1)
    layer2 = Dense(64,
                   activation="relu",
                   activity_regularizer=regularizers.l1(0.0001))(drop1)
    drop2 = Dropout(0.25)(layer2)
    outputs = Dense(1, activation="sigmoid")(drop2)
    epochnum = 64
    minimizer = "nadam"
    cost = "mean_squared_error"
elif architecture == 7:
    #Underfit
    inputs = Input(shape=(9, ))
    layer1 = Dense(64,
                   activation="relu",
                   activity_regularizer=regularizers.l2(0.0005))(inputs)