Example #1
    x.append(temp[day:day + 15, :])
    y.append(temp[day + 15:day + 20, 1])
x = np.array(x).reshape((-1, 15, 5))
y = np.array(y).reshape((-1, 5, 1))
x_test = x[-150:]
y_test = y[-150:]
x_val = x[-320:-170]
y_val = y[-320:-170]
x_train = x[:-340]
y_train = y[:-340]

x_train.shape
# %%
# build LSTM model
inputs = layers.Input(shape=(15, 5))
x = layers.LSTM(16, dropout=0.3, recurrent_dropout=0.0,
                return_sequences=False)(inputs)
x = layers.RepeatVector(5)(x)
x = layers.LSTM(16, dropout=0.3, recurrent_dropout=0.0,
                return_sequences=True)(x)
outputs = layers.Dense(1)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

model.summary()
plot_model(model, show_shapes=True)
# %%
# fit model
model.compile(loss='mse', optimizer='Adam')
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                            mode='auto',
                                            patience=5,
                                            verbose=1)
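# A hedged sketch (not part of the original snippet) of how the EarlyStopping
# callback above would typically be passed to model.fit together with the
# train/validation splits prepared earlier; epochs and batch_size are
# illustrative assumptions.
history = model.fit(x_train, y_train,
                    validation_data=(x_val, y_val),
                    epochs=100,
                    batch_size=32,
                    callbacks=[callback])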
Example #2
print('\nAfter reshaping:', X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)



''' HYPERPARAM '''
learning_rate=1e-4
batch_size=70
epochs=20

''' BUILDING THE SECOND MODEL '''

# using Sequential API
model = tf.keras.Sequential()   # instantiate a model using the Sequential class
                                # --> will construct a pipeline of layers
model.add(layers.LSTM(1, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(1))
#model.compile(loss='mean_squared_error', optimizer='adam')
model.compile(optimizer='adam',
              loss='mae')  # mean absolute error (the commented-out line above used MSE)

''' MODEL FIT '''
# fit model
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_test, Y_test), 
                     verbose=1, shuffle=False)
model.summary()
# Plot Model Loss 
# list all data in history
print(history.history.keys())
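# A hedged sketch of the loss plot the comments above refer to; it assumes
# matplotlib is available as plt (the import is not shown in the original snippet).
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('MAE loss')
plt.legend()
plt.show()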
Example #3
train_labels = labels_from_data(train_data)

# =========== Model Structure
# https://keras.io/
model = tf.keras.Sequential()

# Each input data point has max_timesteps timesteps, each with feature_num
# features, so the input shape (excluding batch_size) is
# (max_timesteps, feature_num) and matches each data point in train_data above.
model.add(layers.Input(shape=(max_timesteps, feature_num)))

# Because return_sequences=False, the LSTM returns only the last timestep's
# output: a single vector of feature_num values per sample. So the output
# shape (excluding batch_size) is (feature_num,), which should match each
# label produced by labels_from_data above.
model.add(layers.LSTM(feature_num, activation=None, return_sequences=False))

# =========== Model Prep
# https://www.tensorflow.org/guide/keras/overview#train_and_evaluate
model.compile(loss=losses.MSE, optimizer=optimizers.SGD(), metrics=[metrics.MSE])

# =========== Model Training
model.fit(train_data, train_labels, epochs=50)

# =========== Model Evaluation
# eval data
eval_data = np.array([ series(max_timesteps, feature_num) for i in range(0, eval_size) ])
eval_labels = labels_from_data(eval_data)
# evaluation
loss_and_metrics = model.evaluate(eval_data, eval_labels, batch_size=128)
print(loss_and_metrics)
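# Illustrative check (an assumption, not in the original): with
# return_sequences=False the model maps each (max_timesteps, feature_num)
# series to a single vector of feature_num values.
sample_pred = model.predict(eval_data[:1])
print(sample_pred.shape)  # expected: (1, feature_num)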
x_train = pad_sequences(x_train)
x_train = np.array(x_train)
y_train = np.array(y_train)
vocab_size = np.max(x_train)
print(y_train[0])
print(np.shape(x_train))
print(np.shape(y_train))
## Training data prepared
word_embeddings_dim = 50

input_layer = keras.Input(shape=(None, ), name="InputLayer")
embedding_layer_init = layers.Embedding(vocab_size + 1, word_embeddings_dim)
embedding_layer = embedding_layer_init(input_layer)

BiRNN = layers.Bidirectional(
    layers.LSTM(word_embeddings_dim, return_sequences=True))(embedding_layer)
LSTM_RNN = layers.LSTM(2 * word_embeddings_dim)(BiRNN)
LSTM_RNN = layers.Dropout(0.1)(LSTM_RNN)

first_dense = layers.Dense(2 * word_embeddings_dim,
                           name="firstDense")(LSTM_RNN)
first_dense = layers.Dropout(0.2)(first_dense)
second_dense = layers.Dense(word_embeddings_dim // 2,  # Dense units must be an integer
                            name="secondDense")(first_dense)
second_dense = layers.Dropout(0.2)(second_dense)

output_layer = layers.Dense(2, name="OutputLayer",
                            activation="softmax")(second_dense)

model = keras.Model(inputs=[input_layer], outputs=[output_layer])
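# A hedged sketch of how this two-class classifier might be compiled and
# trained on the padded x_train / y_train arrays prepared above; the optimizer,
# loss choice, batch size and epoch count are illustrative assumptions, and
# y_train is assumed to hold integer class labels (0 or 1).
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    validation_split=0.1,
                    batch_size=64,
                    epochs=5)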
Example #5
def cnn_lstm(input_data):
    wapid_dim = input_data[0].shape[1]
    wapid_input_layer = L.Input(shape=(wapid_dim, ))
    wap_emb = L.Embedding(520, 40)(wapid_input_layer)
    wap_emb = L.BatchNormalization()(wap_emb)
    wap_emb = L.Flatten()(wap_emb)

    rssi_f_dim = input_data[1].shape[1]
    rssi_f_input_layer = L.Input(shape=(rssi_f_dim, ))
    rssi_f = L.BatchNormalization()(rssi_f_input_layer)
    rssi_f_feature = L.Dense(16 * 40, activation='relu')(rssi_f)

    input_site_layer = L.Input(shape=(1, ))
    site_emb = L.Embedding(13, 1)(input_site_layer)
    site_emb = L.Flatten()(site_emb)
    site_emb = L.BatchNormalization()(site_emb)
    x = L.Concatenate(axis=1)([wap_emb, rssi_f_feature])
    x = L.BatchNormalization()(x)
    x = L.Dropout(0.2)(x)
    x = L.Dense(256, activation='relu')(x)
    x = L.Dropout(0.1)(x)
    x = L.Dense(128, activation='relu')(x)
    #x = L.Dropout(0.2)(x)
    x = L.Reshape((128, 1))(x)
    # x = L.Reshape((-1, 1))(x)
    x = L.BatchNormalization()(x)

    x = L.Conv1D(32, 3, strides=1, dilation_rate=1,
                 activation='relu')(x)  # input 128, output 126
    y = x
    print("CNN1", x.shape)
    x = L.BatchNormalization()(x)
    x = L.Conv1D(64, 5, strides=2, dilation_rate=1,
                 activation='relu')(x)  # input 126, output (126-5+0)/2+1 = 61
    x = L.BatchNormalization()(x)
    x = L.Conv1D(128, 7, strides=2, dilation_rate=1,
                 activation='relu')(x)  # input 61, output (61-7+0)/2+1 = 28
    x = L.BatchNormalization()(x)
    x = L.Conv1D(64, 9, strides=1, dilation_rate=1,
                 activation='relu')(x)  # input 28, output (28-9+0)/1+1 = 20
    x = L.BatchNormalization()(x)
    x = L.Conv1D(32, 5, strides=1, dilation_rate=1,
                 activation='relu')(x)  # input 20, output (20-5+0)/1+1 = 16
    x = L.BatchNormalization()(x)
    # x = L.Concatenate(axis=1)([x, y])
    # x = L.Dense(64, activation='relu')(x)
    x = L.Conv1D(1, 1, strides=1, dilation_rate=1,
                 activation='relu')(x)  # 1x1 conv collapses the channel dimension to 1
    print("after conv1D", x.shape)
    x = L.BatchNormalization()(x)
    x = L.LSTM(128, dropout=0, return_sequences=True, activation='sigmoid')(x)
    x = L.LSTM(16, dropout=0, return_sequences=False, activation='sigmoid')(x)
    print("after LSTM ", x.shape)
    x = L.Concatenate(axis=1)([x, site_emb])
    x = L.Dense(64, activation='relu')(x)
    x = L.Dense(16, activation='relu')(x)
    #x = L.Dropout(0.1)(x)
    output_layer_1 = L.Dense(2, name='xy', activation='sigmoid')(x)
    model = M.Model([wapid_input_layer, rssi_f_input_layer, input_site_layer],
                    [output_layer_1])
    model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),
                  loss='mse',
                  metrics=['mse'])
    return model
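# A minimal usage sketch with synthetic arrays; the shapes are assumptions made
# only to show how the three inputs of cnn_lstm line up, not the real dataset.
import numpy as np

n_samples, n_waps = 32, 16
dummy_input = [
    np.random.randint(0, 520, size=(n_samples, n_waps)),    # WAP ids
    np.random.uniform(-100, 0, size=(n_samples, n_waps)),   # RSSI features
    np.random.randint(0, 13, size=(n_samples, 1)),           # site id
]
model = cnn_lstm(dummy_input)
model.summary()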
Example #6
def get_noom_retain(input_vector_size, time_size, alpha_lstm_unit, beta_lstm_unit, 
             reshape_size, 
             alpha_activation='relu', 
             beta_activation='relu', 
             kernel_regularizer=0.01, 
             embedding=False,
             return_seq=True):
    
    ''' Build w-Retain model 

    Parameters
    ----------
    input_vector_size: Int. Size of the feature vector at each timestep (last axis of the input).
    time_size: Int. Number of timesteps in the input window.
    embedding: Bool. Whether to use an embedding layer as the first layer.
    alpha_lstm_unit: Int. Positive integer, dimensionality of the output space.
    alpha_activation: Activation function to use.
        Default: 'relu'
    kernel_regularizer: Positive float.
        Default: 0.01
    reshape_size: Int. Size of the reshaped (1, reshape_size) context vector.


    Return
    ---------
    Model: keras model object
    
    
    Example
    ---------
    model = get_noom_retain(input_vector_size=45,
                            time_size=48,
                            alpha_lstm_unit=5,
                            beta_lstm_unit=5,
                            reshape_size=45)
    model.summary()
    model.compile(optimizer=RMSprop(lr=1e-35), loss=weibull_loglik_discrete)
    '''
    
    
    from tensorflow.keras import backend as K
    from tensorflow.keras.layers import Activation
    from tensorflow.keras import layers, Input
    from tensorflow.keras.models import Model
    from tensorflow.keras import regularizers # To use regularization at each layer
    
    
    # Parameters check
    if type(input_vector_size) is not int:
        raise ValueError('Input size was not int')

    
    # Function define
    def reshape(data):
        '''Reshape the context vectors to 3D vector''' # 
        return K.reshape(x=data, shape=(K.shape(data)[0], 1, reshape_size)) # backend.shape(data)[0]

    
    # Reset graph
    K.clear_session()
    
    
    # Main
    if embedding == False:
        pass

    
    # Alpha(time-level weight) 
    alpha = layers.Bidirectional(layers.LSTM(alpha_lstm_unit, activation=alpha_activation, implementation=2, return_sequences=True), name='alpha') 
    alpha_dense = layers.Dense(1, kernel_regularizer=regularizers.l2(kernel_regularizer))

    # Beta (variable level weight)
    beta = layers.Bidirectional(layers.LSTM(beta_lstm_unit, activation=beta_activation, implementation=2, return_sequences=True), name='beta') 
    beta_dense = layers.Dense(input_vector_size, activation='sigmoid', kernel_regularizer=regularizers.l2(kernel_regularizer))

    
    # Model define
    x_input = Input(shape=(time_size, input_vector_size), name='X') # feature
    time_inv_input = Input(shape=(1,5), name='time_inv')
    
    # 2-1. alpha
    alpha_out = alpha(x_input)
    alpha_out = layers.TimeDistributed(alpha_dense, name='alpha_dense')(alpha_out) # apply the size-1 dense layer to the Bidirectional output at every timestep
    alpha_out = layers.Softmax(axis=1, name='alpha_softmax')(alpha_out) # corresponds to alpha_1, alpha_2, alpha_3, ... in the paper

    # 2-2. beta
    beta_out = beta(x_input)
    beta_out = layers.TimeDistributed(beta_dense, name='beta_dense')(beta_out) # corresponds to beta_1, beta_2, beta_3, ... in the paper

    # 3. Context vector
    c_t = layers.Multiply()([alpha_out, beta_out, x_input])
    context = layers.Lambda(lambda x: K.sum(x, axis=1) , name='Context_lambda_sum')(c_t) 
    context = layers.Lambda(reshape, name='contextReshaped')(context) # Reshape to 3d vector for consistency between Many to Many and Many to One 
    
    
    # concat 
    c_concat = layers.concatenate([context, time_inv_input])
    output = layers.Dense(1, name='output')(c_concat)
    output = layers.Lambda(lambda x: K.reshape(x, shape=(-1,1)))(output)

    model = Model([x_input, time_inv_input] , output)
    return model
Example #7
    if pad:
        encoder_sample_pred_text = pad_to_size(encoder_sample_pred_text, 64)
    encoder_sample_pred_text = tf.cast(encoder_sample_pred_text, tf.float32)
    predictions = model.predict(tf.expand_dims(encoder_sample_pred_text, 0))
    return predictions


'''
sample_text = ('The movie was awesome. The acting was incredible')
predictions = sample_predict(sample_text, pad=True, model=model)*100
print('this should be a positive review')
'''

model = keras.Sequential([
    layers.Embedding(encoder.vocab_size, 64),
    layers.Bidirectional(layers.LSTM(64, return_sequences=True)),
    layers.Bidirectional(layers.LSTM(32)),
    layers.Dense(64, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(1, activation='sigmoid')
])

model.compile(loss='binary_crossentropy',
              optimizer=k.optimizers.Adam(1e-4),
              metrics=['accuracy'])

history = model.fit(training_set,
                    epochs=4,
                    validation_data=test_set,
                    validation_steps=30)
Example #8
def rnnClassify100(data_group, y_label,  arch, model_eval, ratio):
    y_label = pd.Series(y_label)


    for index_label in range(1, 6):
        train_index, test_index = Index_split(index_label, model_eval)
        X_train = data_group[train_index]
        X_test = data_group[test_index]
        y_train = y_label[train_index]
        y_test = y_label[test_index]


        n_timesteps = 1800
        n_features = 4
        model = tf.keras.Sequential()
        model.add(layers.LSTM(256, input_shape=(n_timesteps, n_features)))
        model.add(layers.Dropout(0.4))
        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.Dense(16, activation='relu'))
        model.add(layers.Dense(1, activation='sigmoid'))

        model.compile(optimizer='adam',
                    loss='binary_crossentropy',
                    metrics=['accuracy'])


        fileM = "resAll-Inter-100"
        
        
        
        checkpoint_path = "%s/%s-%s-%s-%d-W%d.hdf5"%(arch,fileM ,arch, model_eval, index_label, ratio )
        cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                     save_best_only=True,
                                                     monitor='val_loss',
                                                     mode='min')

        hist= model.fit(x=X_train,y=y_train,
                epochs=200,
                validation_split=0.25,
                callbacks = [cp_callback],
                batch_size = 256,
                class_weight={1:ratio, 0:1}
                 )

        df = pd.DataFrame.from_dict(hist.history)
        df.to_csv("%s/%s-%s-%s-%d-W%d-history.csv"%(arch,fileM ,arch, model_eval, index_label, ratio ))

        model = tf.keras.models.load_model(checkpoint_path)
        loss, accuracy = model.evaluate(x=X_test, y=y_test)
        with open("%s/%s-%s-%s-%d-W%d-testAccurcy.txt"%(arch,fileM ,arch, model_eval, index_label, ratio), "w+") as f:
            f.write(str(accuracy))
            f.close()

        y_predict = model.predict_classes(data_group)
        y_predict = [ i[0] for i in y_predict.tolist()]
        df = pd.DataFrame.from_dict({"y_index":y_label.index.tolist(), "predict":y_predict})
        df.to_csv("%s/%s-%s-%s-%d-W%d-All.csv"%(arch,fileM ,arch, model_eval, index_label, ratio))


        y_predict = model.predict_classes(X_test)
        y_predict = [ i[0] for i in y_predict.tolist()]
        df = pd.DataFrame.from_dict({"y_index":y_test.index.tolist(), "predict":y_predict})
        df.to_csv("%s/%s-%s-%s-%d-W%d-test.csv"%(arch,fileM ,arch, model_eval, index_label, ratio))
Example #9
def build_ed_model(vocab_in, vocab_out, length_in, length_out, n_units, use_emb=None, unfreeze_emb=True, \
use_dec_emb=None, unfreeze_dec_emb=True):
    if use_emb is not None:
        assert n_units == use_emb.shape[
            -1], "Embedding dimension should match n_units."
    #Encoder
    encoder_input = Input(shape=(None, ))  # vocab_in+1))
    encoder_emb_layer = layers.Embedding(
        vocab_in,
        n_units,
        #input_length=length_in,
        #embeddings_initializer='lecun_uniform',
        mask_zero=True,
        #trainable=True
    )
    encoder_emb = encoder_emb_layer(encoder_input)
    encoder_lstm = layers.LSTM(n_units, return_state=True)
    #encoder_output = layers.RepeatVector(length_out)(encoder_output)
    encoder_output, state_h, state_c = encoder_lstm(encoder_emb)
    encoder_states = [state_h, state_c]
    #Decoder
    decoder_input = Input(shape=(None, ))  # vocab_out+1))
    decoder_emb_layer = layers.Embedding(vocab_out, n_units, mask_zero=True)
    decoder_emb = decoder_emb_layer(decoder_input)
    decoder_lstm = layers.LSTM(n_units,
                               return_sequences=True,
                               return_state=True)
    decoder_outputs, _, _ = decoder_lstm(decoder_emb,
                                         initial_state=encoder_states)
    #decoder_output = layers.TimeDistributed(layers.Dense(vocab_out+1, activation='softmax'))(decoder_output)

    decoder_dense = layers.Dense(vocab_out, activation='softmax')
    decoder_output = decoder_dense(decoder_outputs)

    model = models.Model([encoder_input, decoder_input], decoder_output)
    if use_emb is not None:
        # model.layers[1].set_weights([use_emb])
        # model.layers[1].trainable = unfreeze_emb
        encoder_emb_layer.set_weights([use_emb])
        encoder_emb_layer.trainable = unfreeze_emb
    if use_dec_emb is not None:
        decoder_emb_layer.set_weights([use_dec_emb])
        decoder_emb_layer.trainable = unfreeze_dec_emb

    # Encoder&Decoder for predictions
    encoder_model = models.Model(encoder_input, encoder_states)
    decoder_state_input_h = Input(shape=(n_units, ))
    decoder_state_input_c = Input(shape=(n_units, ))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]

    decoder_emb2 = decoder_emb_layer(decoder_input)

    decoder_outputs2, state_h2, state_c2 = decoder_lstm(
        decoder_emb2, initial_state=decoder_states_inputs)
    decoder_states2 = [state_h2, state_c2]
    decoder_outputs2 = decoder_dense(decoder_outputs2)

    decoder_model = models.Model([decoder_input] + decoder_states_inputs,
                                 [decoder_outputs2] + decoder_states2)

    return model, encoder_model, decoder_model
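# A hedged inference sketch (not in the original) showing how the returned
# encoder_model / decoder_model are typically used for greedy decoding;
# start_token, end_token and max_len are hypothetical values that depend on the
# target vocabulary.
import numpy as np

def greedy_decode(input_seq, encoder_model, decoder_model,
                  start_token=1, end_token=2, max_len=20):
    states = encoder_model.predict(input_seq)  # [state_h, state_c]
    target = np.array([[start_token]])
    decoded = []
    for _ in range(max_len):
        output, h, c = decoder_model.predict([target] + states)
        token = int(np.argmax(output[0, -1, :]))
        if token == end_token:
            break
        decoded.append(token)
        target = np.array([[token]])
        states = [h, c]
    return decoded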
 def __init__(self):
     super(LSTM, self).__init__()
     # RNN (LSTM) hidden layer.
     self.lstm_layer = layers.LSTM(units=num_units)
     self.out = layers.Dense(num_classes)
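 # A hedged sketch of the forward pass that usually accompanies this
 # constructor; num_units and num_classes are assumed to be defined elsewhere.
 def call(self, x, is_training=False):
     x = self.lstm_layer(x)
     x = self.out(x)
     if not is_training:
         # return probabilities at inference time, raw logits during training
         x = tf.nn.softmax(x)
     return x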
Example #11
    def __init__(self,
                 input_shape=(51, 51, 1),
                 conv_layers_dimensions=(16, 32, 64, 128),
                 dense_layers_dimensions=(32, ),
                 rnn_layers_dimensions=(32, ),
                 return_sequences=False,
                 output_activation=None,
                 number_of_outputs=3,
                 **kwargs):

        ### INITIALIZE DEEP LEARNING NETWORK
        network = models.Sequential()

        ### CONVOLUTIONAL BASIS
        for conv_layer_number, conv_layer_dimension in zip(
                range(len(conv_layers_dimensions)), conv_layers_dimensions):

            # add convolutional layer
            conv_layer_name = 'conv_' + str(conv_layer_number + 1)
            conv_layer = layers.Conv2D(conv_layer_dimension, (3, 3),
                                       activation='relu',
                                       padding="same",
                                       name=conv_layer_name)
            if conv_layer_number == 0:
                network.add(
                    layers.TimeDistributed(conv_layer,
                                           input_shape=input_shape))
            else:
                network.add(layers.TimeDistributed(conv_layer))

            # add pooling layer
            pooling_layer_name = 'pooling_' + str(conv_layer_number + 1)
            pooling_layer = layers.MaxPooling2D(2, 2, name=pooling_layer_name)
            network.add(layers.TimeDistributed(pooling_layer))
        # FLATTENING
        flatten_layer_name = 'flatten'
        flatten_layer = layers.Flatten(name=flatten_layer_name)
        network.add(layers.TimeDistributed(flatten_layer))

        # DENSE TOP
        for dense_layer_number, dense_layer_dimension in zip(
                range(len(dense_layers_dimensions)), dense_layers_dimensions):

            # add dense layer
            dense_layer_name = 'dense_' + str(dense_layer_number + 1)
            dense_layer = layers.Dense(dense_layer_dimension,
                                       activation='relu',
                                       name=dense_layer_name)
            network.add(layers.TimeDistributed(dense_layer))

        for rnn_layer_number, rnn_layer_dimension in zip(
                range(len(rnn_layers_dimensions)), rnn_layers_dimensions):

            # add recurrent (LSTM) layer
            rnn_layer_name = 'rnn_' + str(rnn_layer_number + 1)
            rnn_layer = layers.LSTM(rnn_layer_dimension,
                                    name=rnn_layer_name,
                                    return_sequences=rnn_layer_number <
                                    len(rnn_layers_dimensions) - 1
                                    or return_sequences)
            network.add(rnn_layer)

        # OUTPUT LAYER

        output_layer = layers.Dense(number_of_outputs,
                                    activation=output_activation,
                                    name='output')
        if return_sequences:
            network.add(layers.TimeDistributed(output_layer))
        else:
            network.add(output_layer)

        super().__init__(network, **kwargs)
    def __call__(self, input_t, **kwarg):

        resnet_layer_1 = residual_net(channel_size=32, kernel_size=(3, 3))
        resnet_layer_2 = residual_net(channel_size=64, kernel_size=(3, 3))
        resnet_layer_3 = residual_net(channel_size=128, kernel_size=(3, 3))
        resnet_layer_4 = residual_net(channel_size=256, kernel_size=(3, 3))

        resnet_layer_5 = residual_net(channel_size=32, kernel_size=(3, 3))
        resnet_layer_6 = residual_net(channel_size=64, kernel_size=(3, 3))
        resnet_layer_7 = residual_net(channel_size=128, kernel_size=(3, 3))
        resnet_layer_8 = residual_net(channel_size=256, kernel_size=(3, 3))

        resnet_layer_9 = residual_net(channel_size=512, kernel_size=(3, 3))

        mhab_layer_1 = Multihead_Attention_Block_1()
        mhab_layer_2 = Multihead_Attention_Block_1()
        mhab_layer_3 = Multihead_Attention_Block_1()

        mhab_layer_4 = Multihead_Attention_Block_2()
        mhab_layer_5 = Multihead_Attention_Block_2()
        mhab_layer_6 = Multihead_Attention_Block_2()
        mhab_layer_7 = Multihead_Attention_Block_2()

        cnn_1D_layer = cnn_block_1D_seq(kernel_size=5, strides_size=2)
        cnn_1D_layer_2 = cnn_block_1D(kernel_size=5, strides_size=2)

        x = input_t

        # x = cnn_1D_layer(x)
        # x = cnn_1D_layer_2(x)
        print(x.shape)

        # x = layers.Conv2D(32, (3, 3), padding='same')(x)
        x = resnet_layer_1(x)
        x = layers.MaxPool2D((2, 2), padding='same')(x)
        # x = tf.nn.relu(x)
        x = layers.BatchNormalization()(x)
        # x = layers.Dropout(0.2)(x)

        # x = layers.Conv2D(64, (3, 3), padding='same')(x)
        x = resnet_layer_2(x)
        x = layers.MaxPool2D((2, 2), padding='same')(x)
        # x = tf.nn.relu(x)
        # x = layers.Dropout(0.2)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.2)(x)

        # x = layers.Conv2D(128, (3, 3), padding='same')(x)
        x = resnet_layer_3(x)
        x = layers.MaxPool2D((2, 2), padding='same')(x)
        # x = layers.MaxPool2D((4, 4), padding='same')(x)
        # x = tf.nn.relu(x)
        x = layers.BatchNormalization()(x)
        # x = layers.Dropout(0.2)(x)

        # x = layers.Conv2D(256, (3, 3), padding='same')(x)
        x = resnet_layer_4(x)
        x = layers.MaxPool2D((2, 2), padding='same')(x)
        # x = layers.MaxPool2D((4, 4), padding='same')(x)
        # x = tf.nn.relu(x)
        # x = layers.Dropout(0.2)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.2)(x)
        # x = layers.LayerNormalization()(x)

        y = self.convert_shape(x)

        # y = mhab_layer_1(y)
        # y = mhab_layer_2(y)
        # y = mhab_layer_3(y)
        y = mhab_layer_4(y)
        y = mhab_layer_5(y)
        y = mhab_layer_6(y)
        # y = mhab_layer_7(y)

        # z = layers.Conv2D(256, (3, 3), padding='same')(x)
        # z = tf.nn.relu(z)
        # z = layers.BatchNormalization()(z)
        # z = layers.Dropout(0.2)(z)

        # z = layers.Conv2D(256, (3, 3), padding='same')(z)
        # z = tf.nn.relu(z)
        # z = layers.Dropout(0.2)(z)
        # z = layers.BatchNormalization()(z)
        # z = layers.Dropout(0.2)(z)

        z = resnet_layer_8(x)
        # z = layers.BatchNormalization()(z)
        # z = resnet_layer_4(z)
        # z = layers.LayerNormalization()(z)

        # z = layers.Dropout(0.2)(z)

        z = self.convert_shape(z)

        x = tf.math.add(y, z)
        # w = tf.math.add(y, z)
        # x = tf.math.add(v, w)
        x = tf.nn.relu(x)

        x = layers.BatchNormalization()(x)
        # x = layers.LayerNormalization()(x)

        # x = layers.LSTM(128, return_sequences=True)(x)
        x = layers.LSTM(256)(x)
        # x = layers.Bidirectional(layers.LSTM(128))(x)

        # x = layers.GlobalAveragePooling2D()(x)
        x = layers.Flatten()(x)
        # x = layers.Dropout(0.2)(x)
        # x = layers.Dense(256, activation='linear')(x)
        x = layers.Dropout(0.3)(x)
        # x = layers.Dense(128, activation='relu')(x)
        # x = layers.Dropout(0.2)(x)
        # x = layers.BatchNormalization()(x)

        return x
Example #13
def CRNN_model(crnn_mode):
    batchSize = ArchitectureConfig.BATCH_SIZE
    maxTextLen = ArchitectureConfig.MAX_TEXT_LENGTH
    img_size = ArchitectureConfig.IMG_SIZE

    input_data = Input(name='the_input',
                       shape=img_size + (1, ),
                       dtype='float32')

    conv_1 = layers.Conv2D(filters=64,
                           kernel_size=5,
                           padding='same',
                           activation='relu')(input_data)

    pool_1 = layers.MaxPool2D(pool_size=2, padding='same')(conv_1)

    conv_2 = layers.Conv2D(filters=128,
                           kernel_size=5,
                           padding='same',
                           activation='relu')(pool_1)

    pool_2 = layers.MaxPool2D(pool_size=(1, 2), strides=(1, 2),
                              padding='same')(conv_2)

    conv_3 = layers.Conv2D(filters=128,
                           kernel_size=3,
                           padding='same',
                           activation='relu')(pool_2)

    batch_norm_3 = layers.BatchNormalization()(conv_3)

    batch_norm_3 = layers.Activation('relu')(batch_norm_3)

    pool_3 = layers.MaxPool2D(pool_size=2, strides=(2, 2),
                              padding='same')(batch_norm_3)

    conv_4 = layers.Conv2D(filters=256,
                           kernel_size=3,
                           padding='same',
                           activation='relu')(pool_3)

    conv_5 = layers.Conv2D(filters=256,
                           kernel_size=3,
                           padding='same',
                           activation='relu')(conv_4)

    pool_5 = layers.MaxPool2D(pool_size=(1, 2), strides=(1, 2),
                              padding='same')(conv_5)

    conv_6 = layers.Conv2D(filters=512,
                           kernel_size=3,
                           padding='same',
                           activation='relu')(pool_5)

    batch_norm_6 = layers.BatchNormalization()(conv_6)

    batch_norm_6 = layers.Activation('relu')(batch_norm_6)

    pool_6 = layers.MaxPool2D(pool_size=(1, 2), strides=(1, 2),
                              padding='same')(batch_norm_6)

    conv_7 = layers.Conv2D(filters=512,
                           kernel_size=3,
                           padding='same',
                           activation='relu')(pool_6)

    pool_7 = layers.MaxPool2D(pool_size=(1, 2), strides=(1, 2),
                              padding='same')(conv_7)

    cnnOut = tf.squeeze(pool_7, axis=[2])

    blstm_8 = layers.Bidirectional(
        layers.LSTM(units=256, return_sequences=True))(cnnOut)

    blstm_9 = layers.Bidirectional(
        layers.LSTM(units=256, return_sequences=True))(blstm_8)

    # transform the RNN output into per-timestep character activations;
    # 216 = number of unique labels (characters)
    inner = layers.Dense(216, kernel_initializer='he_normal',
                         name='dense2')(blstm_9)
    y_pred = layers.Activation('softmax', name='softmax')(inner)

    if crnn_mode == CRNN_MODE.inference:
        return Model(inputs=input_data, outputs=y_pred)

    Model(inputs=input_data, outputs=y_pred).summary()

    labels = Input(name='the_labels',
                   shape=[ArchitectureConfig.MAX_TEXT_LENGTH],
                   dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    #Loss function
    loss_out = layers.Lambda(ctc_lambda_func, output_shape=(1, ), name='ctc')(
        [y_pred, labels, input_length, label_length])

    model = Model(inputs=[input_data, labels, input_length, label_length],
                  outputs=loss_out)

    y_func = backend.function([input_data], [y_pred])
    return model, y_func
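# ctc_lambda_func is referenced in the Lambda layer above but not shown; a
# typical definition for this CRNN setup looks like the following sketch
# (an assumption, using the Keras backend CTC helper).
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    # y_pred has shape (batch, time_steps, num_labels)
    return backend.ctc_batch_cost(labels, y_pred, input_length, label_length)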
def lstm_layer(hidden_dim, dropout):
    return L.Bidirectional(
        L.LSTM(hidden_dim,
               dropout=dropout,
               return_sequences=True,
               kernel_initializer='orthogonal'))
Example #15
max_features = 10000    # number of words to consider as features
maxlen = 500    # cuts off texts after this number of words (among the max_features most common words)

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# reverse sequences
x_train = [x[::-1] for x in x_train]
x_test = [x[::-1] for x in x_test]

# pad sequences
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

input_tensor = layers.Input((maxlen,))
kmodel = layers.Embedding(max_features, 128)(input_tensor)
kmodel = layers.LSTM(32)(kmodel)
output_tensor = layers.Dense(1, activation='sigmoid')(kmodel)
model = models.Model(input_tensor, output_tensor)

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)
max(history.history['val_acc'])
'''
We get nearly the same performance as with the non-reversed order, which supports
the assumption that in natural language word order does matter, but which
particular order you use (chronological or reversed) is not crucial.

To instantiate a bidirectional RNN in Keras we use the `Bidirectional` layer,
which takes a recurrent layer instance as its first argument. It creates a second,
separate instance of this recurrent layer and uses one instance to process the input
sequences in chronological order and the other to process the input sequences
Example #16
# Get the dummy data
n_samples = 1000
n_words = 100
text = np.random.randint(
    1, text_vocabulary_size, size=(n_samples, n_words)
)
question = np.random.randint(
    1, question_vocabulary_size, size=(n_samples, n_words)
)
answers = np.random.randint(0, 2, size=(n_samples, answer_vocabulary_size))  # upper bound of randint is exclusive

# First Input
text_input = Input(shape=(None,), dtype='int32', name='text')
embedded_text = layers.Embedding(text_vocabulary_size, 64)(text_input)
encoded_text = layers.LSTM(32)(embedded_text)

# Second Input
question_input = Input(shape=(None,), dtype='int32', name='question')
embedded_question = layers.Embedding(question_vocabulary_size, 32)(
    question_input
)
encoded_question = layers.LSTM(16)(embedded_question)

# Concatenate output
concatenated = layers.concatenate([encoded_text, encoded_question], axis=-1)
answer = layers.Dense(answer_vocabulary_size, activation='softmax')(
    concatenated
)

# Compile
Example #17
    def __init__(self,
                 n_tensors=20,
                 n_hidden=50,
                 n_layers=2,
                 dropout_rate=0.5,
                 rnn_type='gru',
                 **kwargs):
        super(CMLA, self).__init__(**kwargs)
        self.n_tensors = n_tensors
        self.n_hidden = n_hidden
        self.n_layers = n_layers
        self.dropout_rate = dropout_rate
        self.rnn_type = rnn_type

        self.dot_layer = layers.Dot(axes=0)

        if self.rnn_type == 'gru':
            self.rnn_aspect_layer = layers.GRU(
                units=2 * self.n_tensors,
                recurrent_activation='sigmoid',
                return_sequences=True,
                use_bias=False,
                kernel_initializer=initializers.random_uniform(-0.2, 0.2),
                recurrent_initializer=initializers.random_uniform(-0.2, 0.2),
                dropout=self.dropout_rate)

            self.rnn_opinion_layer = layers.GRU(
                units=2 * self.n_tensors,
                recurrent_activation='sigmoid',
                return_sequences=True,
                use_bias=False,
                kernel_initializer=initializers.random_uniform(-0.2, 0.2),
                recurrent_initializer=initializers.random_uniform(-0.2, 0.2),
                dropout=self.dropout_rate)

        elif self.rnn_type == 'lstm':
            self.rnn_aspect_layer = layers.LSTM(
                units=2 * self.n_tensors,
                recurrent_activation='sigmoid',
                return_sequences=True,
                use_bias=False,
                kernel_initializer=initializers.random_uniform(-0.2, 0.2),
                recurrent_initializer=initializers.random_uniform(-0.2, 0.2),
                dropout=self.dropout_rate)

            self.rnn_opinion_layer = layers.LSTM(
                units=2 * self.n_tensors,
                recurrent_activation='sigmoid',
                return_sequences=True,
                use_bias=False,
                kernel_initializer=initializers.random_uniform(-0.2, 0.2),
                recurrent_initializer=initializers.random_uniform(-0.2, 0.2),
                dropout=self.dropout_rate)

        elif self.rnn_type == 'bilstm':
            self.rnn_aspect_layer = layers.Bidirectional(
                layers.LSTM(units=2 * self.n_tensors,
                            recurrent_activation='sigmoid',
                            return_sequences=True,
                            use_bias=False,
                            kernel_initializer=initializers.random_uniform(
                                -0.2, 0.2),
                            recurrent_initializer=initializers.random_uniform(
                                -0.2, 0.2),
                            dropout=self.dropout_rate))

            self.rnn_opinion_layer = layers.Bidirectional(
                layers.LSTM(units=2 * self.n_tensors,
                            recurrent_activation='sigmoid',
                            return_sequences=True,
                            use_bias=False,
                            kernel_initializer=initializers.random_uniform(
                                -0.2, 0.2),
                            recurrent_initializer=initializers.random_uniform(
                                -0.2, 0.2),
                            dropout=self.dropout_rate))

        elif self.rnn_type == 'bigru':
            self.rnn_aspect_layer = layers.Bidirectional(
                layers.GRU(units=2 * self.n_tensors,
                           recurrent_activation='sigmoid',
                           return_sequences=True,
                           use_bias=False,
                           kernel_initializer=initializers.random_uniform(
                               -0.2, 0.2),
                           recurrent_initializer=initializers.random_uniform(
                               -0.2, 0.2),
                           dropout=self.dropout_rate))

            self.rnn_opinion_layer = layers.Bidirectional(
                layers.GRU(units=2 * self.n_tensors,
                           recurrent_activation='sigmoid',
                           return_sequences=True,
                           use_bias=False,
                           kernel_initializer=initializers.random_uniform(
                               -0.2, 0.2),
                           recurrent_initializer=initializers.random_uniform(
                               -0.2, 0.2),
                           dropout=self.dropout_rate))
        else:
            raise ValueError(
                "Unknown rnn type. Valid types are gru, lstm, bilstm, or bigru."
            )
Example #18
    data_y.append(_y)
print(_x, "->", _y)

train_size = int(len(data_y) * 0.7)
train_x = np.array(data_x[0:train_size])
train_y = np.array(data_y[0:train_size])

test_size = len(data_y) - train_size
test_x = np.array(data_x[train_size:len(data_x)])
test_y = np.array(data_y[train_size:len(data_y)])

# create the model
model = Sequential()
model.add(
    layers.LSTM(units=10,
                activation='relu',
                return_sequences=True,
                input_shape=(window_size, data_size)))
model.add(layers.Dropout(0.1))
model.add(layers.LSTM(units=10, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(units=1))
model.summary()

model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(train_x, train_y, epochs=20, batch_size=200)
pred_y = model.predict(test_x)

# Visualising the results
plt.figure()
plt.plot(test_y, color='red', label='real SEC stock price')
plt.plot(pred_y, color='blue', label='predicted SEC stock price')
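# A small hedged completion of the plot above: label the axes, show the legend
# and render the figure.
plt.xlabel('time step')
plt.ylabel('stock price')
plt.legend()
plt.show()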
Example #19
        x2,
        x3,
        x4,
        x5,
        x6,
        x7,
        x8,
    ], -2)

    print("**************************", x)

    x = layers.BatchNormalization()(x)

    x = layers.Bidirectional(
        layers.LSTM(128,
                    return_sequences=True,
                    dropout=0.25,
                    recurrent_dropout=0.25))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Bidirectional(
        layers.LSTM(128,
                    return_sequences=True,
                    dropout=0.25,
                    recurrent_dropout=0.25))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Bidirectional(
        layers.LSTM(128, dropout=0.25, recurrent_dropout=0.25))(x)
    x = layers.BatchNormalization()(x)

    # x = layers.LSTM(256, return_sequences=True, dropout=0.25, recurrent_dropout=0.25)(x)
    # x = layers.BatchNormalization()(x)
    # x = layers.LSTM(256, return_sequences=True, dropout=0.25, recurrent_dropout=0.25)(x)
"""

"""
## Mask propagation in the Functional API and Sequential API

When using the Functional API or the Sequential API, a mask generated by an `Embedding`
or `Masking` layer will be propagated through the network for any layer that is
capable of using them (for example, RNN layers). Keras will automatically fetch the
mask corresponding to an input and pass it to any layer that knows how to use it.

For instance, in the following Sequential model, the `LSTM` layer will automatically
receive a mask, which means it will ignore padded values:
"""

model = keras.Sequential(
    [layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True), layers.LSTM(32),]
)

"""
This is also the case for the following Functional API model:
"""

inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
outputs = layers.LSTM(32)(x)

model = keras.Model(inputs, outputs)

"""
## Passing mask tensors directly to layers
"""
# vocabulary size: 8185
# vectorized text: [6351, 7961, 7, 703, 3108, 999, 999, 7975, 2449]
# 6351 --> welcome
# 7961 -->  
# 7 --> to 
# 703 --> ge
# 3108 --> ek
# 999 --> tu
# 999 --> tu
# 7975 --> .
# 2449 --> com

# build the RNN model
model = Sequential([
    layers.Embedding(tokenizer.vocab_size,64), # the Embedding layer maps each token to a trainable 64-dim vector; after enough training, semantically similar sentences produce similar vectors
    layers.Bidirectional(layers.LSTM(64)), # a layer wrapper around the LSTM: the bidirectional wrapper processes the sequence both forwards and backwards
    layers.Dense(64,activation='relu'),
    layers.Dense(1,activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',
    metrics=['accuracy'])
history1 = model.fit(train_ds,epochs=3,validation_data=test_ds)
loss,acc = model.evaluate(test_ds)
print('accuracy:', acc)  # 0.81039 with a single LSTM layer
# visualize the training results
# configure matplotlib so Chinese labels render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']  # set the font
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
plt.rcParams['font.size'] = 20
def plot_graphs(history,name):
    plt.plot(history.history[name])
 def __init__(self, **kwargs):
     super(MyLayer, self).__init__(**kwargs)
     self.embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
     self.lstm = layers.LSTM(32)
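 # A hedged sketch of the call method that typically pairs with this
 # constructor: compute the mask from the embedding and hand it to the LSTM.
 def call(self, inputs):
     x = self.embedding(inputs)
     # the mask is a boolean tensor of shape (batch_size, timesteps)
     mask = self.embedding.compute_mask(inputs)
     return self.lstm(x, mask=mask)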
Example #23
	Y[i] = y_sequence_ohe

print(X.shape)
print(Y.shape)



batch_size = 100
hidden_units = 700
n_epoch= 300
dropout = 0.4

tf.random.set_seed(42)

model = models.Sequential()
model.add(layers.LSTM(hidden_units, input_shape=(None, n_vocab), return_sequences=True, dropout=dropout))
model.add(layers.LSTM(hidden_units, return_sequences=True, dropout=dropout))

model.add(layers.TimeDistributed(layers.Dense(n_vocab, activation='softmax')))

optimizer = optimizers.RMSprop(learning_rate=0.001)
model.compile(loss="categorical_crossentropy", optimizer=optimizer)


print(model.summary())




def generate_text(model, gen_length, n_vocab, index_to_char):
    """
    model.add(
        td(
            layers.Conv2D(filters=256,
                          kernel_size=(3, 3),
                          padding='same',
                          activation=tf.nn.relu)))
    model.add(
        td(
            layers.Conv2D(filters=256,
                          kernel_size=(3, 3),
                          padding='same',
                          activation=tf.nn.relu)))
    model.add(td(layers.MaxPool2D(pool_size=(3, 3), strides=2)))
    model.add(td(layers.Flatten()))
    model.add(td(layers.Dense(256, activation='relu')))
    model.add(layers.LSTM(50, activation='tanh'))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(syn_head_gen.num_bins, activation='softmax'))

    #model = load_model('./resnetModels/042619_055142/wholeModel.h5')
    params = {
        'batch_size': args.batch_size,
        'time_steps': args.time_step,
        'input_shape': (64, 64, 3),
        'num_classes': syn_head_gen.num_bins,
        'shuffle': True
    }

    training_generator = SynHeadSequencer(full_train_images, full_train_yaw,
                                          full_train_bg,
                                          full_train_video_start, **params)
Example #25
def build_model():
    # Inputs to the model
    input_img = layers.Input(shape=(img_width, img_height, 1),
                             name="image",
                             dtype="float32")
    labels = layers.Input(name="label", shape=(None, ), dtype="float32")

    # First conv block
    x = layers.Conv2D(
        32,
        (3, 3),
        activation="relu",
        kernel_initializer="he_normal",
        padding="same",
        name="Conv1",
    )(input_img)
    x = layers.MaxPooling2D((2, 2), name="pool1")(x)

    # Second conv block
    x = layers.Conv2D(
        64,
        (3, 3),
        activation="relu",
        kernel_initializer="he_normal",
        padding="same",
        name="Conv2",
    )(x)
    x = layers.MaxPooling2D((2, 2), name="pool2")(x)

    # We have used two max pool with pool size and strides 2.
    # Hence, downsampled feature maps are 4x smaller. The number of
    # filters in the last layer is 64. Reshape accordingly before
    # passing the output to the RNN part of the model
    new_shape = ((img_width // 4), (img_height // 4) * 64)
    x = layers.Reshape(target_shape=new_shape, name="reshape")(x)
    x = layers.Dense(64, activation="relu", name="dense1")(x)
    x = layers.Dropout(0.2)(x)

    # RNNs
    x = layers.Bidirectional(
        layers.LSTM(128, return_sequences=True, dropout=0.25))(x)
    x = layers.Bidirectional(
        layers.LSTM(64, return_sequences=True, dropout=0.25))(x)

    # Output layer
    x = layers.Dense(len(char_to_num.get_vocabulary()) + 1,
                     activation="softmax",
                     name="dense2")(x)

    # Add CTC layer for calculating CTC loss at each step
    output = CTCLayer(name="ctc_loss")(labels, x)

    # Define the model
    model = keras.models.Model(inputs=[input_img, labels],
                               outputs=output,
                               name="ocr_model_v1")
    # Optimizer
    opt = keras.optimizers.Adam()
    # Compile the model and return
    model.compile(optimizer=opt)
    return model
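# A hedged training sketch (not in the original): build the model and fit it on
# hypothetical tf.data pipelines train_dataset / validation_dataset that yield
# {"image": ..., "label": ...} batches; epoch and patience values are illustrative.
model = build_model()
early_stopping = keras.callbacks.EarlyStopping(monitor="val_loss",
                                               patience=10,
                                               restore_best_weights=True)
history = model.fit(train_dataset,
                    validation_data=validation_dataset,
                    epochs=100,
                    callbacks=[early_stopping])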
Example #26
def main():
    path_net = os.path.join('/', 'public', 'home', 'liqi', 'data', 'analysis', 'transcribe_CNN')
    # path_net = os.path.join('Z:', 'datas', 'analysis')

    datas = pd.read_csv(os.path.join(path_net, "save_CNN_text_2_datas.csv"))

    text_xys = Kc.DataFrameToXY(datas, ["HistoryOfPastIllness"], "MajorDiagnosisCoding",
                                manualFilePath=os.path.join(path_net, r"CodingToClass_1to16 - new 多分类.csv"))

    text_xys = text_xys[text_xys["MajorDiagnosisCoding"] != "-1"]

    # vectorize the text strings
    text_x_all = ""
    text_x_thus = []
    text_x_d2v_train = []
    thu1 = thulac.thulac(seg_only=True)  # default mode (segmentation only)
    for i in range(len(text_xys["HistoryOfPastIllness"])):
        try:
            index_text_y = text_xys["HistoryOfPastIllness"][i]
        except KeyError:
            index_text_y = ""

        # word segmentation
        temp_thu1_rt = thu1.cut(index_text_y, text=True)
        document = gensim.models.doc2vec.TaggedDocument(temp_thu1_rt, tags=[i])
        text_x_d2v_train.append(document)

    model = gensim.models.Doc2Vec(text_x_d2v_train, size=50, window=8, min_count=5, workers=4)
    model.save(os.path.join(path_net, r'model_text_2.model'))
    text_x_vds = list(map(lambda x: np.array(x), model.docvecs.vectors_docs.tolist()))
    text_xys.insert(1, 'HistoryOfPastIllness_vec', text_x_vds)
    text_xys.drop(['HistoryOfPastIllness'], axis=1, inplace=True)

    text_xys_np = text_xys.values

    x_train, y_train, x_test, y_test = Kc.SplitGroup(text_xys_np)
    y_train = y_train.astype(np.float64) - 1  # unlabeled cases were removed; only classes 1 and 2 remain, shifted to 0/1
    y_test = y_test.astype(np.float64) - 1  # unlabeled cases were removed; only classes 1 and 2 remain, shifted to 0/1
    x_train = np.array(list(map(lambda x: x[0].tolist(), x_train.tolist())))
    x_test = np.array(list(map(lambda x: x[0].tolist(), x_test.tolist())))

    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
    x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)

    numclasses = y_train.max() + 1
    onesharp = len(x_train[0])

    # build the neural network
    print("Building the neural network")
    model = tf.keras.Sequential(
        [layers.LSTM(16, return_sequences=True, input_shape=(onesharp, 1)),
         layers.Conv1D(32, 5, padding='same', activation=tf.nn.relu),
         layers.Conv1D(64, 5, padding='same', activation=tf.nn.relu),
         layers.MaxPooling1D(5),
         layers.Dropout(0.25),
         layers.LSTM(64, return_sequences=True),
         layers.Conv1D(128, 5, padding='same', activation=tf.nn.relu),
         layers.Conv1D(64, 5, padding='same', activation=tf.nn.relu),
         layers.MaxPooling1D(5),
         layers.Dropout(0.25),
         layers.LSTM(64, return_sequences=True),
         layers.Conv1D(32, 5, padding='same', activation=tf.nn.relu),
         layers.GlobalAveragePooling1D(),
         layers.Dropout(0.5),
         layers.Dense(numclasses, activation=tf.nn.softmax)])
    model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    print(model.summary())
    print("训练神经网络")
    metrics = Kc.Metrics(validation_data=(x_test, y_test))
    history = model.fit(x_train, y_train, batch_size=400, validation_data=(x_test, y_test), epochs=500,
                        callbacks=[metrics, TensorBoard(log_dir=os.path.join('logs', '{}').format("model-{}".format(int(time.time()))))])

    nowtime = str(int(time.time()))
    try:
        plt.plot(history.history['accuracy'])
        plt.plot(history.history['val_accuracy'])
        plt.plot(metrics.val_f1s)
        plt.plot(metrics.val_recalls)
        plt.plot(metrics.val_precisions)
        plt.legend(['training', 'validation', 'val_f1', 'val_recall', 'val_precision'], loc='upper left')

        plt.savefig(os.path.join(path_net, 'result', 'CNN_text_2', nowtime + '_cnn_result' + '.png'))
        plt.show()
    except:
        print("无法画图")
    finally:
        with open(os.path.join(path_net, 'result', 'CNN_text_2', nowtime + '_history.txt'), 'w+') as f:
            f.write(str(history.history))
    f = open('cnn_text.py', 'r', encoding='utf-8')
    fff = f.read()
    f.close()
    nf = open(os.path.join(path_net, 'result', 'CNN_text_2', nowtime + '_code' + '.py'), 'w+')
    nf.write(fff)
    nf.close()
    print('Done')
    return 0
Example #27
num_departments = 4  # Number of departments for predictions

title_input = keras.Input(shape=(None, ),
                          name="title")  # Variable-length sequence of ints
body_input = keras.Input(shape=(None, ),
                         name="body")  # Variable-length sequence of ints
tags_input = keras.Input(shape=(num_tags, ),
                         name="tags")  # Binary vectors of size `num_tags`

# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)

# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)

# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])

# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)

# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
    inputs=[title_input, body_input, tags_input],
    outputs=[priority_pred, department_pred],
Example #28
vocab_len = len(chars)

char_indices = dict((char, chars.index(char)) for char in chars)
char_indices

x = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)

for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        x[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1

from tensorflow.keras import layers
model = tf.keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, vocab_len)))

model.add(layers.Dense(vocab_len, activation="softmax"))

model.summary()

model.compile(loss="categorical_crossentropy", optimizer="adam")


def sample(preds, temperature=1.0):
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
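# A hedged sketch of how sample() is typically used to generate text: one-hot
# encode a seed string, predict the next-character distribution, sample from it
# and append. seed_text is a hypothetical string; maxlen, chars, char_indices
# and vocab_len come from the preprocessing above.
def generate(seed_text, length=100, temperature=0.5):
    generated = seed_text
    for _ in range(length):
        x_pred = np.zeros((1, maxlen, vocab_len))
        for t, char in enumerate(generated[-maxlen:]):
            x_pred[0, t, char_indices[char]] = 1.0
        preds = model.predict(x_pred, verbose=0)[0]
        next_index = sample(preds, temperature)
        generated += chars[next_index]
    return generated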
# creating instance of one-hot-encoder
enc = OneHotEncoder(handle_unknown='ignore')
# one-hot encode the LABEL column of the training data
enc_df = pd.DataFrame(enc.fit_transform(train[['LABEL']]).toarray())

enc_df = np.array(enc_df)
enc_df.shape

from tensorflow.keras import layers
from tensorflow.keras import Model

x_input = layers.Input(shape=(500, ))

emb = layers.Embedding(500, 256, input_length=max_length)(x_input)

bi_rnn1 = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(emb)

bi_rnn2 = layers.Bidirectional(layers.LSTM(128, ))(bi_rnn1)

x_output = layers.Dense(20, activation="softmax")(bi_rnn2)

model1 = Model(inputs=x_input, outputs=x_output)
model1.compile(optimizer='adam',
               loss='categorical_crossentropy',
               metrics=['accuracy'])

model1.summary()

train = 0
train_encode = 0
Example #30
from util import f1_score, recall, precision, plot_graph
import matplotlib.pyplot as plt
plt.style.use('ggplot')

PLOT_GRAPH = False
PLOT_MODEL = False

max_features = 5000  # Only consider the top 5k words
maxlen = 500  # Only consider the first 500 words of each movie review

# Input for variable-length sequences of integers
inputs = keras.Input(shape=(None,), dtype="int32")
# Embed each integer in a 64-dimensional vector
x = layers.Embedding(max_features, 64)(inputs)
# Add 2 bidirectional LSTMs
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)
x = layers.Bidirectional(layers.LSTM(64))(x)
# Add a classifier
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.summary()

# load imdb data
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(
    num_words=max_features
)
print(len(x_train), "Training sequences")
print(len(x_test), "Validation sequences")
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)
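# A hedged sketch of the training step that usually follows: compile with binary
# cross-entropy and fit on the padded arrays prepared above; the batch size and
# epoch count are illustrative assumptions.
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
history = model.fit(x_train, y_train,
                    batch_size=32,
                    epochs=2,
                    validation_data=(x_val, y_test))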