Example #1
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, BatchNormalization


def get_optfastrnnlstm(shape, dropout, first_layer_neurons,
                       second_layer_neurons):
    model = Sequential()

    # First recurrent block. The original wrapped each block in a TF1
    # tf.variable_scope(..., reuse=tf.AUTO_REUSE), which has no effect on
    # Keras layers and is dropped here; it also passed a non-standard
    # celltype="FastRNNCell" argument that the stock Keras LSTM does not
    # accept, so that is removed as well.
    model.add(LSTM(first_layer_neurons,
                   input_shape=shape,
                   return_sequences=True))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))

    # Second recurrent block: collapse the sequence to a single vector.
    model.add(LSTM(second_layer_neurons, return_sequences=False))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))

    # Dense head.
    model.add(Dense(second_layer_neurons, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))

    # Sigmoid output for binary classification.
    model.add(Dense(1, activation='sigmoid'))

    # The deprecated lr/decay arguments are replaced by a schedule that
    # reproduces the legacy per-step decay lr / (1 + decay * step).
    lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
        initial_learning_rate=1e-2, decay_steps=1, decay_rate=1e-3)
    opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    model.summary()
    return model
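A minimal call sketch; the shape tuple and layer sizes below are illustrative, not taken from the original:

# shape is (timesteps, features) and must match the training data
model = get_optfastrnnlstm(shape=(30, 8), dropout=0.2,
                           first_layer_neurons=64,
                           second_layer_neurons=32)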
Example #2
from tensorflow import keras
from tensorflow.keras.layers import (Input, LSTM, Bidirectional, Flatten,
                                     Dropout, Dense)
from tensorflow.keras.optimizers import Adam


def create_dummy_classifier(window_size: int,
                            num_rows_df: int,
                            num_output_fields: int,
                            neurons_rnn: int = 10,
                            dropout: float = 0.0,
                            learning_rate: float = 0.01,
                            bidirection: bool = True,
                            return_sequences: bool = False):
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=10000,
        decay_rate=0.9)

    model = keras.Sequential(name='dummy_classifier')

    model.add(Input(shape=(window_size, num_rows_df), name='input'))

    if bidirection:
        model.add(Bidirectional(
            LSTM(neurons_rnn, return_sequences=return_sequences),
            name='bidirection'))
    else:
        model.add(LSTM(neurons_rnn, name="rnn",
                       return_sequences=return_sequences))
    if return_sequences:
        model.add(Flatten())
    model.add(Dropout(dropout, name='dropout'))
    model.add(Dense(num_output_fields, activation='sigmoid', name='dense_output'))

    model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=lr_schedule),
                  metrics=['accuracy', 'binary_accuracy'])
    return model
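A minimal call sketch, with illustrative argument values only:

# 50-step windows of 12 features, 3 sigmoid outputs
model = create_dummy_classifier(window_size=50, num_rows_df=12,
                                num_output_fields=3, neurons_rnn=16)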
Example #3
import cv2
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense


def main():
    # load_files and make_sequences are project-specific helpers; a
    # plausible make_sequences is sketched after this example.
    samples = load_files("data")

    sequence_dim = 20
    sequence_lag = 1

    samples, labels = make_sequences(samples, sequence_dim, sequence_lag)

    model = Sequential()
    model.add(LSTM(128, input_shape=(sequence_dim, 2), return_sequences=True))
    model.add(LSTM(128))
    model.add(Dense(64))
    model.add(Dense(2))

    model.summary()  # summary() already prints; wrapping it in print() adds a stray "None"

    (trainSamples, testSamples, trainLabels,
     testLabels) = train_test_split(samples,
                                    labels,
                                    test_size=0.15,
                                    random_state=42)

    imname = "animal-11"
    image = cv2.imread("img/{}.jpg".format(imname))
    # create ground truth image with all train gazes
    for j in range(len(trainLabels)):
        s = trainLabels[j]
        cv2.circle(image, (int(s[0]), int(s[1])), 10, (255, 0, 0), 3)
    cv2.imwrite("img/{}_truth.jpg".format(imname), image)

    model.compile(loss="mean_absolute_error",
                  optimizer="adam",
                  metrics=["mae"])

    EPOCHS = 30
    for e in range(EPOCHS):
        print("=" * 50)
        print("Iteration: {}".format(e))
        model.fit(trainSamples,
                  trainLabels,
                  validation_data=(testSamples, testLabels),
                  epochs=1,
                  batch_size=128,
                  verbose=1)

        predictions = model.predict(testSamples)

        # create and save image with all current predictions
        image = cv2.imread("img/{}.jpg".format(imname))
        cv2.line(image, (0, 0), (200, 200), (255, 255, 255), 2)
        for p in predictions:
            cv2.circle(image, (int(p[0]), int(p[1])), 10, (0, 255, 0), 3)
        cv2.imwrite("img/{}_e{:02d}.jpg".format(imname, e), image)

    model.save("model_rnn.h5")
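load_files and make_sequences are not shown in this example. A plausible sliding-window make_sequences, assuming samples is an (N, 2) array of gaze points, purely as a sketch:

import numpy as np

def make_sequences(points, dim, lag):
    # Windows of `dim` consecutive points, each labelled with the point
    # `lag` steps after the window ends (illustrative reconstruction).
    xs, ys = [], []
    for i in range(len(points) - dim - lag + 1):
        xs.append(points[i:i + dim])
        ys.append(points[i + dim + lag - 1])
    return np.array(xs), np.array(ys)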
Example #4
def performRNNlass(X_train, X_test, y_train, y_test, forcast_scaled):

    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
    forcast_scaled = np.reshape(
        forcast_scaled, (forcast_scaled.shape[0], forcast_scaled.shape[1], 1))

    regressor = Sequential()

    dropoutunit = p.dropoutunit
    LSTM_unit_increment = p.LSTM_unit_increment

    # Adding the first LSTM layer and some Dropout regularisation
    regressor.add(
        LSTM(units=50,
             return_sequences=True,
             input_shape=(X_train.shape[1], 1)))
    regressor.add(Dropout(dropoutunit))

    LSTM_units = 50
    LSTM_units = LSTM_units + LSTM_unit_increment

    # Adding a second LSTM layer and some Dropout regularisation
    regressor.add(LSTM(units=LSTM_units, return_sequences=True))
    regressor.add(Dropout(dropoutunit))

    # Adding a third LSTM layer and some Dropout regularisation
    LSTM_units = LSTM_units + LSTM_unit_increment

    regressor.add(LSTM(units=LSTM_units, return_sequences=True))
    regressor.add(Dropout(dropoutunit))

    # Adding the fourth and final LSTM layer and some Dropout regularisation
    LSTM_units = LSTM_units + LSTM_unit_increment
    regressor.add(LSTM(units=LSTM_units))
    regressor.add(Dropout(dropoutunit))

    # print(X_train.shape,y_train.shape)
    # Adding the output layer
    regressor.add(Dense(units=1))

    # Compiling the RNN
    regressor.compile(optimizer='adam', loss='mean_squared_error')

    # Fitting the RNN to the Training set
    regressor.fit(X_train, y_train, epochs=p.epochs, batch_size=p.batch_size)
    print('RNN model built; test shape:', X_test.shape)

    score = regressor.evaluate(X_test, y_test, batch_size=100, verbose=0)
    return regressor, score, X_train, X_test, y_train, y_test, forcast_scaled
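The function reads its hyper-parameters from an external module p that is not shown; a hypothetical stand-in with illustrative values:

# Stand-in for the imported parameter module `p` (values are made up)
class p:
    dropoutunit = 0.2
    LSTM_unit_increment = 25
    epochs = 50
    batch_size = 32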
Example #5
import tensorflow as tf
from tensorflow.keras.layers import LSTM, Dropout, Dense, Activation
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint


def model(X_train, X_test, y_train, y_test):

    model = tf.keras.models.Sequential()
    model.add(LSTM(units=200, input_shape=X_train.shape[-2:]))
    model.add(Dropout(0.41))
    model.add(Dense(1))
    model.add(Activation('relu'))
    model.compile(optimizer='adam', metrics=['mae', 'mse'], loss='mse')
    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)
    model.fit(X_train,
              y_train,
              batch_size=64,
              epochs=200,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    loss, mae, mse = model.evaluate(X_test, y_test, verbose=0)

    print(' mae:', mae)
    print(' mse:', mse)
    print(' loss:', loss)
    return model
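Note that save_best_only=True writes the best-val_loss weights to keras_weights.hdf5 while fit() leaves the model at its last-epoch weights; to evaluate the best checkpoint instead, reload it first (a sketch):

model.load_weights('keras_weights.hdf5')  # restore the best val_loss weights
loss, mae, mse = model.evaluate(X_test, y_test, verbose=0)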
Example #6
def model(X_train, X_test, y_train, y_test):

    model = tf.keras.models.Sequential()
    model.add(
        LSTM(units={{choice([100, 200, 500])}},
             input_shape=X_train.shape[-2:]))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation({{choice(['relu', 'sigmoid', 'tanh'])}}))
    model.compile(optimizer='adam', metrics=['mae', 'mse'], loss='mse')
    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)
    model.fit(X_train,
              y_train,
              batch_size={{choice([32, 64, 128])}},
              epochs={{choice([100, 200, 500, 1000])}},
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    loss, mae, mse = model.evaluate(X_test, y_test, verbose=0)

    print('Test mae:', mae)
    print('Test mse:', mse)
    return {'loss': loss, 'status': STATUS_OK, 'model': model}
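The double-brace markers ({{choice(...)}}, {{uniform(...)}}) are Hyperas template fields, not valid Python on their own; the function is meant to be handed to hyperas.optim.minimize, roughly as sketched below (the data function is assumed to return the train/test arrays):

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model,
                                      data=data,  # assumed data function
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())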
Example #7
File: rnn.py  Project: ss-koishi/RNN
def main():

    sin = md.make_noised_sin()
    size = 60
    # project-specific splitter (not sklearn's): windows of n_prev steps
    (x_train, y_train), (x_test, y_test) = train_test_split(sin, n_prev=size)

    # `hidden` and `io` are module-level globals (hidden units, I/O width)
    model = Sequential([
        LSTM(hidden, batch_input_shape=(None, size, io), return_sequences=False),
        Dense(io),
        Activation('linear')
    ])

    model.compile(loss='mean_squared_error', optimizer='adam')
    stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=10)
    model.fit(x_train, y_train, batch_size=256, epochs=1000,
              validation_split=0.1, callbacks=[stopping])

    # Autoregressive rollout: seed with the last training window, then keep
    # feeding each prediction back in as the newest timestep. The original
    # chained no-op slices ([-1:][-1:][-size:]) are simplified here.
    result = []
    future_steps = 1000
    future_data = x_train[-1:]  # last window, shape (1, size, 1)
    for i in range(future_steps):
        pred = model.predict(future_data)
        future_data = np.delete(future_data, 0)  # drop the oldest value (flattens)
        future_data = np.append(future_data, pred[-1:]).reshape(1, size, 1)
        result = np.append(result, pred[-1:])

    plt.figure()
    #plt.plot(y_test.flatten())
    output = y_train[-100:]
    plt.plot(range(0, len(output)), output, label='input')
    plt.plot(range(len(output), len(output) + future_steps), result, label='future')
    plt.title('Sin Curve prediction')
    plt.legend(loc='upper right')
    plt.savefig('result.png')
Example #8
    def cria_rede_neural_univariada(self, df):
        """Builds the "univariate" neural network with the Keras subclassing
        style, returning a Keras model."""

        dias_distintos = df['Dia'].unique()
        meses_distintos = df['Mes'].unique()
        dias_semana_distintos = df['Dia_Semana'].unique()
        # Add the layers to the model; each Embedding sizes its output as
        # roughly the fourth root of the number of distinct categories.
        self.embedding_dia = Embedding(name='dia_embedding',
                                       input_length=1,
                                       input_dim=len(dias_distintos),
                                       output_dim=int(
                                           round(len(dias_distintos)**0.25,
                                                 0)))
        self.flatten_dia = Flatten()
        self.embedding_mes = Embedding(
            name='mes_embedding',
            input_length=1,
            input_dim=len(meses_distintos),
            output_dim=int(round(len(meses_distintos)**0.25, 0)))
        self.flatten_mes = Flatten()
        self.embedding_dia_semana = Embedding(
            name='dia_semana_embedding',
            input_length=1,
            input_dim=len(dias_semana_distintos),
            output_dim=int(round(len(dias_semana_distintos)**0.25, 0)))
        self.flatten_dia_semana = Flatten()
        self.concatenate_dia_mes = Concatenate(axis=-1,
                                               name='dia_mes_concatenate')
        self.dense_dia_mes = Dense(2, activation='relu', name='dia_mes_dense')
        self.lstm_valor = LSTM(1, name='valor_lstm')
        self.dense_valor = Dense(1, activation='relu', name='valor_dense')
        self.concatenate_dia_mes_valor = Concatenate(
            axis=-1, name='dia_mes_valor_concatenate')
Example #9
    def feature_classifier(self, weight_file_path):
        model = Sequential()
        model.add(Bidirectional(LSTM(units=cfg.HIDDEN_UNITS, return_sequences=True),
                                input_shape=(cfg.EXPECTED_FRAMES, cfg.NUM_INPUT_TOKENS)))
        model.add(Dropout(0.4))
        model.add(Bidirectional(LSTM(128)))
        model.add(Dropout(0.4))
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.4))
        model.add(Dense(self.nb_classes))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        model.load_weights(weight_file_path)

        return model
Example #10
def makeModel():
    model = Sequential()
    # The embedding output width is a design choice; see the heuristic
    # sketched after this example.
    model.add(Embedding(total_words, 500, input_length=input_len))
    model.add(Bidirectional(LSTM(500)))
    model.add(Dense(total_words, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
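On choosing the embedding width: a common rule of thumb, which Example #8 above also uses, is roughly the fourth root of the vocabulary size; a sketch:

embedding_dim = int(round(total_words ** 0.25))  # heuristic, not a rule
model.add(Embedding(total_words, embedding_dim, input_length=input_len))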
Example #11
def bidirectional_model():

    length_vocab, embedding_size = word2vec.shape

    model = Sequential()
    model.add(
        Embedding(length_vocab,
                  embedding_size,
                  input_length=parameters.max_length,
                  weights=[word2vec],
                  mask_zero=True,
                  name='embedding_layer'))

    for i in range(parameters.rnn_layers):
        bilstm = Bidirectional(
            LSTM(parameters.rnn_size,
                 return_sequences=True,
                 name='bilstm_layer_%d' % (i + 1)))
        model.add(bilstm)

    model.add(
        Lambda(simple_context,
               mask=lambda inputs, mask: mask[:, parameters.max_len_desc:],
               output_shape=lambda input_shape:
               (input_shape[0], parameters.max_len_head, 2 *
                (parameters.rnn_size - parameters.activation_rnn_size)),
               name='simple_context_layer'))

    vocab_size = word2vec.shape[0]
    model.add(TimeDistributed(Dense(vocab_size,
                                    name='time_distributed_layer')))

    model.add(Activation('softmax', name='activation_layer'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    K.set_value(model.optimizer.lr, np.float32(parameters.learning_rate))
    model.summary()

    return model
Example #12
def main():

    (x_train, y_train), (x_test, y_test) = make_dataset()

    model = Sequential([
        LSTM(hidden, batch_input_shape=(None, sequence_size, io), return_sequences=False),
        Dense(io),
        Activation('linear')
    ])

    model.compile(loss='mean_squared_error', optimizer='adam')
    stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=10)
    model.fit(x_train, y_train, batch_size=512, epochs=200, validation_split=0.24, callbacks=[stopping])

    # Autoregressive rollout seeded with the last test window; the original
    # chained no-op slices and debug prints are removed.
    result = []
    future_steps = 50
    future_data = x_test[-1:]  # last window, shape (1, sequence_size, 1)
    for i in range(future_steps):
        pred = model.predict(future_data)
        future_data = np.delete(future_data, 0)  # drop the oldest value (flattens)
        future_data = np.append(future_data, pred[-1:]).reshape(1, sequence_size, 1)
        result = np.append(result, pred[-1:])


    input_pred = model.predict(x_test).flatten()
    plt.figure()
    #plt.plot(y_test.flatten())
    plt.plot(range(0, len(y_test)), y_test, label='input')
    plt.plot(range(0, len(input_pred)), input_pred, label='predict') 
    plt.plot(range(len(input_pred), len(input_pred) + future_steps), result, label='future')
    plt.title('Wave prediction')
    plt.legend(loc='upper right')
    plt.savefig('result.png')
Example #13
    sample_X = sample_X.reshape(1, 6, 69)
    #sample_Y = sample_Y.reshape(1,1,69)

    #print(sample_X.shape)
    #print(sample_Y.shape)

    return sample_X, sample_Y


#this_x, this_y = gen_sample(0)

model = Sequential()
model.add(LSTM(276, input_shape=(6, 69), return_sequences=True))
#model.add(LSTM(138, input_shape = (6,69), return_sequences = True))
model.add(LSTM(207, input_shape=(6, 69)))
model.add(tf.keras.layers.Dense(138, activation='relu'))
#model.add(tf.keras.layers.Dense(69, activation='swish'))
model.add(tf.keras.layers.Dense(69, activation='softmax'))

model.compile(optimizer=keras.optimizers.Adagrad(learning_rate=1e-4),
              loss="categorical_crossentropy")
model.summary()

test_1 = [9, 36, 49, 56, 62, 9]    # June 27
test_2 = [15, 28, 52, 53, 63, 18]  # July 1
test_3 = [16, 21, 27, 60, 61, 6]
Example #14
    X_train, y_train = _load_data(df.iloc[0:ntrn], n_prev)
    X_test, y_test = _load_data(df.iloc[ntrn:], n_prev)
    return (X_train, y_train), (X_test, y_test)


(X_train, y_train), (X_test, y_test) = train_test_split(input_dataFrame)

# Build the neural-network model
in_out_neurons = 8
hidden_neurons = 300
length_of_sequences = 50

model = Sequential()
model.add(
    LSTM(hidden_neurons,
         batch_input_shape=(None, length_of_sequences, in_out_neurons),
         return_sequences=True))
#model.add(LSTM(hidden_neurons, return_sequences=True))  # would emit the full hidden-state sequence
model.add(LSTM(hidden_neurons))  # emits a single hidden-state vector
model.add(Dense(1, activation='linear'))
#model.add(Dense(in_out_neurons))
#model.add(Activation("linear"))
model.compile(
    loss="mean_squared_error",
    optimizer="adam",
)

# Run training
early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=0)
history = model.fit(X_train,
                    y_train[:, 0],
Example #15

    print('(3) split data set...')
    p1 = int(len(data) * (1 - VALIDATION_SPLIT - TEST_SPLIT))
    p2 = int(len(data) * (1 - TEST_SPLIT))
    x_train = data[:p1]
    y_train = labels[:p1]
    x_val = data[p1:p2]
    y_val = labels[p1:p2]
    x_test = data[p2:]
    y_test = labels[p2:]
    print('train docs: ' + str(len(x_train)),
          'val docs: ' + str(len(x_val)),
          'test docs: ' + str(len(x_test)))


    print('(4) training model...')
    model = Sequential()
    model.add(Embedding(len(word_index) + 1, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH))
    model.add(LSTM(200, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dropout(0.2))
    model.add(Dense(labels.shape[1], activation='softmax'))
    model.summary()
    plot_model(model, to_file=os.path.join(ckpt_path, 'lstm_model.png'), show_shapes=True)

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    print(model.metrics_names)
    model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=2, batch_size=128)
    model.save(os.path.join(ckpt_path, 'lstm.h5'))

    print('(5) testing model...')
    print(model.evaluate(x_test, y_test))
Example #16
    sample_X = encoded_trim[num_start:num_start + 6, :]
    sample_Y = encoded_trim[num_start + 6:num_start + 7, :]

    sample_X = sample_X.reshape(1, 6, 69)
    #sample_Y = sample_Y.reshape(1,1,69)

    #print(sample_X.shape)
    #print(sample_Y.shape)

    return sample_X, sample_Y


#this_x, this_y = gen_sample(0)

model = Sequential()
model.add(LSTM(138, input_shape=(6, 69), return_sequences=True))
model.add(LSTM(69, input_shape=(6, 69)))
model.add(tf.keras.layers.Dense(69, activation='softmax'))

model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.05),
              loss="categorical_crossentropy")
model.summary()

test_num = [9, 36, 49, 56, 62, 9]  # June 27

test_num = np.asarray(test_num)
test_num = test_num.reshape(-1, 1)

test_num_encode = ohe.transform(test_num).toarray()
#print(test_num_encode)
Example #17
'''print(train_data.shape)
print(train_data.head)
print("")'''

train_x = train_data.iloc[:, 1:]
train_y = train_data.iloc[:, 1]

val_x = val_data.iloc[:, 1:]
val_y = val_data.iloc[:, 1]
'''print("")
print(train_y.shape)
print(train_y.head)
print("")'''

model = Sequential()
model.add(LSTM(64, input_shape=(2077, 9), return_sequences=True))
model.add(tf.keras.layers.Dense(8, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='relu'))

model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
              loss="mse")
model.summary()

path_checkpoint = "model_checkpoint.h5"
es_callback = keras.callbacks.EarlyStopping(monitor="val_loss",
                                            min_delta=0,
                                            patience=5)

#print(train_x.shape)

modelckpt_callback = keras.callbacks.ModelCheckpoint(
Example #18
        a = dataset[i:(i + time_steps)]
        dataX.append(a)
        dataY.append(dataset[i + time_steps])
    return np.array(dataX), np.array(dataY)


TIME_STEPS = 3
trainX, trainY = create_dataset(train, TIME_STEPS)
testX, testY = create_dataset(test, TIME_STEPS)

# drop leading samples so the count is a multiple of the batch size, as
# required by the fixed batch_input_shape below (the original mixed the
# undefined lowercase batch_size with BATCH_SIZE)
trainX = trainX[len(trainX) % BATCH_SIZE:]
trainY = trainY[len(trainY) % BATCH_SIZE:]
testX = testX[len(testX) % BATCH_SIZE:]
testY = testY[len(testY) % BATCH_SIZE:]

trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))

model = Sequential()
model.add(LSTM(HIDDEN_SIZE, batch_input_shape=(BATCH_SIZE, TIME_STEPS, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

#x : output of the previous layer
#W : weight matrix of shape [previous layer's output size, Dense() output size]
#b : bias of shape [Dense() output size]

#Dense() output = x·W + b   # · is matrix multiplication (see the numpy sketch after this example)

model.fit(trainX, trainY, epochs=NB_EPOCH, batch_size=BATCH_SIZE)
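The comments above spell out the Dense arithmetic; a small numpy sketch of the same computation, reusing HIDDEN_SIZE from this example (shapes illustrative):

import numpy as np

x = np.random.rand(1, HIDDEN_SIZE)  # previous layer's output
W = np.random.rand(HIDDEN_SIZE, 1)  # kernel: [inputs, units]
b = np.zeros(1)                     # bias: [units]
dense_out = x @ W + b               # what Dense(1) computes before activation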
Example #19
print('Word counts:', token.word_counts)
print('Document count:', token.document_count)
print('Number of documents containing each word:', token.word_docs)
print('Index assigned to each word:', token.word_index)

print()
# Read the texts and predict positive/negative sentiment

docs = ['너무 재밌네요', '최고에요','참 잘만든 영화예요','추천하고 싶은 영화네요','한번 더 보고싶네요',
        '글쎄요','별로네요','생각보다 지루합니다','연기가 좋지않아요','재미없어요']

import numpy as np
classes = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])

token = Tokenizer()
token.fit_on_texts(docs)
print(token.word_index)

model = Sequential()
# word_size is presumably len(token.word_index) + 1 upstream of this fragment
model.add(Embedding(word_size, 8, input_length=4))
#model.add(Flatten())
model.add(LSTM(32))
model.add(Dense(1, activation='sigmoid'))

model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy')




Example #20
def neuralnet(no_model, dnafull, dna0, dna1, dna2, dna3, dna4, dna5, dna6):
    
    """
    dna_temp[0] hid_layer_num INT 1~5
    dna_temp[1] hid_layer_node INT 16~128
    dna_temp[2] epoch INT 100~500
    dna_temp[3] dropout FLOAT 0.00~0.20
    dna_temp[4] maxlen INT 9~19
    dna_temp[5] time_bias INT 1~9
    dna_temp[6] layer INT 1~3
    """
    
    """
    パラメーター設定
    """
    #入力層次元
    n_in = 20
    #中間層次元、層数
    n_hiddens = list()
    for i in range(dna0):
        n_hiddens.append(dna1)
    n_centors = dna0
    #出力層次元、層数
    n_out = 5
    #活性化関数
    activation = 'relu'
    #ドロップアウト率
    p_keep = dna3
    #計算回数
    epochs = dna2
    #EarlyStoppingするか
    isEs= False
    #EarlyStoppingをするまでの回数
    es_patience= 60
    #ミニバッチ処理のサイズ
    batch_size = 1000
    #最適化アルゴリズム
    opt='rmsprop'
    #学習率(本プログラムでは未使用でデフォルト値を使用)
#    learning_rate=0.001
    #Adamのパラメータ(最適化アルゴリズムがAdamの時のみ使用・本プログラムでは未使用)
#    beta_1=0.9
#    beta_2=0.999
    #reccrentの参照数
    maxlen= dna4
    #Yを何秒ずらすか(=0だと過去maxlen秒参照、=maxlen/2だと前後maxlen/2秒参照、=maxlenだと未来maxlen秒参照になる)
    time_bias= dna5
    
    #RNNの種類(SimpleRNN,LSTM,GRU)
    layer_int = dna6
    
    #双方向性を使用するか
    BiDir= False
    
    #RNNの偶数層を逆向きにするか
    back= False
    
    #乱数の固定シード
#    ranseed= 12345
    
#    weight1 = 1
#    weight2 = 1
#    
    print('No_%d' % no_model)
    print(dna0,dna1,dna2,dna3,dna4,dna5,dna6)
    
    # fix random numbers for reproducibility

    import os
    os.environ['PYTHONHASHSEED'] = '0'
#    np.random.seed(ranseed)
#    rn.seed(ranseed)

    # pin thread counts to 1 (needed for reproducibility)
    session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    
    from tensorflow.python.keras import backend as K
#    tf.compat.v1.set_random_seed()
    
     
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(),config=session_conf)
    K.set_session(sess)
    
    
    # weight initialization
    init = initializers.TruncatedNormal()

    # file name
    name = 'linear_data_FIR8_comAngle&AveStd_coor_s1_ntd2_26'
#    number = '-2'
    # read the file
    csv_input = pd.read_csv(filepath_or_buffer=name + ".csv",
                            encoding="ms932", sep=",")
    array = csv_input.values

    # read the raw input/output columns
    X = array[:, 1:n_in + 1].astype(np.float32)
    Y = array[:, n_in + 1].astype(int)  # np.int is deprecated; use int

    # read the timestamps
    TIME = array[:, 0]
    
    leng = len(Y)
    data = []
    target = []

    for i in range(maxlen, leng):
        # group the inputs into lookback windows of maxlen timesteps
        data.append(X[i - maxlen + 1:i + 1, :])
        # label each window with the activity time_bias steps back
        target.append(Y[i - time_bias])
    # adjust the input/output shapes
    X = np.array(data).reshape(len(data), maxlen, n_in)
    Y = np.array(target)

    # keep the timestamps in sync with the windowed data
    TIME = TIME[maxlen - time_bias:leng - time_bias]
    
    # split into training and test data
    x_train, x_test, y_train0, y_test0, time_train, time_test = \
        train_test_split(X, Y, TIME, train_size=0.85, shuffle=False)

    # split the training data into train and validation
    x_train, x_validation, y_train0, y_validation0 = \
        train_test_split(x_train, y_train0, train_size=0.9, shuffle=False)

    # one-hot encode y (train, val, test)
    ntr = y_train0.size
    y_train = np.zeros(n_out * ntr).reshape(ntr, n_out).astype(np.float32)
    for i in range(ntr):
        y_train[i, y_train0[i]] = 1.0

    nte = y_test0.size
    y_test = np.zeros(n_out * nte).reshape(nte, n_out).astype(np.float32)
    for i in range(nte):
        y_test[i, y_test0[i]] = 1.0

    y_validation = np.eye(n_out)[y_validation0.reshape(y_validation0.size)]
        
#    nrow=y_test0.size

    # model setup

    model = Sequential()

    for i in range(n_centors):
        # only the last recurrent layer collapses the sequence
        retSeq = (i != n_centors - 1)
        # optionally run even-numbered layers backwards
        gBack = (i % 2 == 1 and back)
        # input width: raw features for the first layer, else previous width
        in_dir = n_in if i == 0 else n_hiddens[i - 1]

        rnn_kwargs = dict(activation=activation,
                          kernel_initializer=init,
                          recurrent_initializer=init,
                          dropout=p_keep,
                          recurrent_dropout=p_keep,
                          return_sequences=retSeq,
                          go_backwards=gBack,
                          input_shape=(maxlen, in_dir))

        if layer_int == 1:
            layer = SimpleRNN(n_hiddens[i], **rnn_kwargs)
        elif layer_int == 2:
            layer = LSTM(n_hiddens[i], **rnn_kwargs)
        elif layer_int == 3:
            layer = GRU(n_hiddens[i], **rnn_kwargs)
        else:
            raise ValueError('layer_int must be 1 (SimpleRNN), 2 (LSTM) or 3 (GRU)')
        model.add(Bidirectional(layer) if BiDir else layer)

    model.add(Dense(n_out, kernel_initializer=init))
    model.add(Activation('softmax'))

    # the original compiled with binary_crossentropy, which is inappropriate
    # for a one-hot softmax over n_out classes; categorical_crossentropy
    # matches the output layer
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])
    
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=es_patience, verbose=1)

#    now = datetime.now().strftime('%Y%m%d%H%M')
#    flog = name+number+'.log1.csv'
#
#    csv_logger=CSVLogger(flog)

    caBacks = [early_stopping] if isEs else []

    # train the model

#    start = time.time()

    model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
              validation_data=(x_validation, y_validation),
              callbacks=caBacks)
    
#    slapsed_time=time.time() - start
#    
#    
#    val_acc = hist.history['val_acc']
#    acc = hist.history['acc']
#    val_loss = hist.history['val_loss']
#    loss = hist.history['loss']
#

#now = datetime.now().strftime('%Y%m%d%H%M')
#
#plt.rc('font',family='serif')
#fig = plt.figure()
#plt.plot(range(len(loss)), loss, label='loss', color='r')
#plt.plot(range(len(val_loss)), val_loss, label='val_loss', color='b')
#plt.xlabel('epochs')
#plt.legend()
#plt.show()
#plt.savefig(name+number+'.loss.png')
#
##plt.rc('font',family='serif')
##fig = plt.figure()
##plt.plot(range(len(val_acc)), val_acc, label='acc', color='b')
##plt.xlabel('epochs')
##plt.show()
##plt.savefig(name+number+'.val_acc.png')
       
    # predict_classes was removed from recent Keras; argmax is equivalent
    classes = np.argmax(model.predict(x_test, batch_size=1), axis=-1)
#prob = model.predict_proba(x_test, batch_size=1)

#重みの出力
#L1 = model.get_weights()
#W1 = np.dot(L1[0],L1[1])+L1[2]
#W2 = np.dot(W1,L1[3])
#W3 = np.dot(W2,L1[4])
#W4 = W3+L1[5]
#weight1 = np.dot(W4,L1[6])+L1[7]
#weight = weight1.transpose()

    # output the results
    im = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
    ip = [0,0,0,0]
    it = [0,0,0,0]
    f = [0,0,0,0]

    ia = [0,0,0,0]
    ib = [0,0,0,0]

    j = 0
    for i in range(4):
        for j in range(y_test0.size):
            if y_test0[j]==i+1:
                it[i] += 1
                       
                if classes[j] == 1:
                    im[i][0] += 1
                if classes[j] == 2:
                    im[i][1] += 1
                if classes[j] == 3:
                    im[i][2] += 1
                if classes[j] == 4:
                    im[i][3] += 1
            else:
                pass
    
    for i in range(4):        
        for k in range(y_test0.size):
            if classes[k]==i+1:
                ip[i]+=1
            else:
                pass

    # derive recall per class
    for i in range(4):
        if it[i]==0:
            ia[i] = 0
        else:
            ia[i] = im[i][i]/it[i]
    
    # derive precision per class
    for i in range(4):
        if ip[i]==0:
            ib[i] = 0
        else:
            ib[i] = im[i][i]/ip[i]
    
    # derive the per-class F1 score
    for i in range(4):
        if ia[i]+ib[i]==0:
            f[i] = 0
        else:
            f[i] = 2*ia[i]*ib[i]/(ia[i]+ib[i])
    
#    it_sum = sum(it)
#    ip_sum = sum(ip)
#    ii = im[0][0]+im[1][1]+im[2][2]+im[3][3]#+i5
    
    if_ave = sum(f) / 4  # macro-averaged F1 over the four classes
    
    model.save(name+'_'+str(no_model)+".h5")
#    model.save("kanno_"+str(no_model)+".model")
   
    # release the TF session between candidate models
    backend.clear_session()
    
    
    return if_ave
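The per-class recall/precision/F1 loop above can be cross-checked with scikit-learn; a sketch, assuming classes and y_test0 as computed in the function (labels 1-4, matching the manual loop):

from sklearn.metrics import precision_recall_fscore_support

precision, recall, f1, support = precision_recall_fscore_support(
    y_test0, classes, labels=[1, 2, 3, 4], zero_division=0)
print(f1.mean())  # comparable to if_ave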
Example #21
    config = Config()

    print('(1)load data...')
    embedding_file = os.path.join(config.embedding_path, 'yelp.vector.bin')
    if not os.path.exists(embedding_file):
        train_d2v_model(os.path.join(config.data_path, 'train_docs.txt'), embedding_file)
        print('embedding file did not exist, so a new one was trained')

    x_train, x_docs = load_doc_to_vecs(os.path.join(config.data_path, 'train_docs.txt'), embedding_file)
    y_train_a = load_label_to_vecs(os.path.join(config.data_path, 'train_labels_a.txt'))
    y_train_p = load_label_to_vecs(os.path.join(config.data_path, 'train_labels_p.txt'))
    print('there are ' + str(len(all_words)) + ' words in total')
    print('there are ' + str(len(wtf_words)) + ' words without embeddings')
    print('train docs:' + str(x_train.shape))
    print('train labels of aspect:' + str(y_train_a.shape))
    print('train labels of opinion:' + str(y_train_p.shape))


    print('(2)build model...')
    main_input = Input(shape=(config.MAX_SEQUENCE_LENGTH, config.EMBEDDING_DIM), dtype='float32')
    lstm1 = LSTM(config.HIDDEN_SIZE, dropout=0.5, recurrent_dropout=0.5, return_sequences=True, name='lstm1')(main_input)
    out1 = Dense(3, activation='softmax', name='out1')(lstm1)
    model = Model(inputs=main_input, outputs=out1)
    model.summary()


    print('(3)run model...')
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    model.fit(x_train, y_train_a, epochs=config.max_iter, batch_size=32)
    model.save(os.path.join(config.ckpt_path, 'lstm_model.h5'))
Example #22
import sys

df = pd.read_csv(
    r'C:\Users\Michael\Desktop\Python\pwrball_rand\pwr_ball - Copy.csv')

trim = df.drop(['prize', 'daysin', 'daycos', 'year'], axis=1)

sequence = trim.values.reshape(-1, 1).tolist()

ohe = OneHotEncoder().fit(sequence)

encoded_trim = ohe.transform(sequence).toarray()

model = Sequential()
# batch_input_shape already fixes the input shape, so the redundant
# input_shape argument from the original is dropped
model.add(LSTM(207, stateful=True, batch_input_shape=(1, 6, 69)))
#model.add(LSTM(276, input_shape = (6,69), return_sequences = True, stateful = True))
#model.add(LSTM(207, input_shape = (6,69), stateful = True))
#model.add(LSTM(138, input_shape = (6,69), return_sequences = True, stateful = True))
#model.add(LSTM(69, input_shape = (6,69)))
#model.add(tf.keras.layers.Dense(69, activation='relu'))
model.add(tf.keras.layers.Dense(138, activation='relu'))
model.add(tf.keras.layers.Dense(69, activation='softmax'))

model.compile(optimizer=keras.optimizers.Adamax(learning_rate=0.01),
              loss="categorical_crossentropy",
              metrics=['acc'])

#model.build(input_shape=(1,6,69))

model.summary()
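Because the LSTM is stateful with a fixed batch size of 1, hidden state carries over from batch to batch and must be cleared by hand; a typical training loop (epoch count illustrative; X and y are assumed to be prepared with a leading batch axis of 1):

for epoch in range(10):
    model.fit(X, y, batch_size=1, epochs=1, shuffle=False)
    model.reset_states()  # drop carried-over LSTM state between epochs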
Example #23
    sample_X = encoded_trim[num_start:num_start + 6, :]
    sample_Y = encoded_trim[num_start + 6:num_start + 7, :]

    sample_X = sample_X.reshape(1, 6, 69)
    #sample_Y = sample_Y.reshape(1,1,69)

    #print(sample_X.shape)
    #print(sample_Y.shape)
    
    return sample_X, sample_Y

#this_x, this_y = gen_sample(0)

model = Sequential()
#model.add(LSTM(69, input_shape = (6,69), return_sequences = True))
model.add(LSTM(69, input_shape=(6, 69)))
model.add(tf.keras.layers.Dense(69, activation='softmax'))


model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.00001),
              loss="categorical_crossentropy")
model.summary()

test_1 = [9, 36, 49, 56, 62, 9]    # June 27
test_2 = [15, 28, 52, 53, 63, 18]  # July 1
test_3 = [16, 21, 27, 60, 61, 6]   # July 4
test_4 = [3, 10, 34, 36, 62, 5]    # July 8
test_5 = [14, 19, 61, 62, 64, 4]
Example #24
    X_train, y_train = _load_data(df.iloc[0:ntrn], n_prev)
    X_test, y_test = _load_data(df.iloc[ntrn:], n_prev)
    return (X_train, y_train), (X_test, y_test)


(X_train, y_train), (X_test, y_test) = train_test_split(input_dataframe)

# Build the neural-network model
in_out_neurons = 1
hidden_neurons = 300
length_of_sequences = 50

model = Sequential()
model.add(
    LSTM(hidden_neurons,
         batch_input_shape=(None, length_of_sequences, in_out_neurons),
         return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(
    loss="mean_squared_error",
    optimizer="adam",
)

# Run training
early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=0)
history = model.fit(X_train,
                    y_train,
                    batch_size=600,
                    epochs=10,
                    validation_split=0.1,
Example #25
            y_test, y_val_pred)

        self.precisions.append(_precision)
        self.recalls.append(_recall)
        self.f1_scores.append(_f1)


metrics = ModelMetrics()

# ML model ----------------------------------------------

epochs = 10
ml_model1 = Sequential()

ml_model1.add(Embedding(max_features, 128, input_length=maxlen))
ml_model1.add(LSTM(128))
ml_model1.add(Dropout(0.5))
ml_model1.add(Dense(1))
ml_model1.add(Activation('sigmoid'))

ml_model1.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['mae', 'acc'])

# Split the data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.2,
)