# Candidate Dense-layer widths for the random/grid search below; zipped with
# the Conv1D widths (C) and JANET widths (J) — presumably defined earlier in
# the file (not visible in this fragment; TODO confirm).
D = [27, 72, 70, 74, 57, 72, 19, 30, 75, 57]

# Architecture search: for each (Conv1D width, JANET width, Dense width)
# triple, train `repetition` models and collect test-set metrics.
# NOTE(review): C and J are not defined in this fragment — confirm they are
# declared earlier in the file alongside D.
for NConv, NJanet, NDense in zip(C, J, D):
    acc_list = []
    prec_list = []
    recall_list = []
    f1_list = []
    support_list = []
    for i in range(repetition):
        early_stopping = EarlyStopping(monitor='val_acc', patience=patience)
        # NOTE(review): `checkpoint` is created but never passed to fit();
        # add it to `callbacks` below if weight checkpointing is wanted.
        checkpoint = ModelCheckpoint('6dmg_try.h5', monitor='val_acc', verbose=0, save_weights_only=True)
        nadam = Nadam(lr=1e-4, beta_1=0.9, beta_2=0.9)

        # Conv1D front-end -> JANET recurrence -> Dense head, 20-way softmax.
        model1 = Sequential()
        model1.add(Conv1D(NConv, 5, activation='relu', input_shape=(240, 6)))
        model1.add(JANET(NJanet, activation='relu'))
        model1.add(Dense(NDense, activation='relu'))
        model1.add(Dense(20, activation='softmax'))
        # FIX: `print model1.summary(90)` was Python-2-only syntax, and
        # summary() already prints its table (it returns None, so the print
        # only emitted a spurious "None"). A plain call suffices.
        model1.summary(90)
        model1.compile(loss='categorical_crossentropy', optimizer=nadam, metrics=['accuracy'])
        model1.fit(xTrain,
                   yTrain,
                   epochs=epochs,
                   validation_data=(xVal, yVal),
                   callbacks=[early_stopping],
                   verbose=1,
                   batch_size=batch_size)

        predictedTest = np.argmax(model1.predict(xTest), axis=1)
        # NOTE(review): assumes yTest holds integer class labels (not one-hot)
        # — verify, since the loss is categorical_crossentropy.
        acc = accuracy_score(yTest, predictedTest)
        precision, recall, f1, support = precision_recall_fscore_support(yTest, predictedTest, average='weighted')
        # FIX: the metrics were computed but never stored, leaving the
        # accumulator lists above permanently empty.
        acc_list.append(acc)
        prec_list.append(precision)
        recall_list.append(recall)
        f1_list.append(f1)
        support_list.append(support)
 # NOTE(review): corrupted fragment — the enclosing loop header (which would
 # define NConv1, NConv2, NJanet1, NJanet2, NDense1 and NDense2) is missing
 # above, and the fit() call at the bottom is truncated mid-argument-list.
 prec_list = []
 recall_list = []
 f1_list = []
 for i in range(repetition):
     early_stopping = EarlyStopping(monitor='val_acc', patience=patience)
     # Created but not visibly used before the fragment is cut off.
     checkpoint = ModelCheckpoint('6dmg_try.h5',
                                  monitor='val_acc',
                                  verbose=0,
                                  save_weights_only=True)
     nadam = Nadam(lr=1e-4, beta_1=0.9, beta_2=0.9)
     # Deeper variant: two Conv1D layers -> two stacked JANET layers -> two
     # Dense layers, with Dropout(0.2) after every layer; 20-way softmax head.
     model6 = Sequential()
     model6.add(Conv1D(NConv1, 5, activation='relu', input_shape=(240, 6)))
     model6.add(Dropout(0.2))
     model6.add(Conv1D(NConv2, 5, activation='relu'))
     model6.add(Dropout(0.2))
     model6.add(JANET(NJanet1, activation='relu', return_sequences=True))
     model6.add(Dropout(0.2))
     model6.add(JANET(NJanet2, activation='relu'))
     model6.add(Dropout(0.2))
     model6.add(Dense(NDense1, activation='relu'))
     model6.add(Dropout(0.2))
     model6.add(Dense(NDense2, activation='relu'))
     model6.add(Dropout(0.2))
     model6.add(Dense(20, activation='softmax'))
     model6.compile(loss='categorical_crossentropy',
                    optimizer=nadam,
                    metrics=['accuracy'])
     # Truncated: the argument list of this fit() call continues past the
     # end of the fragment.
     model6.fit(xTrain,
                yTrain,
                epochs=epochs,
                validation_data=(xVal, yVal),
# Example #3 (scraper artifact: "Beispiel #3" heading and vote count "0"
# from the code-search page these snippets were concatenated from)
    # NOTE(review): corrupted fragment — the enclosing loop header (which
    # would define NJanet1, NJanet2, NDense1 and NDense2) is missing above,
    # and the fit() call at the bottom is truncated mid-argument-list.
    acc_list = []
    prec_list = []
    recall_list = []
    f1_list = []
    for i in range(repetition):
        early_stopping = EarlyStopping(monitor='val_acc', patience=patience)
        # Created but not visibly used before the fragment is cut off.
        checkpoint = ModelCheckpoint('6dmg_try.h5',
                                     monitor='val_acc',
                                     verbose=0,
                                     save_weights_only=True)
        nadam = Nadam(lr=1e-4, beta_1=0.9, beta_2=0.9)

        # Recurrent-only variant: two stacked JANET layers (no Conv1D
        # front-end, variable-length input) -> two Dense layers, Dropout(0.2)
        # throughout; 20-way softmax head.
        model7 = Sequential()
        model7.add(
            JANET(NJanet1,
                  activation='relu',
                  return_sequences=True,
                  input_shape=(None, 6)))
        model7.add(Dropout(0.2))
        model7.add(JANET(NJanet2, activation='relu'))
        model7.add(Dropout(0.2))
        model7.add(Dense(NDense1, activation='relu'))
        model7.add(Dropout(0.2))
        model7.add(Dense(NDense2, activation='relu'))
        model7.add(Dropout(0.2))
        model7.add(Dense(20, activation='softmax'))
        model7.compile(loss='categorical_crossentropy',
                       optimizer=nadam,
                       metrics=['accuracy'])
        # Truncated: the argument list of this fit() call continues past the
        # end of the fragment.
        model7.fit(xTrain,
                   yTrain,
                   epochs=epochs,
# Example #4 (scraper artifact: "Beispiel #4" heading and vote count "0"
# from the code-search page these snippets were concatenated from)
            # NOTE(review): fragment — this is the tail of a batch generator
            # (consumed by fit_generator below); the function header and the
            # construction of add_values/add_indices/first_half/second_half
            # are missing from this chunk.
            # Flag the two time steps whose values should be summed.
            add_indices[i, [first_half, second_half]] = 1.

        # Zip the values and indices in a third dimension:
        # inputs has the shape (batch_size, time_steps, 2)
        inputs = np.dstack((add_values, add_indices))
        # Target = sum of the two flagged values per sample.
        targets = np.sum(np.multiply(add_values, add_indices), axis=1)

        # center at zero mean (per time step/channel, across the batch)
        inputs -= np.mean(inputs, axis=0, keepdims=True)

        yield inputs, targets


print('Build model...')
# Single JANET layer regressing the "addition problem" target: sum of the
# two flagged values in a length-TIME_STEPS sequence of (value, flag) pairs.
model = Sequential()
model.add(JANET(NUM_UNITS, input_shape=(TIME_STEPS, 2)))
model.add(Dense(1, activation='linear'))

# try using different optimizers and different optimizer configs
# FIX: Adam(LEARNING_RATE, amsgrad=True) was constructed but compile() was
# passed the string 'adam', which silently builds a *default* Adam
# (lr=1e-3, amsgrad=False) and discards the configured optimizer.
optimizer = Adam(LEARNING_RATE, amsgrad=True)
model.compile(loss='mse', optimizer=optimizer)

# Train from the infinite batch generator above, checkpointing on training
# loss. NOTE(review): truncated fragment — the ModelCheckpoint argument list
# and the surrounding callbacks=[...] / fit_generator(...) calls are cut off
# mid-expression. (fit_generator is also deprecated in modern Keras in
# favour of fit(); presumably this targets an older standalone Keras —
# confirm against the project's pinned version.)
model.fit_generator(batch_generator(),
                    steps_per_epoch=STEPS_PER_EPOCH,
                    epochs=NUM_EPOCHS,
                    verbose=1,
                    callbacks=[
                        ModelCheckpoint('weights/janet_addition_%d.h5' %
                                        (TIME_STEPS),
                                        monitor='loss',
                                        save_best_only=True,
# IMDB binary sentiment: load the top `max_features` words, pad every review
# to `maxlen` tokens, and build an Embedding -> JANET -> sigmoid classifier.
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
# Pad/truncate both splits to a common fixed length.
x_train, x_test = (sequence.pad_sequences(split, maxlen=maxlen)
                   for split in (x_train, x_test))
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_shape=(maxlen, )))
model.add(JANET(128, dropout=0.0, recurrent_dropout=0.0))
model.add(Dense(1, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

model.summary()

print('Train...')
# NOTE(review): truncated fragment — the argument list of this fit() call
# continues past the end of the chunk.
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=15,
          validation_data=(x_test, y_test),
# Standardize features to zero mean / unit variance (epsilon guards against
# division by zero on constant features).
# NOTE(review): x_test is normalized with its *own* statistics rather than
# the training set's — confirm this is intentional for this benchmark.
train_mean = x_train.mean(axis=0, keepdims=True)
train_std = x_train.std(axis=0, keepdims=True)
x_train = (x_train - train_mean) / (train_std + 1e-8)
test_mean = x_test.mean(axis=0, keepdims=True)
test_std = x_test.std(axis=0, keepdims=True)
x_test = (x_test - test_mean) / (test_std + 1e-8)

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

print('Evaluate IRNN...')
# Single JANET recurrence over the flattened input sequence, softmax head.
model = Sequential()
model.add(JANET(hidden_units, input_shape=x_train.shape[1:]))
model.add(Dense(num_classes, activation='softmax'))

model.summary()

# NOTE(review): the name is historical — this is an Adam optimizer (with
# amsgrad), not RMSprop. Kept as-is in case later code references it.
rmsprop = Adam(lr=learning_rate, amsgrad=True)

model.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy'])

# model.fit(x_train, y_train,
#           batch_size=batch_size,
#           epochs=epochs,
#           verbose=1,
#           validation_data=(x_test, y_test),