def createNpModel():
    """Build and compile a stacked-GRU regression model for univariate series.

    Architecture: input of shape (1, 1) -> GRU(512, sequences) -> Dropout
    -> GRU(512) -> Dropout -> Dense(1, relu).

    Returns:
        A compiled ``keras.Model`` using the Adam optimizer and MAE loss.
    """
    inputs = Input((1, 1))
    x = GRU(512, return_sequences=True, reset_after=True)(inputs)
    x = Dropout(0.25)(x)
    x = GRU(512, reset_after=True)(x)
    x = Dropout(0.25)(x)
    outputs = Dense(1, activation='relu')(x)
    model = Model(inputs, outputs)
    # NOTE(review): 'accuracy' was removed from metrics — it is meaningless
    # for a continuous regression target trained with MAE and only produced
    # noise in the training logs.
    model.compile(optimizer='adam', loss='mae')
    return model
def train_health(dense, dimension, batch_size, hid_layer, num_epochs,
                 x_train, y_train, x_test, y_test):
    """Build, compile and train the attention + GRU health-prediction model.

    Args:
        dense: Unused; kept for call-site compatibility.
        dimension: Unused in the model (input width is hard-coded to 284);
            kept for call-site compatibility.
        batch_size: Mini-batch size passed to ``model.fit``.
        hid_layer: Number of GRU hidden units.
        num_epochs: Number of training epochs.
        x_train, y_train: Training inputs and multi-label targets (284-wide).
        x_test, y_test: Unused here; kept for call-site compatibility.

    Returns:
        The trained ``keras.Model``.
    """
    look_ahead = 2
    timesteps = look_ahead - 1  # single-step lookahead -> one timestep

    print(f'dense : {dense} dimension : {dimension} '
          f'batch size : {batch_size} hidden layer : {hid_layer} '
          f'num of epochs : {num_epochs}')

    # Expected input shape: (batch_size, timesteps, 284). NOTE(review): the
    # feature width 284 is hard-coded and ignores `dimension` — confirm
    # whether it should be parameterized.
    inputs = Input(shape=(timesteps, 284,))
    x = attention(inputs)
    x = GRU(hid_layer, input_shape=(timesteps, 284),
            activation='relu', dropout=0.5)(x)
    output = Dense(284, activation='softmax', kernel_regularizer=h_reg)(x)

    # BUGFIX: the `input=`/`output=` keyword names were removed from Keras
    # long ago; modern versions raise TypeError. Use `inputs=`/`outputs=`.
    model = Model(inputs=[inputs], outputs=output)

    print("MODEL COMPILED. TRAINING AND VALIDATION STARTED.")
    model.compile(loss='binary_crossentropy', optimizer='adam')
    print(model.summary())

    model.fit([x_train], y_train,
              batch_size=batch_size,
              epochs=num_epochs,
              verbose=2)
    return model