def create_model():
    units = 512
    middle_units = 256
    dropout_value = 0.3
    activation_function = 'softmax'
    loss_function = 'categorical_crossentropy'
    optimizer = 'rmsprop'

    model = Sequential()
    # Reads the module-level globals `network_input` (samples, timesteps,
    # features) and `vocab_size`, which must be defined before this is called.
    model.add(LSTM(units,
                   input_shape=(network_input.shape[1], network_input.shape[2]),
                   recurrent_dropout=dropout_value,
                   return_sequences=True))
    model.add(LSTM(units,
                   return_sequences=True,
                   recurrent_dropout=dropout_value))
    model.add(LSTM(units))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_value))
    model.add(Dense(middle_units))
    model.add(Activation('relu'))
    model.add(Dropout(dropout_value))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_value))
    model.add(Dense(vocab_size))
    model.add(Activation(activation_function))
    model.compile(loss=loss_function, optimizer=optimizer)
    return model
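# Usage sketch for create_model() above: `network_input` and `vocab_size` are
# read as module-level globals, so they must exist before the call. The shapes
# here (100-step sequences of a single normalized feature, a 358-symbol
# vocabulary) are illustrative assumptions, not values from the source.
import numpy as np

vocab_size = 358
network_input = np.random.randint(0, vocab_size, size=(1000, 100, 1)) / float(vocab_size)

model = create_model()
model.summary()  # three stacked 512-unit LSTMs feeding a vocab_size softmax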
def create_model_pp():
    model = Sequential()
    model.add(Flatten(input_shape=(300, 300, 3)))
    model.add(Dense(4, activation="softmax"))
    model.compile(loss=categorical_crossentropy,
                  metrics=[categorical_accuracy])
    return model
def create_model():
    model = Sequential()
    model.add(Dense(10, activation="softmax", input_dim=3072))
    model.compile(loss=sparse_categorical_crossentropy,
                  metrics=[sparse_categorical_accuracy])
    return model
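# Smoke test for the 10-class create_model() above, assuming CIFAR-10-style
# inputs flattened to 3072 features (32 * 32 * 3) with integer class labels;
# the data is random and purely illustrative. compile() without an explicit
# optimizer falls back to Keras's default ('rmsprop').
import numpy as np

x = np.random.rand(64, 3072).astype("float32")
y = np.random.randint(0, 10, size=(64,))

model = create_model()
model.fit(x, y, epochs=1, batch_size=16)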
def create_model():
    model = Sequential()
    model.add(Dense(128, input_dim=3072))  # 32 * 32 * 3
    model.compile(loss=sparse_categorical_crossentropy,
                  metrics=[sparse_categorical_accuracy])
    return model
def free_attn_lstm(dataset_object: LSTM_data):
    X_train, X_test, Y_train, Y_test = dataset_object.get_memory()
    # Drop the last 12 feature columns
    X_train, X_test = X_train[:, :, :-12], X_test[:, :, :-12]

    regressor = Sequential()
    # Adding the first LSTM layer and some Dropout regularisation
    regressor.add(LSTM(units=NEURONS,
                       return_sequences=True,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       input_shape=(X_train.shape[1], X_train.shape[2]),
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dropout(DROPOUT))
    # Adding a second LSTM layer and some Dropout regularisation
    regressor.add(LSTM(units=NEURONS,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       return_sequences=True,
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dropout(DROPOUT))
    # Adding a third LSTM layer and some Dropout regularisation
    regressor.add(LSTM(units=NEURONS,
                       activation=ACTIVATION,
                       recurrent_activation="sigmoid",
                       bias_regularizer=regularizers.l2(BIAIS_REG),
                       activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dropout(DROPOUT))
    # Adding the output layer
    regressor.add(Dense(units=1,
                        activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)))

    optim = Adam()
    # Compiling the RNN
    regressor.compile(optimizer=optim, loss='mean_squared_error')

    # Fitting the RNN to the Training set
    history = regressor.fit(X_train, Y_train,
                            epochs=EPOCHS,
                            batch_size=BATCH_SIZE,
                            validation_data=(X_test, Y_test),
                            callbacks=[REDUCE_LR, EARLY_STOP])
    regressor.save("data/weights/free_attn_lstm_no_senti")
    plot_train_loss(history)
    evaluate(regressor, X_test, Y_test, dataset_object,
             name="free_attn_lstm", senti="no")
def Train(self, input, target):
    X_train, X_test, Y_train, Y_test = train_test_split(input, target, train_size=0.75)
    Y_train = np.asarray(Y_train)
    Y_test = np.array(Y_test)
    X_train = np.reshape(X_train, [-1, X_train[0].shape[0], X_train[0].shape[1]])
    X_test = np.reshape(X_test, [-1, X_train[0].shape[0], X_train[0].shape[1]])

    model = Sequential()
    model.add(Conv1D(16, 3, padding='same', input_shape=input[0].shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization())
    model.add(GRU(16, return_sequences=True))
    # model.add(Activation("sigmoid"))
    # model.add(LSTM(lstm_out))
    model.add(Flatten())
    model.add(Dense(8, activity_regularizer=l2(0.001)))
    # model.add(GRU(lstm_out, return_sequences=True))
    # model.add(LSTM(lstm_out))
    # model.add(Dense(20, activity_regularizer=l2(0.001)))
    model.add(Activation("relu"))
    model.add(Dense(2))
    model.compile(loss=mean_absolute_error,
                  optimizer='nadam',
                  metrics=[RootMeanSquaredError(), MAE])
    print(model.summary())

    batch_size = 12
    epochs = 100
    # val_loss is minimised, so the plateau check needs mode='min'
    # (mode='max' would never trigger the learning-rate reduction)
    reduce_lr_acc = ReduceLROnPlateau(monitor='val_loss', factor=0.9,
                                      patience=epochs // 10, verbose=1,
                                      min_delta=1e-4, mode='min')
    model.fit(X_train, Y_train,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(X_test, Y_test),
              callbacks=[reduce_lr_acc])
    model.save("PositionEstimation.h5", overwrite=True)

    # acc = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=0)
    predicted = model.predict(X_test, batch_size=batch_size)
    # predicted = out.ravel()
    res = pd.DataFrame({"predicted_x": predicted[:, 0],
                        "predicted_y": predicted[:, 1],
                        "original_x": Y_test[:, 0],
                        "original_y": Y_test[:, 1]})
    res.to_excel("res.xlsx")
def dense_net(dataset_object: LSTM_data):
    X_train, X_test, Y_train, Y_test = dataset_object.get_memory()
    print(X_test.shape, X_train.shape)
    # Collapse the time axis so each sample is a flat feature vector
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[2])
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[2])
    X_train, X_test = X_train[:, :-12], X_test[:, :-12]
    print(X_test.shape, X_train.shape)

    regressor = Sequential()
    regressor.add(Dense(units=EPOCHS, activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dense(units=EPOCHS, activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dense(units=EPOCHS, activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)))
    regressor.add(Dropout(DROPOUT))
    regressor.add(Dense(units=1, activation='relu',
                        bias_regularizer=regularizers.l2(BIAIS_REG),
                        activity_regularizer=regularizers.l2(L2)))

    optim = Adam()
    # Compiling the network
    regressor.compile(optimizer=optim, loss='mean_squared_error')

    # Fitting the network to the Training set
    history = regressor.fit(X_train, Y_train,
                            epochs=EPOCHS,
                            batch_size=BATCH_SIZE,
                            validation_data=(X_test, Y_test),
                            callbacks=[EARLY_STOP, REDUCE_LR])
    regressor.save("data/weights/dense_no_senti")
    plot_train_loss(history)
    evaluate(regressor, X_test, Y_test, dataset_object,
             name="dense", senti="no")
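# free_attn_lstm() and dense_net() above rely on module-level constants and
# callback objects defined elsewhere; one plausible set of definitions,
# offered purely as an assumption to make the functions runnable:
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

NEURONS = 64        # LSTM width
ACTIVATION = "tanh"
DROPOUT = 0.2
BIAIS_REG = 1e-4    # bias-regularizer strength (name kept as in the source)
L2 = 1e-4           # activity-regularizer strength
EPOCHS = 100
BATCH_SIZE = 32
EARLY_STOP = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True)
REDUCE_LR = ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=5)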
def construct_model(self,
                    tuned_params: Dict[str, Union[int, float]],
                    hps: HyperParameters = None) -> Model:
    hpf = HyperParameterFactory(self.default_parameters_values, tuned_params, hps)

    max_pool0 = hpf.get_choice(MAXPOOL0_NAME, [1, 2, 4, 8])
    max_pool1 = hpf.get_choice(MAXPOOL1_NAME, [1, 2, 4, 8])
    max_pool2 = hpf.get_choice(MAXPOOL2_NAME, [1, 2, 4, 8])
    filter_0 = hpf.get_choice(FILTER0_NAME, [4, 8, 16, 32])
    filter_1 = hpf.get_choice(FILTER1_NAME, [32, 48, 64])
    filter_2 = hpf.get_choice(FILTER2_NAME, [64, 96, 128])
    dense = hpf.get_int(DENSE_NAME, 32, 128, 8)
    lr = hpf.get_choice(LEARNING_RATE_NAME, [1e-2, 1e-3, 1e-4])

    model = Sequential([
        Input(name='MapView_Input', shape=(43, 39, 7)),
        MaxPooling2D(max_pool0, name='MapView_MaxPool_0'),
        Conv2D(filter_0, 2, strides=1, activation=tf.nn.relu, name='MapView_Conv2D_1'),
        MaxPooling2D(max_pool1, name='MapView_MaxPool_1'),
        Conv2D(filter_1, 3, strides=1, activation=tf.nn.relu, name='MapView_Conv2D_2'),
        MaxPooling2D(max_pool2, name='MapView_MaxPool_2'),
        Conv2D(filter_2, 2, strides=1, activation=tf.nn.relu, name='MapView_Conv2D_3'),
        Flatten(name='MapView_Flatten'),
        Dropout(0.1, name='MapView_Dropout'),
        Dense(dense, activation=tf.nn.relu, name='MapView_Dense'),
        Dense(5, activation=tf.nn.softmax, name='MapView_Output'),
    ])

    loss_fn = tf.keras.losses.CategoricalCrossentropy()
    opt = tf.keras.optimizers.Adam(learning_rate=lr)
    model.compile(optimizer=opt, loss=loss_fn,
                  metrics=[tf.keras.metrics.categorical_accuracy])
    return model
def get_model(self):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(2, 2), activation='relu',
                     input_shape=(self.feature_dim_1, self.feature_dim_2, self.channel)))
    model.add(Conv2D(64, kernel_size=(2, 2), activation='relu'))
    model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
    model.add(MaxPool2D(pool_size=(1, 1)))
    model.add(Dropout(0.5))
    model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
    model.add(Conv2D(256, kernel_size=(2, 2), activation='relu'))
    model.add(MaxPool2D(pool_size=(1, 1)))
    model.add(Dropout(0.5))
    model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
    model.add(Conv2D(256, kernel_size=(4, 4), activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(256, kernel_regularizer=regularizers.l2(0.2), activation='relu'))
    model.add(Dense(32, kernel_regularizer=regularizers.l2(0.2), activation='relu'))
    model.add(Dense(self.num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='RMSProp',
                  metrics=['accuracy'])
    return model
def test_seq_to_seq(self):
    # print(self.get_random_states())
    # Data size: 10 x (image + 2 actions) x board/action size
    train_x = np.random.randint(0, 2, size=(10, 3, 9))
    # train_x = [
    #     [
    #         [0.1, 1.0],
    #         [0.1, 1.0],
    #         [0.1, 1.0],
    #         [0.1, 1.0],
    #         [0.1, 1.0],
    #     ]]
    # 1 being the batch size, 10 being the length
    # train_x = np.random.randint(low=0, high=2, size=(1, 10, 9))

    train_y = [[0.11, 0.11, 0.11]] * 10
    # train_y = [0.11]
    train_y = np.array(train_y)

    model = Sequential()
    # model.add(layers.Flatten(input_shape=(3, 9)))
    # model.add(layers.Embedding(input_shape=(10, 9)))
    model.add(layers.LSTM(units=100, input_shape=(3, 9), return_sequences=True))
    model.add(layers.Dropout(rate=0.25))
    model.add(layers.Dense(50, activation='relu'))
    model.add(layers.Dense(1, activation=None))
    model.compile(optimizer='adam', loss=tf.losses.MSE, metrics=['mae'])
    print(model.summary())

    model.fit(x=train_x, y=train_y, epochs=100, verbose=0)
    loss = model.evaluate(train_x, train_y, verbose=2)
    self.assertLess(loss[0], 1e-04)
Lm1 = Sequential()
# q1. try without input?
Lm1.add(Input(shape=(784, )))
Lm1.add(Dense(num_classes, activation='softmax'))

# q2. try SparseCategoricalCrossentropy without one-hot
loss_object = tf.keras.losses.categorical_crossentropy
optimizer = tf.keras.optimizers.SGD(0.01)

# train_loss = tf.keras.metrics.Mean(name='train_loss')
# try SparseCategoricalAccuracy
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

checkpoint_path = "./checkpoints/"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, verbose=1, period=1)

# q3. metrics=xxx without []?
Lm1.compile(optimizer=optimizer, loss=loss_object, metrics=[train_accuracy])

# q4. train_ds?
Lm1.fit(train_ds, epochs=3, callbacks=[cp_callback])
loss, acc = Lm1.evaluate(train_ds)
print("saved model, loss: {:5.2f}, acc: {:5.2f}".format(loss, acc))
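# A minimal sketch of the `train_ds` pipeline the script above assumes (q4): a
# tf.data.Dataset yielding batches of (784-feature vectors, one-hot labels) to
# match the Input(shape=(784,)) layer and categorical_crossentropy loss. The
# MNIST source and the batch size of 32 are assumptions.
import tensorflow as tf

num_classes = 10
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
y_train = tf.keras.utils.to_categorical(y_train, num_classes)

train_ds = (tf.data.Dataset.from_tensor_slices((x_train, y_train))
            .shuffle(10_000)
            .batch(32))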
class FecModel(Model):
    def __init__(self, loader):
        self._loader = loader
        self._num_train = 28709
        self._num_val = 7178
        self._batch_size = 64
        self._num_epoch = 1
        self.create_model()

    def create_model(self):
        self._model = Sequential()
        self._model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                               input_shape=(48, 48, 1)))
        self._model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
        self._model.add(MaxPooling2D(pool_size=(2, 2)))
        self._model.add(Dropout(0.25))
        self._model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        self._model.add(MaxPooling2D(pool_size=(2, 2)))
        self._model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        self._model.add(MaxPooling2D(pool_size=(2, 2)))
        self._model.add(Dropout(0.25))
        self._model.add(Flatten())
        self._model.add(Dense(1024, activation='relu'))
        self._model.add(Dropout(0.5))
        self._model.add(Dense(7, activation='softmax'))
        self._model.compile(loss='categorical_crossentropy',
                            optimizer=Adam(lr=0.0001, decay=1e-6),
                            metrics=['accuracy'])

    def train_model(self):
        print('Load train data...')
        train_generator = self._loader.prepare_train_data()
        print('Load validation data...')
        validation_generator = self._loader.prepare_validation_data()
        model_info = self._model.fit_generator(
            train_generator,
            steps_per_epoch=self._num_train // self._batch_size,
            epochs=self._num_epoch,
            validation_data=validation_generator,
            validation_steps=self._num_val // self._batch_size)

    def evaluate_model(self):
        print('Load validation data...')
        evaluate_generator = self._loader.prepare_validation_data()
        # The generator yields whole batches, so evaluate() takes only `steps`
        # (batch_size/epochs are not valid arguments here).
        model_info = self._model.evaluate(
            evaluate_generator,
            steps=self._num_val // self._batch_size)
        return model_info

    def save_model(self):
        print('Save model...')
        self._model.save_weights(FecConfig.model_file_name)

    def load_model(self):
        print('Load model...')
        self._model.load_weights(FecConfig.model_file_name)

    def make_prediction(self, input_folder):
        processed_images = self._loader.prepare_data(input_folder)
        print('Predicting data...')
        results = []
        for processed_image in processed_images:
            for face_image in processed_image.face_images:
                result = self._model.predict(face_image)
                results.append(result)
        print('Save results...')
        self._loader.save_data(processed_images, results)
class MyAutoEncoder(object):
    # Architecture types (input|hidden...|output widths):
    #  archType 1  => 300|256|300                 archType 2  => 300|128|300
    #  archType 3  => 300|64|300                  archType 4  => 300|32|300
    #  archType 5  => 300|16|300                  archType 6  => 300|128|64|128|300
    #  archType 7  => 300|256|128|256|300         archType 8  => 300|128|64|32|64|128|300
    #  archType 9  => 300|256|128|64|128|256|300  archType 10 => 300|128|64|32|16|32|64|128|300
    #  archType 11 => 300|256|128|64|32|64|128|256|300
    #  archType 12 => 300|256|128|64|32|16|32|64|128|256|300

    # Hidden-layer widths for each architecture type; the bottleneck is the
    # middle entry of each list.
    HIDDEN_LAYERS = {
        1: [256], 2: [128], 3: [64], 4: [32], 5: [16],
        6: [128, 64, 128],
        7: [256, 128, 256],
        8: [128, 64, 32, 64, 128],
        9: [256, 128, 64, 128, 256],
        10: [128, 64, 32, 16, 32, 64, 128],
        11: [256, 128, 64, 32, 64, 128, 256],
        12: [256, 128, 64, 32, 16, 32, 64, 128, 256],
    }

    def __init__(self, logFilePath, inputDim=0, archType=0):
        self.logFilePath = logFilePath
        if archType == 0:
            return  # We are loading a saved model
        if archType not in self.HIDDEN_LAYERS:
            raise ValueError("Incorrect architecture type given.")

        # Create auto encoder+decoder
        hidden = self.HIDDEN_LAYERS[archType]
        self.autoEncoderModel = Sequential()
        self.autoEncoderModel.add(
            Dense(inputDim, input_shape=(inputDim, ), activation='relu'))  # Input layer
        for width in hidden:
            self.autoEncoderModel.add(Dense(width, activation='relu'))
        self.autoEncoderModel.add(Dense(inputDim, activation='relu'))  # Output layer
        self.autoEncoderModel.compile(optimizer='adam', loss=losses.MSE)
        self.autoEncoderModel.summary()

        # Create encoder: reuse the trained layers from the input up to and
        # including the bottleneck in the middle of the hidden stack.
        inputSample = Input(shape=(inputDim, ))
        bottleneck = (len(hidden) + 1) // 2  # index of the bottleneck layer
        encoded = inputSample
        for layer in self.autoEncoderModel.layers[:bottleneck + 1]:
            encoded = layer(encoded)
        self.encoderModel = Model(inputSample, encoded)
        self.encoderModel.summary()

    def train(self, trainX, batchSize, epochs, isDenoising=False):
        tic = time.perf_counter()
        inputLayer = trainX
        if isDenoising:
            # add some noise to the input layer
            inputLayer = trainX + np.random.normal(0, 1, trainX.shape) / 2
        self.autoEncoderModel.fit(inputLayer, trainX,
                                  epochs=epochs,
                                  batch_size=batchSize,
                                  shuffle=True,
                                  validation_split=0.2)
        toc = time.perf_counter()
        with open(self.logFilePath, "a") as resultsWriter:
            resultsWriter.write(
                f"AutoEncoder training time: {toc - tic:0.4f} seconds \r")
        return toc - tic

    def encode(self, dataX, isTrainData):
        tic = time.perf_counter()
        encodedDataX = self.encoderModel.predict(dataX)
        toc = time.perf_counter()
        if isTrainData:
            with open(self.logFilePath, "a") as resultsWriter:
                resultsWriter.write(
                    f"AutoEncoder training encoding time: {toc - tic:0.4f} seconds \r")
        else:
            with open(self.logFilePath, "a") as resultsWriter:
                resultsWriter.write(
                    f"AutoEncoder testing encoding time: {toc - tic:0.4f} seconds \r\r")
        return encodedDataX, toc - tic
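# Illustrative use of MyAutoEncoder above on random 300-dimensional data; the
# log path, batch size, and epoch count are arbitrary assumptions.
import numpy as np

trainX = np.random.rand(1024, 300).astype("float32")
ae = MyAutoEncoder("autoencoder.log", inputDim=300, archType=6)  # 300|128|64|128|300
ae.train(trainX, batchSize=32, epochs=5, isDenoising=True)
encoded, seconds = ae.encode(trainX, isTrainData=True)
print(encoded.shape)  # (1024, 64): the archType-6 bottleneck width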
class CNNClassifier(ClassifierMixin, BaseMultilayerPerceptron):
    def __init__(self, hidden_layer_sizes=(100, ), activation="relu",
                 solver='adam', alpha=0.0001, batch_size='auto',
                 learning_rate="constant", learning_rate_init=0.001,
                 power_t=0.5, max_iter=200, shuffle=True, random_state=None,
                 tol=1e-4, verbose=False, warm_start=False, momentum=0.9,
                 nesterovs_momentum=True, early_stopping=False,
                 validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, n_iter_no_change=10, max_fun=15000, conf=None):
        super().__init__(hidden_layer_sizes=hidden_layer_sizes,
                         activation=activation, solver=solver, alpha=alpha,
                         batch_size=batch_size, learning_rate=learning_rate,
                         learning_rate_init=learning_rate_init,
                         power_t=power_t, max_iter=max_iter, loss='log_loss',
                         shuffle=shuffle, random_state=random_state, tol=tol,
                         verbose=verbose, warm_start=warm_start,
                         momentum=momentum,
                         nesterovs_momentum=nesterovs_momentum,
                         early_stopping=early_stopping,
                         validation_fraction=validation_fraction,
                         beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
                         n_iter_no_change=n_iter_no_change, max_fun=max_fun)
        # Load model
        self.conf = conf
        self.logger = loggerElk(__name__, True)

        # Building the model
        self.classifier = Sequential()
        # Step 1 - Convolution
        self.classifier.add(
            Convolution2D(128, (5, 5),
                          input_shape=(self.conf.nn_image_size, self.conf.nn_image_size, 1),
                          activation='relu'))
        # Adding another layer
        self.classifier.add(Convolution2D(64, (4, 4), activation='relu'))
        # Pooling it
        self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
        # Adding another layer
        self.classifier.add(Convolution2D(32, (3, 3), activation='relu'))
        # Pooling
        self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
        # Adding another layer
        self.classifier.add(Convolution2D(32, (3, 3), activation='relu'))
        # Pooling
        self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
        # Step 2 - Flattening
        self.classifier.add(Flatten())
        # Step 3 - Full connection
        self.classifier.add(Dense(units=128, activation='relu'))
        # For the output step
        self.classifier.add(Dense(units=self.conf.nn_class_size, activation='softmax'))
        self.classifier.add(Dropout(0.02))
        # Add regularizers
        # classifier.add(Dense(128, input_dim=128,
        #                      kernel_regularizer=regularizers.l1(0.001),
        #                      activity_regularizer=regularizers.l1(0.001),
        #                      activation='relu'))

        self.classifier.compile(optimizer='adam',
                                loss='categorical_crossentropy',
                                metrics=['accuracy'])
        # dropout = classifier.add(Dropout(0.2))

    def save_nn(self):
        try:
            dir_path = os.path.join(self.conf.working_path, self.conf.nn_model_name)
            if os.path.exists(dir_path):
                shutil.rmtree(dir_path)
            os.makedirs(dir_path)
            save_model(self.classifier, filepath=dir_path, overwrite=True)
        except Exception as exc:
            self.logger.Error(exc)

    def load_nn(self):
        try:
            dir_path = os.path.join(self.conf.working_path, self.conf.nn_model_name)
            self.classifier = load_model(filepath=dir_path)
        except Exception as exc:
            self.logger.Error(exc)

    def fit(self, training_set, validation_set):
        """Fit the model to data matrix X and target(s) y."""
        check_pointer = callbacks.ModelCheckpoint(
            filepath=self.conf.working_path,
            monitor='val_acc',
            verbose=1,
            save_best_only=True,
            save_weights_only=False,
            mode='auto',
            period=1)
        history = self.classifier.fit_generator(
            training_set,
            steps_per_epoch=(training_set.n / 32),
            epochs=self.conf.nn_epochs,
            validation_data=validation_set,
            validation_steps=(validation_set.n / 32),
            callbacks=[check_pointer])

    @property
    def partial_fit(self):
        """Update the model with a single iteration over the given data.

        Parameters
        ----------
        classes : array, shape (n_classes), default None
            Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        Returns
        -------
        self : returns a trained MLP model.
        """
        # if self.solver not in _STOCHASTIC_SOLVERS:
        #     raise AttributeError("partial_fit is only available for stochastic"
        #                          " optimizer. %s is not stochastic"
        #                          % self.solver)
        # return self._partial_fit
        return

    def _partial_fit(self, X, y, classes=None):
        # if _check_partial_fit_first_call(self, classes):
        #     self._label_binarizer = LabelBinarizer()
        #     if type_of_target(y).startswith('multilabel'):
        #         self._label_binarizer.fit(y)
        #     else:
        #         self._label_binarizer.fit(classes)
        #
        # super()._partial_fit(X, y)
        #
        # return self
        pass

    def _validate_input(self, X, y, incremental):
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                         multi_output=True)
        if y.ndim == 2 and y.shape[1] == 1:
            y = column_or_1d(y, warn=True)

        if not incremental:
            self._label_binarizer = LabelBinarizer()
            self._label_binarizer.fit(y)
            self.classes_ = self._label_binarizer.classes_
        elif self.warm_start:
            classes = unique_labels(y)
            if set(classes) != set(self.classes_):
                raise ValueError("warm_start can only be used where `y` has "
                                 "the same classes as in the previous "
                                 "call to fit. Previously got %s, `y` has %s" %
                                 (self.classes_, classes))
        else:
            classes = unique_labels(y)
            if len(np.setdiff1d(classes, self.classes_, assume_unique=True)):
                raise ValueError("`y` has classes not in `self.classes_`."
                                 " `self.classes_` has %s. 'y' has %s." %
                                 (self.classes_, classes))

        y = self._label_binarizer.transform(y)
        return X, y

    def predict(self, X):
        """Predict using the multi-layer perceptron classifier.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data.

        Returns
        -------
        y : array-like, shape (n_samples,) or (n_samples, n_classes)
            The predicted classes.
        """
        # check_is_fitted(self)
        # y_pred = self._predict(X)
        # if self.n_outputs_ == 1:
        #     y_pred = y_pred.ravel()
        # return self._label_binarizer.inverse_transform(y_pred)
        y_pred = self.classifier.predict(X)
        return y_pred

    def predict_log_proba(self, X):
        """Return the log of probability estimates.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The input data.

        Returns
        -------
        log_y_prob : array-like, shape (n_samples, n_classes)
            The predicted log-probability of the sample for each class
            in the model, where classes are ordered as they are in
            `self.classes_`. Equivalent to log(predict_proba(X)).
        """
        # y_prob = self.predict_proba(X)
        # return np.log(y_prob, out=y_prob)
        pass

    def predict_proba(self, X):
        """Probability estimates.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data.

        Returns
        -------
        y_prob : array-like, shape (n_samples, n_classes)
            The predicted probability of the sample for each class in the
            model, where classes are ordered as they are in `self.classes_`.
        """
        check_is_fitted(self)
        y_pred = self.classifier.predict_proba(X)
        if self.n_outputs_ == 1:
            y_pred = y_pred.ravel()
        if y_pred.ndim == 1:
            return np.vstack([1 - y_pred, y_pred]).T
        else:
            return y_pred
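# Hypothetical smoke test for CNNClassifier above: `conf` is stood in with a
# SimpleNamespace carrying only the fields the constructor reads
# (nn_image_size, nn_class_size, plus the paths used by save_nn/fit); all
# values are assumptions, and loggerElk must be importable in this module for
# construction to succeed.
from types import SimpleNamespace
import numpy as np

conf = SimpleNamespace(nn_image_size=64, nn_class_size=4,
                       working_path=".", nn_model_name="cnn", nn_epochs=1)
clf = CNNClassifier(conf=conf)

x = np.random.rand(8, 64, 64, 1).astype("float32")
probs = clf.classifier.predict(x)
print(probs.shape)  # (8, 4) softmax outputs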
model.add(Conv2D(32, 3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, 3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

print("Training model...")
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=int(np.ceil(train_data_gen.n / float(batch_size))),
    epochs=EPOCHS,
    validation_data=val_data_gen,
    validation_steps=int(np.ceil(val_data_gen.n / float(batch_size))))
print("Training complete")

acc = history.history['accuracy']  # tensorflow 1.0 = 'acc', 2.0 = 'accuracy'
val_acc = history.history['val_accuracy']
loss = history.history['loss']
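# A follow-on sketch plotting the curves collected above; `val_loss` is pulled
# from the same history dict, and the matplotlib layout is an arbitrary choice.
import matplotlib.pyplot as plt

val_loss = history.history['val_loss']
epochs_range = range(len(acc))

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='train accuracy')
plt.plot(epochs_range, val_acc, label='validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='train loss')
plt.plot(epochs_range, val_loss, label='validation loss')
plt.legend()
plt.show()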