def training_phase_2(self):
    """Bootstrap training: resume from the fixed "cp-b2.ckpt" checkpoint.

    Loads the saved weights into the inverse network, recompiles it, and
    trains for up to 300 epochs on the batched dataset.
    """
    # NOTE(review): assumes trained_models_dir ends with a path separator —
    # confirm; os.path.join would be safer but changes the built path.
    checkpoint_path = self.trained_models_dir + "cp-b2.ckpt"
    print("\ncheckpoint: ", checkpoint_path)

    # Restore weights into the model, then (re)compile it.
    model = self.inverseNet.model
    model.load_weights(checkpoint_path)
    self.inverseNet.compile()
    model = self.inverseNet.model

    # Build the input pipeline: shuffle, repeat indefinitely, batch, prefetch.
    dataset = LoadDataset().load_dataset_batches()
    dataset = (
        dataset.shuffle(self.SHUFFLE_BUFFER_SIZE)
        .repeat()
        .batch(self.BATCH_SIZE)
        .prefetch(buffer_size=self.AUTOTUNE)
    )

    # Dataset repeats forever, so an explicit step count bounds each epoch.
    steps = tf.math.ceil(self.SHUFFLE_BUFFER_SIZE / self.BATCH_SIZE).numpy()
    print("Training with %d steps per epoch" % steps)

    history = model.fit(
        dataset,
        epochs=300,
        steps_per_epoch=steps,
        callbacks=[self.cp_callback, self.cp_stop],
    )
    self.history_list.append(history)
def training_phase_12(self):
    """Continue training from the latest checkpoint in checkpoint_dir."""
    # Find the most recent checkpoint and restore its weights.
    checkpoint_path = tf.train.latest_checkpoint(self.checkpoint_dir)
    print("\ncheckpoint: ", checkpoint_path)

    model = self.inverseNet.model
    model.load_weights(checkpoint_path)
    self.inverseNet.compile()
    model = self.inverseNet.model

    # Input pipeline: shuffle, repeat indefinitely, batch, prefetch.
    dataset = LoadDataset().load_dataset_batches()
    dataset = (
        dataset.shuffle(self.SHUFFLE_BUFFER_SIZE)
        .repeat()
        .batch(self.BATCH_SIZE)
        .prefetch(buffer_size=self.AUTOTUNE)
    )

    # Repeating dataset needs an explicit per-epoch step count.
    steps = tf.math.ceil(self.SHUFFLE_BUFFER_SIZE / self.BATCH_SIZE).numpy()
    print("Training with %d steps per epoch" % steps)

    history = model.fit(
        dataset,
        epochs=2000,
        steps_per_epoch=steps,
        callbacks=[self.cp_callback, self.cp_stop],
    )
    self.history_list.append(history)
def training_phase_21(self):
    """Continue training from the latest checkpoint for up to 240 epochs."""
    # Restore the most recent checkpoint from checkpoint_dir.
    checkpoint_path = tf.train.latest_checkpoint(self.checkpoint_dir)
    print("\ncheckpoint: ", checkpoint_path)

    model = self.inverseNet.model
    model.load_weights(checkpoint_path)
    self.inverseNet.compile()
    model = self.inverseNet.model

    # Input pipeline: shuffle, repeat indefinitely, batch, prefetch.
    dataset = LoadDataset().load_dataset_batches()
    dataset = (
        dataset.shuffle(self.SHUFFLE_BUFFER_SIZE)
        .repeat()
        .batch(self.BATCH_SIZE)
        .prefetch(buffer_size=self.AUTOTUNE)
    )

    # Repeating dataset needs an explicit per-epoch step count.
    steps = tf.math.ceil(self.SHUFFLE_BUFFER_SIZE / self.BATCH_SIZE).numpy()
    print("Training with %d steps per epoch" % steps)

    history = model.fit(
        dataset,
        epochs=240,
        steps_per_epoch=steps,
        callbacks=[self.cp_callback, self.cp_stop],
    )
    self.history_list.append(history)
def evaluate_model(self):
    """Evaluate the model on validation data for a fixed 100 steps."""
    dataset = LoadDataset().load_data_for_expression_evaluate()
    # The dataset repeats, so evaluation is capped via steps=100 below.
    dataset = (
        dataset.shuffle(self.SHUFFLE_BUFFER_SIZE)
        .repeat()
        .batch(self.BATCH_SIZE)
        .prefetch(buffer_size=self.AUTOTUNE)
    )

    loss, acc = self.model.evaluate(dataset, steps=100)
    print("\nRestored model, Loss: {0} \nAccuracy: {1}\n".format(loss, acc))
def training(self):
    """Compile the model and train it on the expression dataset (500 epochs)."""
    self.compile()
    model = self.model

    # Input pipeline: shuffle, repeat indefinitely, batch, prefetch.
    dataset = LoadDataset().load_data_for_expression()
    dataset = (
        dataset.shuffle(self.SHUFFLE_BUFFER_SIZE)
        .repeat()
        .batch(self.BATCH_SIZE)
        .prefetch(buffer_size=self.AUTOTUNE)
    )

    # Repeating dataset needs an explicit per-epoch step count.
    steps = tf.math.ceil(self.SHUFFLE_BUFFER_SIZE / self.BATCH_SIZE).numpy()
    print("Training with %d steps per epoch" % steps)

    history = model.fit(
        dataset,
        epochs=500,
        steps_per_epoch=steps,
        callbacks=[self.cp_callback, self.cp_stop],
    )
    self.history_list.append(history)
def evaluate_model(self):
    """Evaluate the model on a single-image test dataset (on the CPU)."""
    # Pinned to CPU — presumably to avoid GPU memory pressure; confirm.
    with tf.device('/device:CPU:0'):
        dataset = LoadDataset().load_dataset_single_image(self._case)
        loss, mse, mae = self.model.evaluate(dataset)
        print("\nRestored model, Loss: {0} \nMean Squared Error: {1}\n"
              "Mean Absolute Error: {2}\n".format(loss, mse, mae))
def model_predict(self, image_path):
    """Run the model on one image and return the transposed prediction.

    :param image_path: path to the input image file
    :return: model output as a transposed numpy array
    """
    # Loader returns a 4-D (batched) tensor suitable for model.predict.
    batch = LoadDataset().load_and_preprocess_image_4d(image_path)
    prediction = self.model.predict(batch)
    return np.transpose(prediction)
def training_phase_1(self):
    """Start training with ImageNet weights on the SyntheticDataset.

    Compiles the inverse network from scratch (no checkpoint restore) and
    trains for up to 2000 epochs.
    """
    # Build and compile model:
    self.inverseNet.compile()
    model = self.inverseNet.model

    # Input pipeline: shuffle, repeat indefinitely, batch, prefetch.
    dataset = LoadDataset().load_dataset_batches()
    # Bug fix: the original called .prefetch(None); tf.data.Dataset.prefetch
    # requires a buffer size and every sibling phase uses AUTOTUNE, so use
    # buffer_size=self.AUTOTUNE here for correctness and consistency.
    dataset = (
        dataset.shuffle(self.SHUFFLE_BUFFER_SIZE)
        .repeat()
        .batch(self.BATCH_SIZE)
        .prefetch(buffer_size=self.AUTOTUNE)
    )

    # Repeating dataset needs an explicit per-epoch step count.
    steps = tf.math.ceil(self.SHUFFLE_BUFFER_SIZE / self.BATCH_SIZE).numpy()
    print("Training with %d steps per epoch" % steps)

    # Note: unlike later phases, no early-stopping callback here (by design?
    # — confirm); only the checkpoint callback is attached.
    history = model.fit(
        dataset,
        epochs=2000,
        steps_per_epoch=steps,
        callbacks=[self.cp_callback],
    )
    self.history_list.append(history)
from SE_Inception_V4 import SEInceptionV4
from LoadDataset import LoadDataset
from sklearn.model_selection import train_test_split
import numpy as np

# NOTE(review): unpacking the constructor assumes LoadDataset() returns an
# (X, y) pair — confirm against LoadDataset's implementation.
X, y = LoadDataset()

model = SEInceptionV4(include_top=True, input_shape=(256, 256, 3), classes=4)
# NOTE(review): binary_crossentropy with classes=4 is only correct for
# multi-label (sigmoid) outputs; for single-label 4-way classification this
# should be 'categorical_crossentropy' — confirm the label encoding before
# changing.
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])

# Bug fix: test_size=0 is rejected by scikit-learn (the test split must be
# non-empty). Hold out 20% so the split is valid and test data exists.
train_x, test_x, train_y, test_y = train_test_split(
    X, y, test_size=0.2, random_state=42)

history = model.fit(np.array(train_x), np.array(train_y),
                    epochs=25, batch_size=32)
model.save('model.h5')
def accuracy(predicted, actual):
    """Return the fraction of positions where predicted matches actual.

    Bug fixes: the original used a Python 2 ``print`` statement, which is a
    SyntaxError under Python 3, and raised ZeroDivisionError when both
    sequences were empty (now returns 0.0).

    :param predicted: sequence of predicted labels
    :param actual: sequence of ground-truth labels (compared pairwise)
    :return: correct / total as a float in [0.0, 1.0]
    """
    total = 0.0
    correct = 0.0
    for p, a in zip(predicted, actual):
        total += 1
        if p == a:
            correct += 1
    if total == 0:
        # Empty input: define accuracy as 0.0 instead of dividing by zero.
        return 0.0
    print(correct / total)
    return correct / total


if __name__ == '__main__':
    # Load the dataset and convert each set into shared variables.
    load = LoadDataset()
    load_x, load_y = [], []
    for sets in load.return_dataset():
        load_x.append(create_shared(sets)[0])
        load_y.append(create_shared(sets)[1])

    # Network layer shapes: 784 -> 40 -> 40 -> 10.
    weight3_shape = (10, 40)
    bias3_shape = 10
    weight1_shape = (40, 784)
    bias1_shape = 40
    weight2_shape = (40, 40)
    bias2_shape = 40
    # Creating shared variables for weight and bias