def train_and_assess_model(model, train_images, train_labels, test_images, test_labels):
    epochs = 50
    learning_rate = 0.01
    decay = learning_rate / epochs

    # Stochastic gradient descent with momentum and a linear learning-rate decay
    sgd = SGD(lr=learning_rate, momentum=0.9, decay=decay, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    print(model.summary())

    model.fit(train_images, train_labels,
              validation_data=(train_images, train_labels),
              epochs=epochs, batch_size=12)

    # The input and output op names are needed later to address the frozen graph
    print(model.input.op.name)
    print(model.output.op.name)

    # Freeze the session into a constant graph and write it out as a protobuf file
    frozen_graph = freeze_graph(K.get_session(), output_names=[model.output.op.name])
    tf.train.write_graph(frozen_graph, '.', 'emotions_detector.pb', as_text=False)

    scores = model.evaluate(test_images, test_labels, verbose=0)
    print('Accuracy: %.2f%%' % (scores[1] * 100))
    return model
def train_and_assess_model(model, train_images, train_labels):
    epochs = 60
    learning_rate = 0.007
    decay = learning_rate / epochs

    sgd = SGD(lr=learning_rate, momentum=0.81, decay=decay, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    print(model.summary())

    model.fit(train_images, train_labels,
              validation_data=(train_images, train_labels),
              epochs=epochs, batch_size=12)

    print(model.input.op.name)
    print(model.output.op.name)

    # Write the frozen graph straight into the Android app's assets folder
    frozen_graph = freeze_graph(K.get_session(), output_names=[model.output.op.name])
    tf.train.write_graph(frozen_graph, '.',
                         'Android_App/app/src/main/assets/emotions_detector.pb',
                         as_text=False)
    return model
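# Both versions of train_and_assess_model (and the CIFAR-10 listing further down) call a
# freeze_graph helper that is defined elsewhere in the project. A minimal sketch of such a
# helper, assuming the TensorFlow 1.x session API and the standard
# convert_variables_to_constants utility, could look like this:
import tensorflow as tf

def freeze_graph(session, output_names):
    """Turn the variables of a live Keras/TF session into constants so the
    graph can be serialised as a single .pb file (sketch, TF 1.x API)."""
    graph = session.graph
    with graph.as_default():
        # Replace every variable with a constant holding its current value
        frozen = tf.graph_util.convert_variables_to_constants(
            session, graph.as_graph_def(), output_names)
    return frozen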
def run():
    happy_images = load_and_format_images('Happy')
    sad_images = load_and_format_images('Sad')

    # Split the labelled happy/sad images into training and test sets
    train_images, train_labels, test_images, test_labels = separate_data(
        happy_images, sad_images)
    train_images = reshape_images(train_images)
    test_images = reshape_images(test_images)

    model = create_model()
    model = train_and_assess_model(model, train_images, train_labels)
    predict_images(model, test_images, test_labels)

    # Display the figures produced by predict_images; the graph has already been
    # frozen and written to disk inside train_and_assess_model
    plt.show()
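# The op names printed during training are how the frozen emotions_detector.pb is addressed
# by whatever loads it later. A quick way to sanity-check the exported file from Python
# (a sketch, assuming the TF 1.x API; check_frozen_graph and the example op names below are
# hypothetical, the real names are whatever the print statements above show):
import tensorflow as tf

def check_frozen_graph(pb_path, input_name, output_name):
    """Load a frozen graph and confirm the expected input/output tensors exist."""
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())

    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
        # get_tensor_by_name raises KeyError if either tensor is missing
        graph.get_tensor_by_name(input_name + ':0')
        graph.get_tensor_by_name(output_name + ':0')
    print('%s looks usable' % pb_path)

# Example: check_frozen_graph('emotions_detector.pb', 'conv2d_1_input', 'dense_2/Softmax')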
# NOTE: the original listing resumes mid-way through its first layer; the Sequential()
# call and the opening Conv2D line below are assumed from the surrounding context.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(32, 32, 3), activation='relu', padding='same',
                 kernel_constraint=maxnorm(3)))

# Get the max value of each 2 x 2 block and discard the rest, thus changing the image size to 16 x 16
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Conv2D(64, (3, 3), input_shape=(16, 16, 32), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
# model.add(Conv2D(128, (3, 3), input_shape=(16, 16, 64), activation='relu', padding='same', kernel_constraint=maxnorm(3)))

# Flatten the matrix into an array
model.add(Flatten())

# Dense layer creates 512 connected neurons (32 x 16) with a relu activation function
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))

# Dropout layer prevents overfitting by ignoring outputs from some neurons
model.add(Dropout(0.5))

# Last dense layer sorts the features into 10 possible outputs with a softmax activation
model.add(Dense(10, activation='softmax'))

# Specify the loss function, optimizer, and which metrics we want to keep track of
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01), metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=30, batch_size=32)

# Freeze the trained model and write it out as a protobuf file
frozen_graph = freeze_graph(K.get_session(), output_names=[model.output.op.name])
tf.io.write_graph(frozen_graph, '.', 'cifar10.pb', as_text=False)
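# The CIFAR-10 listing above assumes train_images and train_labels have already been
# prepared. A minimal sketch of that preparation, assuming the standalone Keras dataset
# loader and one-hot encoded labels (variable names chosen to match the listing):
from keras.datasets import cifar10
from keras.utils import np_utils

# Load the 50,000 training / 10,000 test CIFAR-10 images
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()

# Scale pixel values to [0, 1] and one-hot encode the 10 class labels
train_images = train_images.astype('float32') / 255.0
test_images = test_images.astype('float32') / 255.0
train_labels = np_utils.to_categorical(train_labels)
test_labels = np_utils.to_categorical(test_labels)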