def build_classifier(output_layers, image_size=None):
    """Build an ``AmazonKerasClassifier`` ready for training.

    Args:
        output_layers: Number of output units in the final dense layer
            (one per label class).
        image_size: Input image dimensions passed to the convolutional
            stack. Defaults to the module-level ``img_resize`` so existing
            callers keep the original behavior; pass a value explicitly to
            build a classifier for a different input resolution.

    Returns:
        The assembled (untrained) classifier.
    """
    # Previously the global ``img_resize`` was read directly inside the body;
    # taking it as a defaulted parameter keeps callers working while making
    # the builder reusable.
    if image_size is None:
        image_size = img_resize
    classifier = AmazonKerasClassifier()
    classifier.add_conv_layer(image_size)
    classifier.add_flatten_layer()
    classifier.add_ann_layer(output_layers)
    return classifier
# Sanity-check the training tensors produced by the earlier preprocessing cells.
print("x_train shape: {}".format(x_train.shape))
print("y_train shape: {}".format(y_train.shape))
#print("x_train: {}".format(x_train[1:]))
#print("y_train: {}".format(y_train[1:]))
#y_map
#from keras.callbacks import ModelCheckpoint
#filepath="weights.best.hdf5"
#checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
# ## Create the neural network definition
# In[11]:
# Assemble the network: convolutional stack -> flatten -> dense output layer
# with one unit per label class (len(y_map)).
classifier = AmazonKerasClassifier()
# NOTE(review): img_resize is presumably the (width, height) target set in an
# earlier cell — confirm against the preprocessing code.
classifier.add_conv_layer(img_resize)
classifier.add_flatten_layer()
classifier.add_ann_layer(len(y_map))
# Train and collect the per-epoch loss history plus the final F-beta score;
# epochs, batch_size and validation_split_size come from earlier cells.
train_losses, val_losses, fbeta_score = classifier.train_model(
    x_train, y_train, epochs, batch_size, validation_split_size=validation_split_size)
# Presumably reloads weights checkpointed during train_model — verify against
# the AmazonKerasClassifier implementation.
classifier.load_weight()
print("Weights loaded")
# ## Monitor the results
# <codecell>
# Training schedule: three stages, each pairing an epoch count with a
# progressively smaller learning rate.
batch_size = 64
epochs_arr = [35, 15, 5]
learn_rates = [0.002, 0.0002, 0.00002]

# <markdowncell>
# ## Define and Train model
#
# Here we define the model and begin training.

# <codecell>
# Build the network: convolutional stack, flatten, then one dense output
# unit per label class.
classifier = AmazonKerasClassifier(preprocessor)
classifier.add_conv_layer()
classifier.add_flatten_layer()
classifier.add_ann_layer(len(preprocessor.y_map))

# Run each (learning-rate, epoch-count) stage in turn, accumulating the
# per-epoch loss histories across stages.
train_losses, val_losses = [], []
for learn_rate, epochs in zip(learn_rates, epochs_arr):
    tmp_train_losses, tmp_val_losses, fbeta_score = classifier.train_model(
        learn_rate,
        epochs,
        batch_size,
        train_callbacks=[checkpoint, fbeta, csv],
    )
    train_losses.extend(tmp_train_losses)
    val_losses.extend(tmp_val_losses)

# Report the score recorded by the fbeta callback during training.
print(fbeta.fbeta)

# <markdowncell>