print("x_train shape: {}".format(x_train.shape)) print("y_train shape: {}".format(y_train.shape)) #print("x_train: {}".format(x_train[1:])) #print("y_train: {}".format(y_train[1:])) #y_map #from keras.callbacks import ModelCheckpoint #filepath="weights.best.hdf5" #checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max') # ## Create the neural network definition # In[11]: classifier = AmazonKerasClassifier() classifier.add_conv_layer(img_resize) classifier.add_flatten_layer() classifier.add_ann_layer(len(y_map)) train_losses, val_losses, fbeta_score = classifier.train_model( x_train, y_train, epochs, batch_size, validation_split_size=validation_split_size) classifier.load_weight() print("Weights loaded") # ## Monitor the results
def build_classifier(output_layers):
    classifier = AmazonKerasClassifier()
    classifier.add_conv_layer(img_resize)
    classifier.add_flatten_layer()
    classifier.add_ann_layer(output_layers)
    return classifier
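# Example usage of the helper above (`y_map` is the label mapping produced
# during preprocessing, as in the other cells):
classifier = build_classifier(len(y_map))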
# <codecell>

validation_split_size = 0.2
batch_size = 128

# <markdowncell>

# ## Define and Train model
#
# Here we define the model and begin training.
#
# Note that we have created a learning rate annealing schedule with a series of learning rates as defined in the array `learn_rates` and a corresponding number of epochs for each in `epochs_arr`. Feel free to change these values if you like or just use the defaults.

# <codecell>

classifier = AmazonKerasClassifier()
classifier.add_conv_layer(img_resize)
classifier.add_flatten_layer()
classifier.add_ann_layer(len(y_map))

train_losses, val_losses = [], []
epochs_arr = [10, 5, 5]
learn_rates = [0.001, 0.0001, 0.00001]
for learn_rate, epochs in zip(learn_rates, epochs_arr):
    tmp_train_losses, tmp_val_losses, fbeta_score = classifier.train_model(x_train, y_train, learn_rate, epochs, batch_size,
                                                                           validation_split_size=validation_split_size)
    train_losses += tmp_train_losses
    val_losses += tmp_val_losses
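# For reference, the annealing schedule amounts to re-compiling the model with
# a smaller Adam learning rate before each training phase while keeping the
# weights learned so far. A self-contained toy sketch (random data and a tiny
# dense model stand in for the real classifier; `train_model` presumably does
# the equivalent internally):
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

toy_x = np.random.rand(64, 8)
toy_y = np.random.randint(0, 2, size=(64, 17))
toy_model = Sequential([Dense(17, activation='sigmoid', input_shape=(8,))])
for lr, n_epochs in zip([0.001, 0.0001, 0.00001], [10, 5, 5]):
    # lower the learning rate, then continue training from the current weights
    toy_model.compile(loss='binary_crossentropy', optimizer=Adam(lr=lr))
    toy_model.fit(toy_x, toy_y, batch_size=16, epochs=n_epochs, verbose=0)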
# Imports assumed for this fragment (adjust to the notebook's actual Keras setup):
import numpy as np
from keras.utils.io_utils import HDF5Matrix
from keras.callbacks import ModelCheckpoint

x_valid = HDF5Matrix(h5_train_file, "x_train", start=N_split, end=N_train)
y_valid = HDF5Matrix(h5_train_file, "y_train", start=N_split, end=N_train)

# Count the occurrences of each of the 17 labels to derive inverse-frequency
# sample weights, so rarer tags get larger weights
# (see the small worked example at the end of this cell).
y = HDF5Matrix(h5_train_file, "y_train")
counts = np.zeros(17)
for i in range(len(y)):
    counts = counts + y[i]
w_sample = dict(zip(range(len(counts)), 1000 / counts))
#np.vectorize(lambda x: y_map.get(x,0))(y_train)

# <markdowncell>

# ## Define and Train model
#
# Here we define the model and begin training.
#
# Note that we have created a learning rate annealing schedule with a series of learning rates as defined in the array `learn_rates` and a corresponding number of epochs for each in `epochs_arr`. Feel free to change these values if you like or just use the defaults.

# <codecell>

classifier = AmazonKerasClassifier()
#classifier.load_model(model_filepath + ".json")  # load a previously saved model definition
classifier.add_conv_layer(img_resize, img_channels)
classifier.add_flatten_layer()
classifier.add_ann_layer(len(y_map))
classifier.summary()
classifier.save_model(model_filepath + ".json")

checkpoint = ModelCheckpoint(model_filepath + ".hdf5", monitor='val_acc', verbose=1, save_best_only=True)

train_losses, val_losses = [], []
epochs_arr = [15, 7, 7]
learn_rates = [0.001, 0.0001, 0.00001]
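# Tiny worked example of the inverse-frequency weighting used above
# (hypothetical 3-sample, 3-label matrix; values are illustrative only):
y_demo = np.array([[1, 0, 1],
                   [1, 1, 0],
                   [1, 0, 0]])
counts_demo = y_demo.sum(axis=0)  # -> array([3, 1, 1])
w_demo = dict(zip(range(len(counts_demo)), 1000.0 / counts_demo))
print(w_demo)  # rarer labels get larger weights: {0: 333.33..., 1: 1000.0, 2: 1000.0}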
# <codecell>

batch_size = 64
epochs_arr = [35, 15, 5]
learn_rates = [0.002, 0.0002, 0.00002]

# <markdowncell>

# ## Define and Train model
#
# Here we define the model and begin training.

# <codecell>

classifier = AmazonKerasClassifier(preprocessor)
classifier.add_conv_layer()
classifier.add_flatten_layer()
classifier.add_ann_layer(len(preprocessor.y_map))

train_losses, val_losses = [], []
for learn_rate, epochs in zip(learn_rates, epochs_arr):
    tmp_train_losses, tmp_val_losses, fbeta_score = classifier.train_model(learn_rate, epochs, batch_size,
                                                                           train_callbacks=[checkpoint, fbeta, csv])
    train_losses += tmp_train_losses
    val_losses += tmp_val_losses
print(fbeta.fbeta)

# <markdowncell>
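# The `fbeta` callback referenced above is not defined in this fragment. A
# minimal sketch of what such a callback could look like (hypothetical name
# `FBetaCallback`; the notebook's real implementation may differ):

# <codecell>

from keras.callbacks import Callback
from sklearn.metrics import fbeta_score


class FBetaCallback(Callback):
    """Track the F2 score on held-out data at the end of every epoch."""

    def __init__(self, x_val, y_val, threshold=0.2):
        super(FBetaCallback, self).__init__()
        self.x_val = x_val
        self.y_val = y_val
        self.threshold = threshold
        self.fbeta = []

    def on_epoch_end(self, epoch, logs=None):
        # threshold the sigmoid outputs, then score against the true labels
        preds = self.model.predict(self.x_val)
        self.fbeta.append(fbeta_score(self.y_val, preds > self.threshold,
                                      beta=2, average='samples'))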
validation_split_size = 0.2
batch_size = 128

x_train, y_train, y_map = data_helper.preprocess_train_data(train_jpeg_dir, train_csv_file, img_resize)
# Free up all available memory space after this heavy operation
gc.collect()

print("x_train shape: {}".format(x_train.shape))
print("y_train shape: {}".format(y_train.shape))

from tensorflow.contrib.keras.api.keras.callbacks import ModelCheckpoint

# Keep only the weights of the epoch with the best validation accuracy
filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True)

classifier = AmazonKerasClassifier()
classifier.add_conv_layer(img_resize)
classifier.add_flatten_layer()
classifier.add_ann_layer(len(y_map))

train_losses, val_losses = [], []
epochs_arr = [20, 5, 5]
learn_rates = [0.001, 0.0001, 0.00001]
for learn_rate, epochs in zip(learn_rates, epochs_arr):
    tmp_train_losses, tmp_val_losses, fbeta_score = classifier.train_model(x_train, y_train, learn_rate, epochs, batch_size,
                                                                           validation_split_size=validation_split_size,
                                                                           train_callbacks=[checkpoint])
    train_losses += tmp_train_losses
    val_losses += tmp_val_losses

# Restore the best checkpointed weights rather than the last epoch's
classifier.load_weights("weights.best.hdf5")
print("Weights loaded")