# this is the augmentation configuration we will use for training
train_datagen = create_image_generator(True, True)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = create_image_generator(True, False)

# FIX(review): Keras flow_from_directory expects target_size=(height, width).
# This chunk originally passed (img_width, img_height); corrected here to the
# (img_height, img_width) order used everywhere else in this file. The swap is
# harmless only when images are square.
attr.train_generator = train_datagen.flow_from_directory(
    attr.train_data_dir,
    target_size=(attr.img_height, attr.img_width),
    batch_size=attr.batch_size,
    shuffle=True,
    class_mode='binary')

attr.validation_generator = test_datagen.flow_from_directory(
    attr.validation_data_dir,
    target_size=(attr.img_height, attr.img_width),
    batch_size=attr.batch_size,
    shuffle=True,
    class_mode='binary')

# Test generator: batch_size=1 and shuffle=False so predictions stay aligned
# one-to-one with the generator's file order.
attr.test_generator = test_datagen.flow_from_directory(
    attr.test_data_dir,
    target_size=(attr.img_height, attr.img_width),
    batch_size=1,
    shuffle=False,
    class_mode='binary')

# calculate steps based on number of images and batch size
attr.calculate_steps()
attr.increment_seq()
plot_model(attr.model, to_file=attr.summ_basename + '-architecture.png') # compile model using accuracy as main metric, rmsprop (gradient descendent) attr.model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.000001), metrics=['accuracy']) # this is the augmentation configuration we will use for training train_datagen = create_image_generator(False, True) # this is the augmentation configuration we will use for testing: # nothing is done. test_datagen = create_image_generator(False, False) attr.train_generator = multimodal_flow_from_directory_generator(attr.train_data_dir, attr.csv_path, train_datagen, attr.batch_size, attr.img_height, attr.img_width, 'binary', True) attr.validation_generator = multimodal_flow_from_directory_generator(attr.validation_data_dir, attr.csv_path, test_datagen, attr.batch_size, attr.img_height, attr.img_width, 'binary', True) attr.test_generator = multimodal_flow_from_directory_generator(attr.test_data_dir, attr.csv_path, test_datagen, 1, attr.img_height, attr.img_width, 'binary', False) print("[INFO] Calculating samples and steps...") attr.calculate_samples_len() attr.calculate_steps() attr.increment_seq() # Persist execution attributes for session resume save_execution_attributes(attr, attr.summ_basename + '-execution-attributes.properties') time_callback = TimeCallback() callbacks = [time_callback, EarlyStopping(monitor='val_acc', patience=3, mode='max', restore_best_weights=True),
# (continuation of the training MultimodalGenerator call started above:
# categorical labels plus shift/rotate/shear/zoom augmentation parameters)
is_categorical=True, is_debug=False, width_shift=0.2, height_shift=0.2,
rotation_angle=15, shear_factor=10, zoom_factor=0.2)

# Validation generator, fed from a pre-built .npy bundle; shuffled.
# NOTE(review): the same augmentation kwargs are passed here as for training;
# validation data is usually left un-augmented -- confirm this is intended.
attr.validation_generator = MultimodalGenerator(
    npy_path=attr.numpy_path + '/valid-categorical.npy',
    batch_size=attr.batch_size,
    height=attr.img_height,
    width=attr.img_width,
    channels=3,
    classes=2,
    should_shuffle=True,
    is_categorical=True,
    is_debug=False,
    width_shift=0.2,
    height_shift=0.2,
    rotation_angle=15,
    shear_factor=10,
    zoom_factor=0.2)

# Test generator: batch_size=1 so each sample is evaluated individually.
attr.test_generator = MultimodalGenerator(
    npy_path=attr.numpy_path + '/test-categorical.npy',
    batch_size=1,
    height=attr.img_height,
    width=attr.img_width,
    channels=3,
    classes=2,
# calculate steps based on number of images and batch size
attr.calculate_steps()
attr.increment_seq()

# Persist execution attributes for session resume
save_execution_attributes(attr, attr.summ_basename + '-execution-attributes.properties')

# this is the augmentation configuration we will use for training
train_datagen = create_image_generator(False, True)

# this is the augmentation configuration we will use for testing:
# nothing is done.
test_datagen = create_image_generator(False, False)

# Multimodal generators over in-memory arrays (images + attributes + labels).
# NOTE(review): images_*/attributes_*/labels_* are presumably loaded earlier in
# this file -- not visible in this chunk.
attr.train_generator = multimodal_flow_generator(images_train, attributes_train,
                                                 labels_train, train_datagen,
                                                 attr.batch_size)
attr.validation_generator = multimodal_flow_generator(images_valid, attributes_valid,
                                                      labels_valid, test_datagen,
                                                      attr.batch_size)
# batch_size=1 so test predictions map one-to-one to samples.
attr.test_generator = multimodal_flow_generator(images_test, attributes_test,
                                                labels_test, test_datagen, 1)

time_callback = TimeCallback()

# Early stopping on val_acc (patience=10, best weights restored) plus a
# checkpoint of the best-scoring weights seen so far.
callbacks = [time_callback,
             EarlyStopping(monitor='val_acc', patience=10, mode='max',
                           restore_best_weights=True),
             ModelCheckpoint(attr.curr_basename + "-ckweights.h5", mode='max',
                             verbose=1, monitor='val_acc',
                             save_best_only=True)]

# training time
history = attr.model.fit_generator(
    attr.train_generator,
    steps_per_epoch=attr.steps_train,
    epochs=attr.epochs,
    validation_data=attr.validation_generator,
    validation_steps=attr.steps_valid,
# Build the two image pipelines: augmenting generator for training,
# rescale-only generator for validation/test.
train_datagen = create_image_generator(True, True)
test_datagen = create_image_generator(True, False)

# Directory-backed categorical generators. Train and validation batches are
# shuffled; the test stream is sequential with batch size 1 so predictions
# stay aligned with the generator's file order.
attr.train_generator = train_datagen.flow_from_directory(
    attr.train_data_dir,
    class_mode='categorical',
    batch_size=attr.batch_size,
    target_size=(attr.img_height, attr.img_width),
    shuffle=True)

attr.validation_generator = test_datagen.flow_from_directory(
    attr.validation_data_dir,
    class_mode='categorical',
    batch_size=attr.batch_size,
    target_size=(attr.img_height, attr.img_width),
    shuffle=True)

attr.test_generator = test_datagen.flow_from_directory(
    attr.test_data_dir,
    class_mode='categorical',
    batch_size=1,
    target_size=(attr.img_height, attr.img_width),
    shuffle=False)

# Derive per-epoch step counts from image counts and batch size, then bump
# the run sequence number.
attr.calculate_steps()
attr.increment_seq()

time_callback = TimeCallback()