def train_face():
    # Train the base face model on the ChokePoint cropped training set.
    train_images = help_functions.load_all_images(
        config.chokepoint_cropped_train, file_type='.pgm',
        preprocess=help_functions.prepare_face_random_resize)

    # Hyper-parameters
    people_count = 8
    iterations = 25000
    checkpoint = 20
    save_checkpoint = 10000

    backend.clear_session()
    model = get_face_model((config.face_image_resize[1], config.face_image_resize[0], 1))
    f = open(os.path.join('model_history', 'face_perf.txt'), 'a')
    # print(model.summary())
    for i in range(1, iterations + 1):
        inputs, targets = get_image_pairs(train_images, people_count)
        (loss, acc) = model.train_on_batch(inputs, targets)
        if i % checkpoint == 0:
            logging.info('Iteration: {}'.format(i))
            logging.info('Loss: {}'.format(loss))
            logging.info('Accuracy: {}'.format(acc))
            f.write('{} {} {}\n'.format(i, loss, acc))
        if i % save_checkpoint == 0:
            # Keep an intermediate snapshot of the weights.
            model.save_weights(os.path.join('model_history', 'base_face_weights_it{}.h5'.format(i)))
            f.flush()
    model.save_weights(os.path.join('model_history', 'base_face_weights.h5'))
    f.close()
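# get_image_pairs is defined elsewhere in the project; the sketch below only
# illustrates the usual siamese batch layout it presumably produces, assuming
# train_images maps each person ID to a list (of length >= 2) of preprocessed
# image arrays of identical shape. The name, the structure, and the
# half-positive/half-negative split are assumptions, not the repository's
# actual implementation.
import random
import numpy as np

def get_image_pairs_sketch(images_by_person, people_count):
    """Build one batch of (left, right) image pairs with binary targets."""
    ids = random.sample(list(images_by_person), people_count)
    left, right, targets = [], [], []
    for pid in ids:
        # Positive pair: two images of the same person -> target 1.
        a, b = random.sample(images_by_person[pid], 2)
        left.append(a)
        right.append(b)
        targets.append(1)
        # Negative pair: images of two different people -> target 0.
        other = random.choice([p for p in ids if p != pid])
        left.append(images_by_person[pid][0])
        right.append(images_by_person[other][0])
        targets.append(0)
    return [np.array(left), np.array(right)], np.array(targets)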
def test_body(model_file, image_folder=config.test_body_folder):
    test_images = help_functions.load_all_images(
        image_folder, preprocess=help_functions.prepare_body)
    model = get_body_model(
        (config.body_image_resize[1], config.body_image_resize[0], 3))
    model.load_weights(filepath=model_file)
    print('body')
    rates = np.array([0, 0, 0, 0])
    for i in range(100):
        inputs, targets = train_help_functions.get_image_pairs(test_images, 10)
        predicted = model.predict_on_batch(inputs)
        rates += calc_rates(predicted, targets)
    print_rates(rates)
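# calc_rates and print_rates are assumed helpers not shown in this section.
# A minimal sketch of what they likely track: TP/FP/TN/FN counts for pair
# predictions at a 0.5 similarity threshold. The threshold, the ordering of
# the four counters, and the printed metrics are assumptions.
import numpy as np

def calc_rates_sketch(predicted, targets, threshold=0.5):
    """Return [TP, FP, TN, FN] counts for one batch of pair predictions."""
    pred = (np.asarray(predicted).ravel() > threshold).astype(int)
    true = np.asarray(targets).ravel().astype(int)
    tp = int(np.sum((pred == 1) & (true == 1)))
    fp = int(np.sum((pred == 1) & (true == 0)))
    tn = int(np.sum((pred == 0) & (true == 0)))
    fn = int(np.sum((pred == 0) & (true == 1)))
    return np.array([tp, fp, tn, fn])

def print_rates_sketch(rates):
    """Print accuracy, false accept rate, and false reject rate."""
    tp, fp, tn, fn = rates
    total = rates.sum()
    print('accuracy: {:.4f}'.format((tp + tn) / total))
    print('false accept rate: {:.4f}'.format(fp / max(fp + tn, 1)))
    print('false reject rate: {:.4f}'.format(fn / max(fn + tp, 1)))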
def improve_bodies():
    # The same person may have a different ID in different folders, so merging
    # the folders would create false negatives; keep them separate instead.
    train_images = []
    # Load images from each camera-group folder.
    for group_name in config.improve_camera_groups:
        images = help_functions.load_all_images(
            os.path.join(config.improve_folder, group_name, 'bodies'),
            file_type='.jpg', preprocess=help_functions.prepare_body)
        train_images.append(images)

    # Hyper-parameters
    people_count = 8
    iterations = 8000
    checkpoint = 2
    save_checkpoint = 5000

    model = siamese_network.get_body_model(
        (config.body_image_resize[1], config.body_image_resize[0], 3))
    # Are we improving the base model or an already improved model?
    # Load the weights of the model we are improving.
    if config.learning_start:
        model.load_weights(filepath=config.base_body_model)
    elif config.learning_improving:
        model.load_weights(filepath=config.improved_body_model)

    f = open(os.path.join('model_history', 'body_improve_perf.txt'), 'a')
    logging.info('IMPROVING: Starting to improve model for bodies')
    for i in range(1, iterations + 1):
        # Sample each batch from a single folder so pairs never cross folders.
        inputs, targets = get_image_pairs(
            train_images[np.random.randint(0, len(train_images))], people_count)
        (loss, acc) = model.train_on_batch(inputs, targets)
        if i % checkpoint == 0:
            logging.info('Iteration: {}'.format(i))
            logging.info('Loss: {}'.format(loss))
            logging.info('Accuracy: {}'.format(acc))
            f.write('{} {} {}\n'.format(i, loss, acc))
        if i % save_checkpoint == 0:
            model.save_weights(os.path.join('model_history', str(i) + 'FBI.h5'))
            f.flush()
    model.save_weights(config.improved_body_model)
    f.close()
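# improve_bodies() relies on several config attributes; the placeholders below
# only illustrate the expected shapes/types and are hypothetical, not the
# project's real settings.
#
# config.improve_camera_groups = ['group_1', 'group_2']  # folder names, assumed
# config.improve_folder = 'data/improve'                 # root folder, assumed
# config.learning_start = True       # start from the base model...
# config.learning_improving = False  # ...or keep refining an improved one
# config.base_body_model = 'model_history/base_body_weights.h5'      # assumed
# config.improved_body_model = 'model_history/improved_body_weights.h5'  # assumed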
def test_face(model_file, image_folder=config.chokepoint_cropped_test, file_type='.pgm'):
    test_images = help_functions.load_all_images(
        image_folder, file_type=file_type, preprocess=help_functions.prepare_face)
    model = get_face_model(
        (config.face_image_resize[1], config.face_image_resize[0], 1))
    model.load_weights(filepath=model_file)
    print('face')
    rates = np.array([0, 0, 0, 0])
    for i in range(100):
        inputs, targets = train_help_functions.get_image_pairs(test_images, 10)
        # Skip batches where no valid pairs could be formed.
        if targets.shape[0] == 0:
            continue
        predicted = model.predict_on_batch(inputs)
        rates += calc_rates(predicted, targets)
    print_rates(rates)
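# Example driver. base_face_weights.h5 is the file train_face() saves; the
# body weight path is a hypothetical placeholder, so substitute whatever
# improve_bodies() or your own training run actually produced.
if __name__ == '__main__':
    test_face(os.path.join('model_history', 'base_face_weights.h5'))
    test_body(os.path.join('model_history', 'base_body_weights.h5'))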