def pick_model_weights(model: keras.models.Model, dataset_name, path='../output/Models/Weights'):
    """Interactively select and load saved weights into *model*.

    Lists every ``<dataset_name>_*.h5`` file found under *path*, asks the
    user to pick one by its (1-based) number, and loads that file's weights
    into the model in place.

    Args:
        model: Model whose weights will be loaded (modified in place).
        dataset_name: Filename prefix used to filter the ``.h5`` weight files.
        path: Directory that contains the saved weight files.

    Returns:
        The same *model* instance, with the chosen weights loaded.
    """
    # glob.glob already returns the list — no append loop needed, and this
    # avoids shadowing the `path` parameter with the loop variable.
    model_names = glob.glob('{}/{}_*.h5'.format(path, dataset_name))
    print('Please enter your model number from the list below:')
    for i, name in enumerate(model_names):
        print('{}. {}'.format(i + 1, name))
    model_number = int(input('?')) - 1  # user enters a 1-based index
    model.load_weights(model_names[model_number])
    return model
def train(model: keras.models.Model, optimizer: dict, save_path: str,
          train_dir: str, valid_dir: str, batch_size: int = 32,
          epochs: int = 10, samples_per_epoch=1000, pretrained=None,
          augment: bool = True, weight_mode=None, verbose=0, **kwargs):
    """Trains the model with the given configurations.

    Args:
        model: Compiled-ready Keras model; its ``input_shape`` determines
            the generator target size.
        optimizer: Config dict with at least ``name`` and ``schedule`` keys;
            remaining keys are passed to the optimizer constructor.
            The dict is NOT mutated.
        save_path: Directory for checkpoints, logs and the XML description.
        train_dir: Directory of training images (one subfolder per class).
        valid_dir: Directory of validation images (one subfolder per class).
        batch_size: Mini-batch size for both generators.
        epochs: Number of training epochs.
        samples_per_epoch: Training samples drawn per epoch.
        pretrained: Optional path to a weights file to load before training.
        augment: Whether to apply data augmentation to the training set.
        weight_mode: Optional ``{class_index: weight}`` dict; also saved to
            ``class_weights.npy``.
        verbose: If 1, prints a message after loading pretrained weights.
        **kwargs: Extra fields forwarded to the XML model description.

    Raises:
        FileNotFoundError: If *pretrained* is given but does not exist.
    """
    shape = model.input_shape[1:3]
    optimizer_cpy = optimizer.copy()  # intact record for the XML description
    # Pop from a private copy so the caller's dict is never mutated.
    opt_args = optimizer.copy()

    shared_gen_args = {
        'rescale': 1. / 255,  # to preserve the rgb palette
    }
    train_gen_args = {}
    if augment:
        train_gen_args = {
            "fill_mode": 'reflect',
            'horizontal_flip': True,
            'vertical_flip': True,
            'width_shift_range': .15,
            'height_shift_range': .15,
            'shear_range': .5,
            'rotation_range': 45,
            'zoom_range': .2,
        }

    gen = IDG(**{**shared_gen_args, **train_gen_args})
    gen = gen.flow_from_directory(train_dir, target_size=shape,
                                  batch_size=batch_size, seed=SEED)

    val_count = len(
        glob(os.path.join(valid_dir, '**', '*.jpg'), recursive=True))
    valid_gen = IDG(**shared_gen_args)

    optim = getattr(keras.optimizers, opt_args['name'])
    if opt_args.pop('name') != 'sgd':
        # Only SGD accepts `nesterov`; tolerate configs that omit it.
        opt_args.pop('nesterov', None)

    schedule = opt_args.pop('schedule')
    if schedule == 'decay' and 'lr' in opt_args:
        initial_lr = opt_args.pop('lr')
    else:
        initial_lr = 0.01
    optim = optim(**opt_args)

    callbacks = [
        utils.checkpoint(save_path),
        utils.csv_logger(save_path),
    ]

    if pretrained is not None:
        if not os.path.exists(pretrained):
            raise FileNotFoundError(
                'Pretrained weights not found: {}'.format(pretrained))
        model.load_weights(pretrained, by_name=False)
        if verbose == 1:
            print("Loaded weights from {}".format(pretrained))

    # Learning-rate schedules only apply to SGD.
    if optimizer_cpy['name'] == 'sgd':
        if schedule == 'decay':
            callbacks.append(utils.step_decay(epochs, initial_lr=initial_lr))
        elif schedule == 'big_drop':
            callbacks.append(utils.constant_schedule())

    model.compile(optim, loss='categorical_crossentropy',
                  metrics=['accuracy', top3_acc])

    create_xml_description(save=os.path.join(save_path, 'model_config.xml'),
                           title=model.name, epochs=epochs,
                           batch_size=batch_size,
                           samples_per_epoch=samples_per_epoch,
                           augmentations=augment, schedule=schedule,
                           optimizer=optimizer_cpy, **kwargs)

    if weight_mode:
        # Persist the class weights alongside the model artifacts.
        class_weights = [[key, value] for key, value in weight_mode.items()]
        filen = os.path.join(save_path, 'class_weights.npy')
        np.save(filen, class_weights)

    h = None  # has to be initialized here, so we can reference it later
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            h = model.fit_generator(
                gen,
                steps_per_epoch=samples_per_epoch / batch_size,
                epochs=epochs,
                validation_data=valid_gen.flow_from_directory(
                    valid_dir, target_size=shape,
                    batch_size=batch_size, seed=SEED),
                validation_steps=val_count / batch_size,
                callbacks=callbacks,
                class_weight=weight_mode,
                verbose=2)
    except KeyboardInterrupt:
        # Still persist whatever was trained before the interrupt.
        save_results(verbose=1, save_path=save_path, model=model, hist=h)
        return
    save_results(verbose=1, save_path=save_path, model=model, hist=h)