def main():
    args = TrainData.parse_args(create_parser(usage))
    inject_params(args.model)
    save_params(args.model)

    data = TrainData.from_both(args.db_file, args.db_folder, args.data_dir)
    print('Data:', data)
    (inputs, outputs), test_data = data.load(True, not args.no_validation)

    print('Inputs shape:', inputs.shape)
    print('Outputs shape:', outputs.shape)
    if test_data:
        print('Test inputs shape:', test_data[0].shape)
        print('Test outputs shape:', test_data[1].shape)

    if 0 in inputs.shape or 0 in outputs.shape:
        print('Not enough data to train')
        raise SystemExit(1)

    params = ModelParams(skip_acc=args.no_validation,
                         extra_metrics=args.extra_metrics)
    model = create_model(args.model, params)
    model.summary()

    from keras.callbacks import ModelCheckpoint
    checkpoint = ModelCheckpoint(args.model, monitor=args.metric_monitor,
                                 save_best_only=args.save_best)

    try:
        # Keyword arguments avoid relying on the positional order of fit()
        model.fit(inputs, outputs, batch_size=5000, epochs=args.epochs,
                  validation_data=test_data, callbacks=[checkpoint])
    except KeyboardInterrupt:
        print()  # Move past the ^C echoed to the terminal
    finally:
        model.save(args.model)
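
# A minimal sketch (not from the script above) of the checkpoint pattern it
# relies on: with save_best_only=True, ModelCheckpoint only overwrites the
# target file when the monitored metric improves, so an interrupted run keeps
# the best weights seen so far. The toy model and data here are made up.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint

toy = Sequential([Dense(8, activation='relu', input_shape=(4,)),
                  Dense(1, activation='sigmoid')])
toy.compile(optimizer='adam', loss='binary_crossentropy')

x = np.random.rand(64, 4)
y = (x.sum(axis=1) > 2.0).astype(float)
best_only = ModelCheckpoint('toy.net', monitor='val_loss', save_best_only=True)
toy.fit(x, y, batch_size=16, epochs=3,
        validation_data=(x, y), callbacks=[best_only])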
def main():
    args = create_parser(usage).parse_args()

    import numpy as np
    # 'data' is a pickled dict of per-model statistics inside the npz file
    model_data = {
        name: Stats.from_np_dict(data) for name, data in
        np.load(args.input_file, allow_pickle=True)['data'].item().items()
    }
    model_name = args.model_key or basename(splitext(args.model)[0])

    if model_name not in model_data:
        print("Could not find model '{}' in saved models in stats file: {}".format(
            model_name, list(model_data)))
        raise SystemExit(1)

    stats = model_data[model_name]

    # Outputs that are exactly 0 or 1 carry no information about the
    # activation distribution, so they are dropped before fitting
    save_spots = (stats.outputs != 0) & (stats.outputs != 1)
    if save_spots.sum() == 0:
        print('No data (or all NaN)')
        return

    stats.outputs = stats.outputs[save_spots]
    stats.targets = stats.targets[save_spots]
    inv = -np.log(1 / stats.outputs - 1)  # Inverse sigmoid (logit) of the outputs

    pos = np.extract(stats.targets > 0.5, inv)
    pos_mu = pos.mean().item()
    pos_std = sqrt(np.mean((pos - pos_mu) ** 2)) * args.smoothing
    print('Peak: {:.2f} mu, {:.2f} std'.format(pos_mu, pos_std))

    pr = inject_params(args.model)
    pr.__dict__.update(threshold_config=((pos_mu, pos_std),))
    save_params(args.model)
    print('Saved params to {}.params'.format(args.model))
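
# Sketch (not part of the script above) of why the logit transform is used:
# a sigmoid output p in (0, 1) maps back to an unbounded pre-activation via
# logit(p) = -log(1/p - 1), and the fitted (mu, std) describe how those
# pre-activations are distributed on positive samples. The round trip below
# checks the identity sigmoid(logit(p)) == p.
import numpy as np

def logit(p):
    return -np.log(1 / p - 1)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

p = np.array([0.1, 0.5, 0.9])
assert np.allclose(sigmoid(logit(p)), p)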
def __init__(self, parser=None):
    parser = parser or ArgumentParser()
    add_to_parser(parser, self.usage, True)
    args = TrainData.parse_args(parser)
    self.args = args = self.process_args(args) or args

    if args.invert_samples and not args.samples_file:
        parser.error('You must specify --samples-file when using --invert-samples')
    if args.samples_file and not isfile(args.samples_file):
        parser.error('No such file: ' + args.samples_file)
    if not 0.0 <= args.sensitivity <= 1.0:
        parser.error('sensitivity must be between 0.0 and 1.0')

    output_folder = os.path.join(args.folder, splitext(args.model)[0])
    if not os.path.exists(output_folder):
        print('Creating output folder:', output_folder)
        os.makedirs(output_folder)
    args.model = os.path.join(output_folder, args.model)

    inject_params(args.model)
    save_params(args.model)

    self.train, self.test = self.load_data(self.args)
    set_loss_bias(1.0 - args.sensitivity)
    params = ModelParams(skip_acc=args.no_validation,
                         extra_metrics=args.extra_metrics)
    self.model = create_model(args.model, params)
    self.model.summary()

    from keras.callbacks import ModelCheckpoint, TensorBoard, LambdaCallback
    checkpoint = ModelCheckpoint(args.model, monitor=args.metric_monitor,
                                 save_best_only=args.save_best)
    epoch_file = splitext(args.model)[0] + '.epoch'
    epoch_fiti = Fitipy(epoch_file)
    self.epoch = epoch_fiti.read().read(0, int)

    def on_epoch_end(_epoch, _logs):
        self.epoch += 1
        epoch_fiti.write().write(self.epoch, str)

    self.model_base = splitext(self.args.model)[0]

    if args.samples_file:
        self.samples, self.hash_to_ind = self.load_sample_data(
            args.samples_file, self.train)
    else:
        self.samples = set()
        self.hash_to_ind = {}

    self.callbacks = [
        checkpoint,
        TensorBoard(log_dir=self.model_base + '.logs'),
        LambdaCallback(on_epoch_end=on_epoch_end)
    ]
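
# Sketch of the epoch-persistence pattern above using plain file I/O in place
# of Fitipy (whose API is only inferred from the calls shown): the epoch
# counter lives in a sidecar '.epoch' file so a restarted run can pick up
# where the previous one stopped.
import os

def read_epoch(path, default=0):
    if not os.path.isfile(path):
        return default
    with open(path) as f:
        return int(f.read().strip() or default)

def write_epoch(path, epoch):
    with open(path, 'w') as f:
        f.write(str(epoch))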
def run(self):
    """Train the model on randomly generated batches"""
    _, test_data = self.data.load(train=False, test=True)
    try:
        # Keras treats `epochs` as an absolute count, so together with
        # initial_epoch=self.epoch this trains exactly args.epochs more epochs
        self.model.fit_generator(
            self.samples_to_batches(self.generate_samples(), self.args.batch_size),
            steps_per_epoch=self.args.steps_per_epoch,
            epochs=self.epoch + self.args.epochs,
            validation_data=test_data,
            callbacks=self.callbacks,
            initial_epoch=self.epoch
        )
    finally:
        self.model.save(self.args.model)
        save_params(self.args.model)
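
# Hypothetical sketch of a samples_to_batches helper like the one called
# above (its real implementation is not shown here): group an endless stream
# of (input, output) pairs into fixed-size numpy batches for fit_generator.
import numpy as np

def samples_to_batches(samples, batch_size):
    while True:
        xs, ys = [], []
        for x, y in samples:
            xs.append(x)
            ys.append(y)
            if len(xs) == batch_size:
                break
        yield np.array(xs), np.array(ys)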
def __init__(self, args):
    super().__init__(args)

    if args.invert_samples and not args.samples_file:
        raise ValueError('You must specify --samples-file when using --invert-samples')
    if args.samples_file and not isfile(args.samples_file):
        raise ValueError('No such file: ' + args.samples_file)
    if not 0.0 <= args.sensitivity <= 1.0:
        raise ValueError('sensitivity must be between 0.0 and 1.0')

    inject_params(args.model)
    save_params(args.model)
    params = ModelParams(skip_acc=args.no_validation,
                         extra_metrics=args.extra_metrics,
                         loss_bias=1.0 - args.sensitivity,
                         freeze_till=args.freeze_till)
    self.model = create_model(args.model, params)
    self.train, self.test = self.load_data(self.args)

    from keras.callbacks import ModelCheckpoint, TensorBoard, LambdaCallback
    checkpoint = ModelCheckpoint(args.model, monitor=args.metric_monitor,
                                 save_best_only=args.save_best)
    epoch_fiti = Fitipy(splitext(args.model)[0] + '.epoch')
    self.epoch = epoch_fiti.read().read(0, int)

    def on_epoch_end(_epoch, _logs):
        self.epoch += 1
        epoch_fiti.write().write(self.epoch, str)

    self.model_base = splitext(self.args.model)[0]

    if args.samples_file:
        self.samples, self.hash_to_ind = self.load_sample_data(
            args.samples_file, self.train)
    else:
        self.samples = set()
        self.hash_to_ind = {}

    self.callbacks = [
        checkpoint,
        TensorBoard(log_dir=self.model_base + '.logs'),
        LambdaCallback(on_epoch_end=on_epoch_end)
    ]
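
# Sketch (an assumption, not code from this module) of what a freeze_till
# parameter like the one passed to ModelParams above typically does when
# fine-tuning: layers below the given index stop updating so only the upper
# layers adapt to the new data.
def apply_freeze_till(model, freeze_till):
    for layer in model.layers[:freeze_till]:
        layer.trainable = False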