def train(self, weights_path=None, callbacks=[]):

    # Network setup. ResNet layers frozen at first.
    net, self.cfg['preprocess_imgs_func'] = self.cfg['net_builder_func'](self.cfg['input_shape'])
    json.dump(net.to_json(), open('%s/model.json' % self.cpdir, 'w'), indent=2)
    json.dump(serialize_config(self.cfg), open('%s/config.json' % self.cpdir, 'w'))

    # Data setup: batch generator over the combined train and test images.
    data_trn = h5py.File(self.cfg['hdf5_path_trn'], 'r')
    data_tst = h5py.File(self.cfg['hdf5_path_tst'], 'r')
    nb_examples = len(data_trn.get('images')) + len(data_tst.get('images'))
    steps_trn = math.ceil(nb_examples / self.cfg['trn_batch_size'])
    steps_trn = 100  # Override: fix the epoch length at 100 steps regardless of dataset size.
    gen_trn = self.batch_gen(steps_trn)

    # Optimizer and compilation.
    opt = self.cfg['trn_optimizer'](**self.cfg['trn_optimizer_args'])
    net.compile(optimizer=opt, loss=self.cfg['net_loss_func'])
    net.summary()
    if weights_path is not None:
        net.load_weights(weights_path)
    pprint(self.cfg)

    cb = [
        ExamplesCB(self.cfg['cpdir'], gen_trn),
        HistoryPlotCB('%s/history.png' % self.cpdir),
        EarlyStopping(monitor='loss', min_delta=0.01, patience=20, verbose=1, mode='min'),
        CSVLogger('%s/history.csv' % self.cpdir),
        ModelCheckpoint('%s/wloss.hdf5' % self.cpdir, monitor='loss', verbose=1,
                        save_best_only=True, mode='min'),
        TerminateOnNaN()
    ] + callbacks

    train = net.fit_generator(gen_trn, steps_per_epoch=steps_trn, epochs=self.cfg['trn_epochs'],
                              verbose=1, callbacks=cb)

    return train.history
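# Interface sketch only: train() above relies on self.batch_gen(steps) returning a
# generator that yields (image_batch, tag_batch) tuples indefinitely. The real
# implementation lives elsewhere in this class; the standalone signature, the
# 'tags' dataset name and the random sampling below are illustrative assumptions.
import random

def batch_gen_sketch(hdf5_path, batch_size, steps, preprocess_imgs_func):
    data = h5py.File(hdf5_path, 'r')
    imgs, tags = data.get('images'), data.get('tags')
    while True:
        for _ in range(steps):
            # h5py fancy indexing requires indices in increasing order.
            idxs = sorted(random.sample(range(len(imgs)), batch_size))
            yield preprocess_imgs_func(imgs[idxs, ...]), tags[idxs, ...]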
def train(self, weights_path=None, callbacks=[]):

    # Metrics.
    def prec(yt, yp):
        yp = K.cast(yp > self.cfg['net_threshold'], 'float')
        tp = K.sum(yt * yp)
        fp = K.sum(K.clip(yp - yt, 0, 1))
        return tp / (tp + fp + K.epsilon())

    def reca(yt, yp):
        yp = K.cast(yp > self.cfg['net_threshold'], 'float')
        tp = K.sum(yt * yp)
        fn = K.sum(K.clip(yt - yp, 0, 1))
        return tp / (tp + fn + K.epsilon())

    def F2(yt, yp):
        p = prec(yt, yp)
        r = reca(yt, yp)
        b = 2.0
        return (1 + b**2) * ((p * r) / (b**2 * p + r + K.epsilon()))

    # Network setup. ResNet layers frozen at first.
    net, self.cfg['preprocess_imgs_func'], self.cfg['preprocess_tags_func'] = \
        self.cfg['net_builder_func'](self.cfg['input_shape'])
    json.dump(net.to_json(), open('%s/model.json' % self.cpdir, 'w'), indent=2)
    json.dump(serialize_config(self.cfg), open('%s/config.json' % self.cpdir, 'w'))

    # Data setup.
    idxs_trn, idxs_val = get_train_val_idxs(self.cfg['hdf5_path_trn'],
                                            self.cfg['trn_prop_data'], self.cfg['trn_prop_trn'])
    steps_trn = math.ceil(len(idxs_trn) / self.cfg['trn_batch_size'])
    steps_val = math.ceil(len(idxs_val) / self.cfg['trn_batch_size'])
    gen_trn = self.batch_gen(idxs_trn, steps_trn, nb_augment_max=self.cfg['trn_augment_max_trn'])
    gen_val = self.batch_gen(idxs_val, steps_val, nb_augment_max=self.cfg['trn_augment_max_val'])

    opt = self.cfg['trn_optimizer'](**self.cfg['trn_optimizer_args'])
    net.compile(optimizer=opt, loss=self.cfg['net_loss_func'], metrics=[F2, prec, reca])
    net.summary()
    if weights_path is not None:
        net.load_weights(weights_path)
    pprint(self.cfg)

    cb = [
        # FineTuneCB(unfreeze_after=2, unfreeze_lr_mult=0.1),
        HistoryPlotCB('%s/history.png' % self.cpdir),
        EarlyStopping(monitor='F2', min_delta=0.01, patience=20, verbose=1, mode='max'),
        CSVLogger('%s/history.csv' % self.cpdir),
        ModelCheckpoint('%s/wvalf2.hdf5' % self.cpdir, monitor='val_F2', verbose=1,
                        save_best_only=True, mode='max'),
        TerminateOnNaN(),
    ] + callbacks

    if self.cfg['trn_monitor_val']:
        cb.append(ReduceLROnPlateau(monitor='val_F2', factor=0.5, patience=5, min_lr=1e-4,
                                    epsilon=1e-2, verbose=1, mode='max'))
        cb.append(EarlyStopping(monitor='val_F2', min_delta=0.01, patience=20,
                                verbose=1, mode='max'))

    train = net.fit_generator(gen_trn, steps_per_epoch=steps_trn, epochs=self.cfg['trn_epochs'],
                              verbose=1, callbacks=cb, validation_data=gen_val,
                              validation_steps=steps_val, workers=1)

    return train.history
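# Sanity-check sketch (not part of the model): the same F2 computation as the
# Keras-backend metrics above, written against plain numpy so it can be verified
# on a toy example. The threshold 0.5, beta=2.0 and the example arrays are
# illustrative assumptions; the real threshold comes from cfg['net_threshold'].
import numpy as np

def f2_numpy(yt, yp, threshold=0.5, beta=2.0, eps=1e-7):
    yp = (yp > threshold).astype('float32')
    tp = np.sum(yt * yp)
    fp = np.sum(np.clip(yp - yt, 0, 1))
    fn = np.sum(np.clip(yt - yp, 0, 1))
    p = tp / (tp + fp + eps)
    r = tp / (tp + fn + eps)
    return (1 + beta ** 2) * p * r / (beta ** 2 * p + r + eps)

# Example: yt=[1,1,0,0,1], yp=[0.9,0.2,0.1,0.8,0.7] gives tp=2, fp=1, fn=1,
# so precision = recall = 2/3 and F2 = 2/3.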
def serialize(self):
    self.net.save('%s/model.hdf5' % self.cpdir)
    json.dump(serialize_config(self.cfg), open('%s/config.json' % self.cpdir, 'w'))
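# Deserialization sketch (an assumption, not part of the original class): the
# artifacts written by serialize() can be restored with stock Keras calls.
# compile=False sidesteps the custom F2/prec/reca metrics, which would otherwise
# have to be supplied via custom_objects.
def deserialize_sketch(cpdir):
    from keras.models import load_model
    net = load_model('%s/model.hdf5' % cpdir, compile=False)
    cfg = json.load(open('%s/config.json' % cpdir))
    return net, cfg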