async def train_2(self, config):
    # The plugin must be initialized through the `Anet-Lite` UI before training.
    if not self._initialized:
        api.alert('Please click `Anet-Lite` before training.')
        return
    opt = self._opt
    sources = GenericTransformedImages(opt)
    epochs = config.epochs
    # Open the ImJoy Im2Im dashboard for live metrics and prediction previews.
    self.dash = await api.createWindow(type="Im2Im-Dashboard", name="Anet-lite Training", w=20, h=15,
                                       data={"display_mode": "all", 'metrics': ['mse', 'dssim_l1'],
                                             'callbacks': ['onStep']})
    # Callback that pushes progress and validation previews to the dashboard.
    updateUI = UpdateUI(epochs, self.dash, make_generator(sources['valid'], batch_size=1), opt)
    # updateUI = []
    opt.batch_size = config.batchsize
    abort.clear()
    tensorboard = TensorBoard(log_dir=os.path.join(opt.checkpoints_dir, config.name + 'logs'),
                              histogram_freq=0, batch_size=32, write_graph=True,
                              write_grads=False, write_images=True)
    checkpointer = ModelCheckpoint(filepath=os.path.join(opt.checkpoints_dir, config.name + '__model__.hdf5'),
                                   verbose=1, save_best_only=True)
    self.model.fit_generator(make_generator(sources['train'], batch_size=opt.batch_size),
                             validation_data=make_generator(sources['valid'], batch_size=opt.batch_size),
                             validation_steps=4, steps_per_epoch=config.steps, epochs=epochs,
                             verbose=2, callbacks=[updateUI, checkpointer, tensorboard])
    # Save the final model plus a small JSON description of its inputs/outputs.
    self.model.save(os.path.join(opt.checkpoints_dir, config.name + '__model__.hdf5'))
    model_config = {}
    model_config['input_size'] = opt.input_size
    model_config['input_channels'] = len(opt.input_channels)
    model_config['target_channels'] = len(opt.target_channels)
    with open(os.path.join(opt.work_dir, 'model_config.json'), 'w') as f:
        json.dump(model_config, f)
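
# `make_generator` is used throughout this file but not defined in the excerpt.
# Below is a minimal sketch of what such a helper could look like, assuming each
# source item exposes paired arrays under the keys 'A' (input) and 'B' (target);
# the key names and the shuffling strategy are assumptions, not the plugin's
# actual implementation.
def make_generator(source, batch_size=1):
    import random
    import numpy as np
    indices = list(range(len(source)))
    while True:  # fit_generator expects an endless generator
        random.shuffle(indices)
        for start in range(0, len(indices) - batch_size + 1, batch_size):
            batch = [source[i] for i in indices[start:start + batch_size]]
            x = np.stack([item['A'] for item in batch])  # (batch, H, W, input_nc)
            y = np.stack([item['B'] for item in batch])  # (batch, H, W, target_nc)
            yield x, y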
def test_train():
    model = UnetGenerator(image_size=opt.input_size, input_channels=opt.input_nc,
                          target_channels=opt.target_nc, filters_base=16)
    model.compile(optimizer='adam', loss=mean_absolute_error, metrics=[mean_absolute_error])
    sources = TransformedTubulin001(opt)
    d = make_generator(sources['train'], batch_size=opt.batch_size)
    # The generator must yield NHWC batches matching the configured sizes.
    x, y = next(d)
    assert x.shape == (opt.batch_size, opt.input_size, opt.input_size, opt.input_nc)
    assert y.shape == (opt.batch_size, opt.target_size, opt.target_size, opt.target_nc)
    # One tiny epoch to check that the full training loop runs end to end.
    model.fit_generator(make_generator(sources['train'], batch_size=opt.batch_size),
                        validation_data=make_generator(sources['test'], batch_size=opt.batch_size),
                        validation_steps=1, steps_per_epoch=1, epochs=1, verbose=2, callbacks=[])
    # Export the trained model for use in the browser with TensorFlow.js.
    import tensorflowjs as tfjs
    tfjs.converters.save_keras_model(model, opt.work_dir + '/__js_model__')
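
# The fragments in this file rely on a module-level `opt` options object. A minimal
# sketch of the fields it needs to carry is shown here, with placeholder values
# (every value below is an assumption chosen only to make the scripts runnable):
from types import SimpleNamespace

opt = SimpleNamespace(
    work_dir='./anet_workdir',                   # hypothetical output folder
    checkpoints_dir='./anet_workdir/__model__',  # hypothetical checkpoint folder
    input_size=256,
    target_size=256,
    input_channels=['cells'],                    # example channel names
    target_channels=['mask'],
    input_nc=1,                                  # len(input_channels)
    target_nc=1,                                 # len(target_channels)
    batch_size=4,
    load_from=None,                              # path to existing weights, or None
)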
if opt.load_from is not None:
    model.load_weights(opt.load_from)

DSSIM_L1 = get_dssim_l1_loss()
model.compile(optimizer='adam', loss=DSSIM_L1, metrics=['mse', DSSIM_L1])
sources = TransformedLRSR(opt)
tensorboard = TensorBoard(log_dir=os.path.join(opt.checkpoints_dir, 'logs'),
                          histogram_freq=0, batch_size=32, write_graph=True,
                          write_grads=False, write_images=True)
checkpointer = ModelCheckpoint(filepath=os.path.join(opt.checkpoints_dir, 'weights.hdf5'),
                               verbose=1, save_best_only=True)
model.fit_generator(make_generator(sources['train'], batch_size=opt.batch_size),
                    validation_data=make_generator(sources['test'], batch_size=opt.batch_size),
                    validation_steps=4, steps_per_epoch=200, epochs=1000, verbose=2,
                    callbacks=[checkpointer, tensorboard])
export_model_to_js(model, opt.work_dir + '/__js_model__')
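
# `get_dssim_l1_loss` is not defined in this excerpt. Below is a sketch of how a
# combined DSSIM + L1 loss could be built, assuming `tf.image.ssim` is available;
# the weighting (`alpha`) and dynamic range (`max_val`) are assumptions, and the
# actual Anet-lite implementation may differ.
import tensorflow as tf
from tensorflow.keras import backend as K

def get_dssim_l1_loss(alpha=0.5, max_val=1.0):
    def dssim_l1(y_true, y_pred):
        # DSSIM = (1 - SSIM) / 2, computed per image; L1 is the mean absolute error.
        dssim = (1.0 - tf.image.ssim(y_true, y_pred, max_val=max_val)) / 2.0
        l1 = K.mean(K.abs(y_true - y_pred), axis=[1, 2, 3])
        return alpha * dssim + (1.0 - alpha) * l1
    return dssim_l1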
opt.target_nc = len(opt.target_channels)
opt.batch_size = 10
# Fetch and unpack the training data on the first run.
if not os.path.exists(os.path.join(opt.work_dir, 'train')):
    print('Downloading dataset...')
    os.makedirs(opt.work_dir, exist_ok=True)
    download_with_url('https://kth.box.com/shared/static/r6kjgvdkcuehssxipaxqxfflmz8t65u1.zip',
                      os.path.join(opt.work_dir, 'SegmentationTrainingProcessed_CG_20200109-offset-corrected.zip'),
                      unzip=True)

model = UnetGenerator(input_size=opt.input_size, input_channels=opt.input_nc,
                      target_channels=opt.target_nc, base_filter=23)
if opt.load_from is not None:
    model.load_weights(opt.load_from)

DSSIM_L1 = get_dssim_l1_loss()
model.compile(optimizer='adam', loss=DSSIM_L1, metrics=['mse', DSSIM_L1])
model.summary()

sources = GenericTransformedImages(opt)
tensorboard = TensorBoard(log_dir=os.path.join(opt.checkpoints_dir, 'logs'),
                          histogram_freq=0, batch_size=32, write_graph=True,
                          write_grads=False, write_images=True)
checkpointer = ModelCheckpoint(filepath=os.path.join(opt.checkpoints_dir, 'weights.hdf5'),
                               verbose=1, save_best_only=True)
# UpdateUI callback fed with validation samples; results go to the outputs folder.
updateUI = UpdateUI(1000, make_generator(sources['valid'], batch_size=opt.batch_size),
                    os.path.join(opt.work_dir, 'outputs'))
model.fit_generator(make_generator(sources['train'], batch_size=opt.batch_size),
                    validation_data=make_generator(sources['valid'], batch_size=opt.batch_size),
                    validation_steps=4, steps_per_epoch=1, epochs=1000, verbose=2,
                    use_multiprocessing=True, workers=10,
                    callbacks=[tensorboard, checkpointer, updateUI])
export_model_to_js(model, opt.work_dir + '/__js_model__')
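
# `export_model_to_js` is not shown in this excerpt. Given that test_train() above
# calls the tensorflowjs converter directly, a thin wrapper like the sketch below
# would be consistent; treat it as an assumption rather than the actual helper.
def export_model_to_js(model, path):
    import tensorflowjs as tfjs
    # Writes model.json plus binary weight shards into `path`.
    tfjs.converters.save_keras_model(model, path)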