Example #1
    async def add_training_data(self, sample_path, local_anno=False):
        # Locate the annotation.json for this sample; fetch it remotely
        # unless a local copy is expected.
        anno_path = os.path.join(sample_path, "annotation.json")
        if not local_anno:
            file_content = await self.readFile(anno_path)
            with open(anno_path, "w") as f:
                f.write(file_content)

        # Generate masks from the GeoJSON annotation.
        if os.path.exists(anno_path):
            print("generating mask from file:", anno_path)
            self.gen_mask_from_geojson(files_proc=[anno_path])
        else:
            print("cannot find annotation file:", anno_path)

        # Move the sample directory into the training directory.
        shutil.move(
            os.path.dirname(anno_path),
            os.path.join(self._opt.work_dir, "train"))

        # Refresh the training sources so the new sample is picked up.
        self.sources = GenericTransformedImages(self._opt)
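gen_mask_from_geojson, used above, is defined elsewhere in the plugin. As a rough illustration of the underlying idea, the sketch below rasterizes the polygon features of a GeoJSON-style annotation file into a single binary mask with PIL; the function name, the assumption of pixel-space coordinates, and the single-mask output are illustrative choices, not the actual implementation.

import json
from PIL import Image, ImageDraw

def rasterize_geojson_polygons(anno_path, image_size, mask_path):
    """Illustrative sketch: burn every polygon feature of a GeoJSON-style
    annotation file into a single binary mask (255 = inside)."""
    with open(anno_path) as f:
        anno = json.load(f)

    mask = Image.new("L", image_size, 0)
    draw = ImageDraw.Draw(mask)
    for feature in anno.get("features", []):
        geometry = feature.get("geometry", {})
        if geometry.get("type") != "Polygon":
            continue
        # Assume the first ring is the outer boundary, already in pixel coordinates.
        ring = [(float(x), float(y)) for x, y in geometry["coordinates"][0]]
        draw.polygon(ring, outline=255, fill=255)
    mask.save(mask_path)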
Example #2
    async def train_2(self, config):
        if not self._initialized:
            api.alert('Please click `Anet-Lite` before training.')
            return
        opt = self._opt

        sources = GenericTransformedImages(opt)
        epochs = config.epochs
        self.dash = await api.createWindow(type="Im2Im-Dashboard", name="Anet-lite Training", w=20, h=15,
                                           data={"display_mode": "all", 'metrics': ['mse', 'dssim_l1'],
                                                 'callbacks': ['onStep']})
        updateUI = UpdateUI(epochs, self.dash, make_generator(sources['valid'], batch_size=1), opt)
        opt.batch_size = config.batchsize
        abort.clear()
        tensorboard = TensorBoard(log_dir=os.path.join(opt.checkpoints_dir, config.name + 'logs'), histogram_freq=0,
                                  batch_size=32, write_graph=True, write_grads=False, write_images=True)
        checkpointer = ModelCheckpoint(filepath=os.path.join(opt.checkpoints_dir, config.name + '__model__.hdf5'),
                                       verbose=1, save_best_only=True)
        self.model.fit_generator(make_generator(sources['train'], batch_size=opt.batch_size),
                                 validation_data=make_generator(sources['valid'], batch_size=opt.batch_size),
                                 validation_steps=4, steps_per_epoch=config.steps, epochs=epochs, verbose=2,
                                 callbacks=[updateUI, checkpointer, tensorboard])
        self.model.save(os.path.join(opt.checkpoints_dir, config.name + '__model__.hdf5'))

        model_config = {
            'input_size': opt.input_size,
            'input_channels': len(opt.input_channels),
            'target_channels': len(opt.target_channels),
        }

        with open(os.path.join(opt.work_dir, 'model_config.json'), 'w') as f:
            json.dump(model_config, f)
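make_generator and GenericTransformedImages come from the surrounding Anet-lite code and are not shown in these examples. The sketch below illustrates what an endless batching generator suitable for fit_generator could look like, assuming each item of the source is a paired (input, target) tuple of NumPy arrays; the name and the item structure are assumptions, not the actual implementation.

import random
import numpy as np

def make_batch_generator(source, batch_size=4, shuffle=True):
    """Illustrative sketch: loop forever over `source`, yielding
    (inputs, targets) batches shaped (batch_size, H, W, C)."""
    indices = list(range(len(source)))
    while True:
        if shuffle:
            random.shuffle(indices)
        for start in range(0, len(indices) - batch_size + 1, batch_size):
            batch = [source[j] for j in indices[start:start + batch_size]]
            xs = np.stack([x for x, _ in batch])
            ys = np.stack([y for _, y in batch])
            yield xs, ys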
Example #3
    async def predict(self, my=None, sample_path=None):
        if not self._initialized:
            api.alert('Please click `Anet-Lite` before testing.')
            return

        sources = GenericTransformedImages(self._opt)
        batch_size = 1
        source = sources['test']
        count = 0

        if sample_path is None:
            test_samples = os.listdir(os.path.join(self.work_dir, "test"))
            sample_path = os.path.join(self.work_dir, "test", test_samples[0])
        # print("start run cus_make_test_generator ...")
        gen = self.cus_make_test_generator(source, sample_path)
        # gen = make_test_generator(source)
        api.showStatus('making predictions.')
        totalsize = len(source)
        self.dash = await api.createWindow(type="Im2Im-Dashboard", name="Anet-lite Prediction", w=25, h=10,
                                           data={"display_mode": "all"})

        input_channels = [ch[0] for ch in self._opt.input_channels]
        output_channels = [ch[0] + '_output' for ch in self._opt.target_channels]
        label = 'Sample '
        titles = [input_channels, output_channels]
        print("titles:", titles)

        xbatch, paths = next(gen)
        print("start run predict ...")
        ypbatch = self.model.predict(xbatch, batch_size=batch_size)
        tensor_list = [xbatch, ypbatch]
        plot_tensors(self.dash, tensor_list, label, titles)
        count += batch_size
        for b in range(len(ypbatch)):
            image = ypbatch[b]
            path = paths[b]
            _, name = os.path.split(path)
            # output_path = os.path.join(sample_path, name)
            for i in range(image.shape[2]):
                misc.imsave(os.path.join(sample_path, output_channels[i] + '.png'),
                            image[:, :, i].astype('float32'))
        api.showProgress(1.0 * count / totalsize)
        api.showStatus('making predictions: {}/{}'.format(count, totalsize))
        annotation_json = self.masks_to_annotation(sample_path, outputs=self.config_json.get("outputs"))
        print("save prediction.json to path:", sample_path)
        return annotation_json
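masks_to_annotation, which converts the predicted mask images back into an annotation structure, is likewise defined outside these snippets. One common way to recover polygon outlines from a probability mask is contour tracing; the sketch below uses scikit-image, with the threshold, the (x, y) ordering, and the output format chosen as assumptions rather than taken from the actual code.

import numpy as np
from skimage import measure

def mask_to_polygons(mask, threshold=0.5):
    """Illustrative sketch: extract closed contours from a 2D mask
    and return them as lists of (x, y) vertices."""
    binary = (mask >= threshold).astype(np.uint8)
    contours = measure.find_contours(binary, 0.5)
    polygons = []
    for contour in contours:
        # find_contours returns (row, col); swap to (x, y) for annotation use.
        polygons.append([(float(c), float(r)) for r, c in contour])
    return polygons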
Example #4
    async def test(self, my):
        if not self._initialized:
            api.alert('Please click `Anet-Lite` before testing.')
            return
        sources = GenericTransformedImages(self._opt)
        batch_size = 1
        source = sources['test']
        count = 0
        output_dir = os.path.join(self._opt.work_dir, 'outputs')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        gen = make_test_generator(source, batch_size=batch_size)
        api.showStatus('making predictions.')
        totalsize = len(source)
        self.dash = await api.createWindow(type="Im2Im-Dashboard", name="Anet-lite Prediction", w=25, h=10, data={"display_mode": "all"})

        input_channels = [ch[0] for ch in self._opt.input_channels]
        output_channels = ['output_'+ch[0] for ch in self._opt.target_channels]

        for i in range(int(totalsize/batch_size+0.5)):
            xbatch, paths = next(gen)
            ypbatch = self.model.predict(xbatch, batch_size=batch_size)
            tensor_list = [xbatch, ypbatch]
            label = 'Sample '+ str(i)
            titles = [input_channels, output_channels]
            plot_tensors(self.dash, tensor_list, label, titles)
            count += batch_size
            for b in range(len(ypbatch)):
                image = ypbatch[b]
                path = paths[b]
                _, name = os.path.split(path)
                output_path = os.path.join(output_dir, name)
                # Save each predicted channel of this sample as a separate image.
                for c in range(image.shape[2]):
                    misc.imsave(output_path + '_' + output_channels[c] + '_output.tif',
                                image[:, :, c].astype('float32'))
            api.showProgress(1.0 * count / totalsize)
            api.showStatus('making predictions: {}/{}'.format(count, totalsize))
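make_test_generator apparently yields image batches together with their file paths so the outputs can be written next to the corresponding inputs. A minimal sketch under that assumption is shown below; the (array, path) item structure and the function name are assumptions, not taken from the actual code.

import numpy as np

def make_path_aware_test_generator(source, batch_size=1):
    """Illustrative sketch: yield (inputs, paths) batches, where each
    item of `source` is assumed to be an (array, path) pair."""
    for start in range(0, len(source), batch_size):
        stop = min(start + batch_size, len(source))
        items = [source[j] for j in range(start, stop)]
        xs = np.stack([x for x, _ in items])
        paths = [p for _, p in items]
        yield xs, paths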
Example #5
# Total params: 6,764,136
# Trainable params: 6,736,130
# Non-trainable params: 28,006
model = MobileUNet(input_size=opt.input_size,
                   input_channels=opt.input_nc,
                   target_channels=opt.target_nc)

if opt.load_from is not None:
    model.load_weights(opt.load_from)

DSSIM_L1 = get_dssim_l1_loss()
model.compile(optimizer='adam', loss=DSSIM_L1, metrics=['mse', DSSIM_L1])

model.summary()

sources = GenericTransformedImages(opt)

tensorboard = TensorBoard(log_dir=os.path.join(opt.checkpoints_dir, 'logs'),
                          histogram_freq=0,
                          batch_size=32,
                          write_graph=True,
                          write_grads=False,
                          write_images=True)
checkpointer = ModelCheckpoint(filepath=os.path.join(opt.checkpoints_dir,
                                                     'weights.hdf5'),
                               verbose=1,
                               save_best_only=True)
updateUI = UpdateUI(
    1000, make_generator(sources['valid'], batch_size=opt.batch_size),
    os.path.join(opt.work_dir, 'outputs'))
model.fit_generator(make_generator(sources['train'], batch_size=opt.batch_size),
                    # Remaining arguments reconstructed to mirror the train_2 example
                    # above; the steps/epochs values here are placeholders.
                    validation_data=make_generator(sources['valid'], batch_size=opt.batch_size),
                    validation_steps=4,
                    steps_per_epoch=opt.steps_per_epoch,
                    epochs=opt.epochs,
                    verbose=2,
                    callbacks=[updateUI, checkpointer, tensorboard])
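get_dssim_l1_loss is provided by the surrounding Anet-lite code and is not reproduced in these examples. Its name suggests a loss mixing structural dissimilarity (DSSIM, i.e. (1 - SSIM) / 2) with an L1 term; the sketch below shows one way such a combination could be written with tf.image.ssim, where the weighting alpha, the max_val range, and the per-sample reduction are all assumptions rather than the actual implementation.

import tensorflow as tf

def make_dssim_l1_loss(alpha=0.5, max_val=1.0):
    """Illustrative sketch: weighted sum of DSSIM and mean absolute error.
    `alpha` balances the two terms and is an arbitrary choice here."""
    def dssim_l1(y_true, y_pred):
        ssim = tf.image.ssim(y_true, y_pred, max_val=max_val)
        dssim = (1.0 - ssim) / 2.0
        l1 = tf.reduce_mean(tf.abs(y_true - y_pred), axis=[1, 2, 3])
        return alpha * dssim + (1.0 - alpha) * l1
    return dssim_l1

# Usage analogous to the compile call above:
# model.compile(optimizer='adam', loss=make_dssim_l1_loss(), metrics=['mse'])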