Code example #1
    async def finish_config_callback(self, callback_config):
        print("callback_config:", callback_config)
        if self.config_win is not None:
            self.config_win.close()
        api.showMessage('network configured.')
        # Persist the submitted configuration and merge it into the in-memory config.
        with open(os.path.join(self.work_dir, "anet-config.json"), "w") as f:
            f.write(json.dumps(callback_config))
        self.config_json.update(callback_config)

        # await self.get_data_by_config(config=self.config_json)
        api.showStatus("generating mask from the annotation file ...")
        self.get_mask_by_json(config=self.config_json)
        api.showStatus("Masks generated, now start training...")
        # Build the training options from the merged config and (re)initialize the model.
        self._opt = self.get_opt(self.config_json, work_dir=self.work_dir)
        self.initialize(self._opt)
        api.log("self._opt.work_dir:" + self._opt.work_dir)
        api.log("self._opt.input_channels:" + str(self._opt.input_channels))
        api.log("self._opt.target_channels:" + str(self._opt.target_channels))
        api.log("self._opt.input_nc:" + str(self._opt.input_nc))
        api.log("self._opt.target_nc:" + str(self._opt.target_nc))

        config = my_config()
        api.log("config.name:" + config.name)
        api.log("config.epochs:" + str(config.epochs))
        api.log("config.steps:" + str(config.steps))
        api.log("config.batchsize:" + str(config.batchsize))

        await self.train_2(config)
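The snippet above pulls its run parameters from a `my_config()` object whose definition is not part of this excerpt. A minimal sketch of what such an object could look like, assuming it only carries the `name`, `epochs`, `steps` and `batchsize` fields logged above (the field names are taken from the logs; the defaults are placeholders, not the plugin's actual values):

from dataclasses import dataclass

# Hypothetical stand-in for my_config(); only the fields logged above are assumed.
@dataclass
class my_config:
    name: str = 'anet-lite-run'  # placeholder value
    epochs: int = 100            # placeholder value
    steps: int = 10              # placeholder value
    batchsize: int = 4           # placeholder value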
Code example #2
    def on_batch_end(self, batch, logs):
        self.logs = logs
        api.showStatus('training epoch:' + str(self.epoch) + '/' + str(self.total_epoch) + ' ' + str(logs))
        sys.stdout.flush()
        # np.asscalar is deprecated and removed in recent NumPy releases; float() handles scalars and 0-d arrays.
        self.dash.updateCallback('onStep', self.step, {'mse': float(logs['mean_squared_error'])})
        self.step += 1
        # Allow the user to abort training between batches.
        if abort.is_set():
            raise Exception('Abort.')
Code example #3
    def on_epoch_end(self, epoch, logs):
        self.epoch = epoch
        self.logs = logs
        api.showProgress(self.epoch / self.total_epoch * 100)
        api.showStatus('training epoch:' + str(self.epoch) + '/' + str(self.total_epoch) + ' ' + str(logs))
        # Predict on one batch and show inputs, predictions and targets on the dashboard.
        xbatch, ybatch = next(self.gen)
        ypbatch = self.model.predict(xbatch, batch_size=1)
        tensor_list = [xbatch, ypbatch, ybatch]
        label = 'Step ' + str(self.step)
        titles = [self.input_channels, self.output_channels, self.target_channels]
        plot_tensors(self.dash, tensor_list, label, titles)
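For context, a minimal sketch of how the two callback methods in code examples #2 and #3 could be assembled and attached to training, assuming a Keras setup; the class name, constructor arguments and the import path of Callback are assumptions, not taken from the plugin:

import threading
from tensorflow.keras.callbacks import Callback  # import path may differ in the actual plugin

abort = threading.Event()  # set from the UI to request an early stop

class DashboardReporter(Callback):
    """Hypothetical container for the on_batch_end / on_epoch_end methods shown above."""
    def __init__(self, total_epoch, gen, dash, input_channels, output_channels, target_channels):
        super().__init__()
        self.total_epoch = total_epoch
        self.gen = gen                        # validation generator consumed in on_epoch_end
        self.dash = dash                      # Im2Im-Dashboard window handle
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.target_channels = target_channels
        self.epoch = 0
        self.step = 0
        self.logs = {}

    # on_batch_end and on_epoch_end as defined in code examples #2 and #3

# reporter = DashboardReporter(...)
# model.fit(train_gen, epochs=config.epochs, steps_per_epoch=config.steps, callbacks=[reporter])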
Code example #4
    async def predict(self, my=None, sample_path=None):
        if not self._initialized:
            api.alert('Please click `Anet-Lite` before testing.')
            return

        sources = GenericTransformedImages(self._opt)
        batch_size = 1
        source = sources['test']
        count = 0

        if sample_path is None:
            # Default to the first sample folder under work_dir/test when no path is given.
            test_samples = os.listdir(os.path.join(self.work_dir, "test"))
            sample_path = os.path.join(self.work_dir, "test", test_samples[0])
        # print("start run cus_make_test_generator ...")
        gen = self.cus_make_test_generator(source, sample_path)
        # gen = make_test_generator(source)
        api.showStatus('making predictions.')
        totalsize = len(source)
        self.dash = await api.createWindow(type="Im2Im-Dashboard", name="Anet-lite Prediction", w=25, h=10,
                                           data={"display_mode": "all"})

        input_channels = [ch[0] for ch in self._opt.input_channels]
        output_channels = [ch[0] + '_output' for ch in self._opt.target_channels]
        label = 'Sample '
        titles = [input_channels, output_channels]
        print("titles:", titles)

        xbatch, paths = next(gen)
        print("start run predict ...")
        ypbatch = self.model.predict(xbatch, batch_size=batch_size)
        tensor_list = [xbatch, ypbatch]
        plot_tensors(self.dash, tensor_list, label, titles)
        count += batch_size
        # Save each predicted output channel as a separate PNG next to the input sample.
        # Note: scipy.misc.imsave is deprecated/removed in recent SciPy; see the imageio sketch below.
        for b in range(len(ypbatch)):
            image = ypbatch[b]
            path = paths[b]
            _, name = os.path.split(path)
            # output_path = os.path.join(sample_path, name)
            for i in range(image.shape[2]):
                misc.imsave(os.path.join(sample_path, output_channels[i] + '.png'),
                            image[:, :, i].astype('float32'))
        api.showProgress(1.0 * count / totalsize)
        api.showStatus('making predictions: {}/{}'.format(count, totalsize))
        annotation_json = self.masks_to_annotation(sample_path, outputs=self.config_json.get("outputs"))
        print("save prediction.json to path:", sample_path)
        return annotation_json
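The per-channel saving in the predict method relies on scipy.misc.imsave, which has been deprecated since SciPy 1.0 and removed in later releases. A minimal replacement sketch using imageio; the uint16 scaling is an assumption, the original code writes the float32 channels directly:

import os
import imageio

def save_prediction_channels(image, sample_path, output_channels):
    # Write each predicted channel as a separate 16-bit PNG next to the input sample.
    for i in range(image.shape[2]):
        channel = image[:, :, i].astype('float32')
        channel = channel - channel.min()
        if channel.max() > 0:
            channel = channel / channel.max()
        imageio.imwrite(os.path.join(sample_path, output_channels[i] + '.png'),
                        (channel * 65535).astype('uint16'))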
Code example #5
    def gen_mask_from_geojson(self, files_proc, img_size=None, infer=False):
        # %% Some housekeeping to setup example data
        # files_proc= [os.path.abspath(os.path.join('..','data','maskGenerator','img','annotation.json'))]

        # masks_to_create = {
        #   "cells": ['filled', 'edge', 'distance', 'weigthed'],
        #   "nuclei": ['filled', 'edge', 'distance', 'weigthed'],
        # }
        masks_to_create = {}
        masks_to_create_value = ['filled', 'edge', 'distance', 'weigthed']

        # annot_types = list(masks_to_create.keys())

        annotationsImporter = annotationUtils.GeojsonImporter()

        # Instance to save masks
        masks = annotationUtils.MaskGenerator()

        weightedEdgeMasks = annotationUtils.WeightedEdgeMaskGenerator(sigma=8, w0=10)
        distMapMasks = annotationUtils.DistanceMapGenerator(truncate_distance=None)

        # %% Loop over all files
        count = len(files_proc)
        for i, file_proc in enumerate(files_proc):
            print('PROCESSING FILE:')
            print(file_proc)
            dir_name, file_name = os.path.split(file_proc)
            api.showStatus('generating masks for: ' + dir_name.split('/')[-1])
            api.showProgress(i / count * 100)
            # Decompose file name
            drive, path_and_file = os.path.splitdrive(file_proc)
            path, file = os.path.split(path_and_file)
            file_base, ext = os.path.splitext(file)

            # Read annotation:  Correct class has been selected based on annot_type
            annot_dict_all, roi_size_all, image_size = annotationsImporter.load(file_proc)
            if img_size is not None:
                image_size = img_size

            annot_types = set(annot_dict_all[k]['properties']['label'] for k in annot_dict_all.keys())
            print("annot_types: ", annot_types)

            for annot_type in annot_types:
                if infer:
                    file_name_save = os.path.join(drive, path, annot_type + '_filled_output.png')
                else:
                    file_name_save = os.path.join(drive, path, annot_type + '_filled.png')
                if os.path.exists(file_name_save):
                    print("skip to generate mask:", file_name_save)
                    continue
                # print("annot_type: ", annot_type)
                masks_to_create[annot_type] = masks_to_create_value

                # Filter the annotations by label
                annot_dict = {k: annot_dict_all[k] for k in annot_dict_all.keys() if
                              annot_dict_all[k]['properties']['label'] == annot_type}
                # print("len(annot_dict):", len(annot_dict))
                # print("annot_dict.keys():", annot_dict.keys())

                # Create masks

                # Binary masks are always needed as the basis for the other masks
                print(' .... creating binary masks .....')
                binaryMasks = annotationUtils.BinaryMaskGenerator(image_size=image_size, erose_size=5, obj_size_rem=500,
                                                                  save_indiv=True)
                mask_dict = binaryMasks.generate(annot_dict)

                # Save binary masks FILLED if specified
                if 'filled' in masks_to_create[annot_type]:
                    if infer:
                        file_name_save = os.path.join(drive, path, annot_type + '_filled_output.png')
                    else:
                        file_name_save = os.path.join(drive, path, annot_type + '_filled.png')
                    masks.save(mask_dict, 'fill', file_name_save)

                # # Edge mask
                # if 'edge' in masks_to_create[annot_type]:
                #     if infer:
                #         file_name_save = os.path.join(drive,path, annot_type + '_edge_output.png')
                #     else:
                #         file_name_save = os.path.join(drive,path, annot_type + '_edge.png')
                #     masks.save(mask_dict,'edge',file_name_save)

                # Distance map
                if 'distance' in masks_to_create[annot_type]:
                    print(' .... creating distance maps .....')
                    mask_dict = distMapMasks.generate(annot_dict, mask_dict)

                    # Save
                    if infer:
                        file_name_save = os.path.join(drive, path, annot_type + '_distmap_output.png')
                    else:
                        file_name_save = os.path.join(drive, path, annot_type + '_distmap.png')
                    masks.save(mask_dict, 'distance_map', file_name_save)
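A minimal usage sketch for the mask generator above; the plugin instance name and the folder layout (one annotation.json per sample folder under the work directory) are hypothetical and only illustrate the expected call:

import glob
import os

# 'plugin' stands for the instance of the class that defines gen_mask_from_geojson.
annotation_files = glob.glob(os.path.join(plugin.work_dir, "train", "*", "annotation.json"))
plugin.gen_mask_from_geojson(files_proc=annotation_files, img_size=None, infer=False)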