Example 1
    def mws_segmentation(self, affs):
        # affs: affinity maps of shape (n_offsets, H, W); the first two channels
        # are the direct-neighbor (attractive) affinities, the remaining ones the
        # long-range (repulsive / mutex) affinities.
        image_shape = affs.shape[1:]
        number_of_nodes = affs.shape[-1] * affs.shape[-2]
        offsets = [[-1, 0],
                   [0, -1],
                   [-3, 0],
                   [0, -3],
                   [-9, 0],
                   [0, -9],
                   [-27, 0],
                   [0, -27]]

        api.log('creating grid graph')
        graph = MWSGridGraph(image_shape)

        api.log('creating local edges')
        # attractive edges use inverted affinities as edge costs
        uvs, costs = graph.compute_nh_and_weights(1 - affs[:2],
                                                  offsets[:2])

        api.log('creating mutex edges')
        # repulsive edges are subsampled with stride [10, 10] and randomized strides
        mutex_uvs, mutex_costs = graph.compute_nh_and_weights(affs[2:],
                                                              offsets[2:],
                                                              [10, 10],
                                                              True)
        api.log('running mws')
        segmentation = compute_mws_clustering(number_of_nodes,
                                              uvs,
                                              mutex_uvs,
                                              costs,
                                              mutex_costs)

        return segmentation.reshape(image_shape)
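For reference, the same mutex-watershed calls can be exercised outside the plugin class. The sketch below is a minimal, self-contained version; it assumes `MWSGridGraph` and `compute_mws_clustering` come from `affogato.segmentation`, and it feeds random placeholder affinities (2 attractive + 6 repulsive channels, mirroring the offsets above) instead of real network output.

import numpy as np
from affogato.segmentation import MWSGridGraph, compute_mws_clustering  # assumed import path

# placeholder affinities: 2 attractive + 6 repulsive channels on a 128 x 128 image
affs = np.random.rand(8, 128, 128)
offsets = [[-1, 0], [0, -1],
           [-3, 0], [0, -3],
           [-9, 0], [0, -9],
           [-27, 0], [0, -27]]

shape = affs.shape[1:]
graph = MWSGridGraph(shape)
# attractive (local) edges: inverted affinities as costs
uvs, costs = graph.compute_nh_and_weights(1 - affs[:2], offsets[:2])
# repulsive (mutex) edges: long-range offsets, subsampled with stride 10
mutex_uvs, mutex_costs = graph.compute_nh_and_weights(affs[2:], offsets[2:], [10, 10], True)

n_nodes = shape[0] * shape[1]
segmentation = compute_mws_clustering(n_nodes, uvs, mutex_uvs, costs, mutex_costs)
print(segmentation.reshape(shape).shape)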
Example 2
    def setup(self):
        api.log('initialized')
Example 3
    def run(self, ctx):

        raw_file = "/home/swolf/local/data/hackathon2019/slice0.png"
        model_file = "/home/swolf/local/data/models/isbi_2d.pytorch"

        raw_img = self.load_image(raw_file)
        # model loading is disabled here; predict is called with model=None
        # model = self.load_model(model_file)
        model = None

        api.log('predicting')
        affinities = self.predict(model, raw_img)
        api.log('running mws')
        segmentation = self.mws_segmentation(affinities)

        api.log('mws done')
        # map the label values into the uint8 range for display
        segmentation = (segmentation % 255).astype(np.uint8)

        # crop a 512 x 512 region for display
        blend_image = segmentation[:512, :512]
        api.log('uniques: ' + str(np.unique(blend_image)))
        api.log('blend_shape: ' + str(blend_image.shape))
        name_plot = "/home/swolf/pictures/tmp.png"
        imsave(name_plot, blend_image)

        # encode the saved PNG as a base64 data URL and display it in an ImJoy image window
        with open(name_plot, 'rb') as f:
            data = f.read()
            api.log('loaded image data as ' + str(type(data)))
            result = base64.b64encode(data).decode('ascii')
            imgurl = 'data:image/png;base64,' + result
            api.createWindow(type='imjoy/image', w=12, h=15, data={"src": imgurl})
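The tail of `run` shows a reusable ImJoy pattern: read a PNG, wrap it in a base64 data URL, and pass it to an `imjoy/image` window. A stripped-down sketch of just that step follows; the `show_png` helper, its path argument, and the `from imjoy_rpc import api` import are illustrative assumptions (inside a plugin, `api` is provided by the ImJoy runtime), and in an async method the window call is awaited.

import base64
from imjoy_rpc import api  # assumption: the api object as exposed to ImJoy Python plugins

async def show_png(path):
    # read the PNG bytes and encode them as a base64 data URL
    with open(path, 'rb') as f:
        encoded = base64.b64encode(f.read()).decode('ascii')
    imgurl = 'data:image/png;base64,' + encoded
    # open an ImJoy image window displaying the encoded image
    await api.createWindow(type='imjoy/image', w=12, h=15, data={"src": imgurl})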
Example 4
    async def finish_config_callback(self, callback_config):
        print("callback_config:", callback_config)
        if self.config_win is not None:
            self.config_win.close()
        api.showMessage('Network configured.')
        # persist the submitted configuration and merge it into the current config
        with open(os.path.join(self.work_dir, "anet-config.json"), "w") as f:
            f.write(json.dumps(callback_config))
        self.config_json.update(callback_config)

        # await self.get_data_by_config(config=self.config_json)
        api.showStatus("Generating masks from the annotation file ...")
        self.get_mask_by_json(config=self.config_json)
        api.showStatus("Masks generated, starting training...")
        # build the training options from the merged config and initialize the model
        self._opt = self.get_opt(self.config_json, work_dir=self.work_dir)
        self.initialize(self._opt)
        api.log("self._opt.work_dir:" + self._opt.work_dir)
        api.log("self._opt.input_channels:" + str(self._opt.input_channels))
        api.log("self._opt.target_channels:" + str(self._opt.target_channels))
        api.log("self._opt.input_nc:" + str(self._opt.input_nc))
        api.log("self._opt.target_nc:" + str(self._opt.target_nc))

        # log the effective training configuration
        config = my_config()
        api.log("config.name:" + config.name)
        api.log("config.epochs:" + str(config.epochs))
        api.log("config.steps:" + str(config.steps))
        api.log("config.batchsize:" + str(config.batchsize))

        await self.train_2(config)