Example #1
    def set_configuration(self, cfg_in):
        import torch
        from bioharn import clf_predict
        from distutils.util import strtobool
        cfg = self.get_configuration()
        _vital_config_update(cfg, cfg_in)

        for key in self._kwiver_config.keys():
            self._kwiver_config[key] = str(cfg.get_value(key))

        if self._kwiver_config['batch_size'] == "auto":
            self._kwiver_config['batch_size'] = 2
            if torch.cuda.is_available():
                gpu_mem = 0
                if len(self._kwiver_config['xpu']) == 1 and \
                  self._kwiver_config['xpu'] != 0:
                    gpu_id = int(self._kwiver_config['xpu'])
                    gpu_mem = torch.cuda.get_device_properties(
                        gpu_id).total_memory
                else:
                    # Track the smallest total memory across all visible GPUs.
                    self._gpu_count = torch.cuda.device_count()
                    for i in range(self._gpu_count):
                        single_gpu_mem = torch.cuda.get_device_properties(
                            i).total_memory
                        if gpu_mem == 0:
                            gpu_mem = single_gpu_mem
                        else:
                            gpu_mem = min(gpu_mem, single_gpu_mem)
                if gpu_mem > 9e9:
                    self._kwiver_config['batch_size'] = 4
                elif gpu_mem >= 7e9:
                    self._kwiver_config['batch_size'] = 3

        pred_config = clf_predict.ClfPredictConfig()
        pred_config['batch_size'] = self._kwiver_config['batch_size']
        pred_config['deployed'] = self._kwiver_config['deployed']
        pred_config['xpu'] = self._kwiver_config['xpu']
        pred_config['input_dims'] = 'native'  # (256, 256)
        self.predictor = clf_predict.ClfPredictor(pred_config)
        self.predictor._ensure_model()
        self._area_pivot = int(self._kwiver_config['area_pivot'])
        self._area_lower_bound = int(self._kwiver_config['area_lower_bound'])
        self._area_upper_bound = int(self._kwiver_config['area_upper_bound'])
        self._border_exclude = int(self._kwiver_config['border_exclude'])
        self._average_prior = strtobool(self._kwiver_config['average_prior'])

        if self._area_pivot < 0:
            self._area_upper_bound = -self._area_pivot
        elif self._area_pivot > 0:
            self._area_lower_bound = self._area_pivot

        return True
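
The automatic batch-size selection in the example above starts from a conservative batch size of 2, measures the smallest total memory across the GPUs it may run on (or the one GPU named in `xpu`), and raises the batch size to 3 at roughly 7 GB and to 4 above roughly 9 GB. The standalone restatement below is only an illustration of that selection rule, not part of the plugin; the function name `auto_batch_size` and the simplified `xpu.isdigit()` check are assumptions.

import torch

def auto_batch_size(xpu='all', default=2):
    # Pick a batch size from GPU memory: 4 above ~9 GB, 3 above ~7 GB,
    # otherwise fall back to the conservative default.
    if not torch.cuda.is_available():
        return default
    if xpu.isdigit():
        # A specific GPU index was requested; size the batch for that device.
        gpu_mem = torch.cuda.get_device_properties(int(xpu)).total_memory
    else:
        # Otherwise size the batch for the smallest visible GPU.
        gpu_mem = min(
            torch.cuda.get_device_properties(i).total_memory
            for i in range(torch.cuda.device_count()))
    if gpu_mem > 9e9:
        return 4
    elif gpu_mem >= 7e9:
        return 3
    return default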
Example #2
    def set_configuration(self, cfg_in):
        import torch
        from bioharn import clf_predict
        cfg = self.get_configuration()

        # HACK: merge_config does not support dictionary input
        _vital_config_update(cfg, cfg_in)

        for key in self._kwiver_config.keys():
            self._kwiver_config[key] = str(cfg.get_value(key))

        if self._kwiver_config['batch_size'] == "auto":
            self._kwiver_config['batch_size'] = 2
            if torch.cuda.is_available():
                gpu_mem = 0
                if len(self._kwiver_config['xpu']) == 1 and \
                  self._kwiver_config['xpu'] != 0:
                    gpu_id = int(self._kwiver_config['xpu'])
                    gpu_mem = torch.cuda.get_device_properties(
                        gpu_id).total_memory
                else:
                    # Track the smallest total memory across all visible GPUs.
                    self._gpu_count = torch.cuda.device_count()
                    for i in range(self._gpu_count):
                        single_gpu_mem = torch.cuda.get_device_properties(
                            i).total_memory
                        if gpu_mem == 0:
                            gpu_mem = single_gpu_mem
                        else:
                            gpu_mem = min(gpu_mem, single_gpu_mem)
                if gpu_mem > 9e9:
                    self._kwiver_config['batch_size'] = 4
                elif gpu_mem >= 7e9:
                    self._kwiver_config['batch_size'] = 3

        pred_config = clf_predict.ClfPredictConfig()
        pred_config['batch_size'] = self._kwiver_config['batch_size']
        pred_config['deployed'] = self._kwiver_config['deployed']
        if torch.cuda.is_available():
            pred_config['xpu'] = self._kwiver_config['xpu']
        else:
            pred_config['xpu'] = "cpu"
        pred_config['input_dims'] = 'native'  # (256, 256)
        self.predictor = clf_predict.ClfPredictor(pred_config)

        self.predictor._ensure_model()
        return True
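
The `_vital_config_update` call above (marked with the HACK comment) exists because `merge_config` does not accept a plain dictionary. The helper itself is defined elsewhere in the plugin; the sketch below is only an assumption of what it does, written against the KWIVER vital config methods `has_value`, `set_value`, and `merge_config`.

def _vital_config_update(cfg, cfg_in):
    # Merge cfg_in into cfg in place, accepting either a plain dict or
    # another vital config object (hypothetical sketch; the real helper
    # may differ).
    if isinstance(cfg_in, dict):
        for key, value in cfg_in.items():
            if cfg.has_value(key):
                cfg.set_value(key, str(value))
    else:
        cfg.merge_config(cfg_in)
    return cfg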
Example #3
    def set_configuration(self, cfg_in):
        import torch
        from bioharn import clf_predict
        from distutils.util import strtobool
        cfg = self.get_configuration()
        _vital_config_update(cfg, cfg_in)

        for key in self._kwiver_config.keys():
            self._kwiver_config[key] = str(cfg.get_value(key))

        if self._kwiver_config['batch_size'] == "auto":
            self._kwiver_config['batch_size'] = 2
            if torch.cuda.is_available():
                gpu_mem = 0
                if len(self._kwiver_config['xpu']) == 1 and \
                  self._kwiver_config['xpu'] != 0:
                    gpu_id = int(self._kwiver_config['xpu'])
                    gpu_mem = torch.cuda.get_device_properties(
                        gpu_id).total_memory
                else:
                    # Track the smallest total memory across all visible GPUs.
                    self._gpu_count = torch.cuda.device_count()
                    for i in range(self._gpu_count):
                        single_gpu_mem = torch.cuda.get_device_properties(
                            i).total_memory
                        if gpu_mem == 0:
                            gpu_mem = single_gpu_mem
                        else:
                            gpu_mem = min(gpu_mem, single_gpu_mem)
                if gpu_mem > 9e9:
                    self._kwiver_config['batch_size'] = 4
                elif gpu_mem >= 7e9:
                    self._kwiver_config['batch_size'] = 3

        pred_config = clf_predict.ClfPredictConfig()
        pred_config['batch_size'] = self._kwiver_config['batch_size']
        pred_config['deployed'] = self._kwiver_config['deployed']
        pred_config['xpu'] = self._kwiver_config['xpu']
        pred_config['input_dims'] = 'native'  # (256, 256)

        self.predictor = clf_predict.ClfPredictor(pred_config)
        self.predictor._ensure_model()
        self._area_pivot = int(self._kwiver_config['area_pivot'])
        self._area_lower_bound = int(self._kwiver_config['area_lower_bound'])
        self._area_upper_bound = int(self._kwiver_config['area_upper_bound'])
        self._border_exclude = int(self._kwiver_config['border_exclude'])
        self._average_prior = strtobool(self._kwiver_config['average_prior'])

        if self._area_pivot < 0:
            self._area_upper_bound = -self._area_pivot
        elif self._area_pivot > 0:
            self._area_lower_bound = self._area_pivot

        # Load per-type target scales from a file, if one is configured
        self._target_type_scales = dict()
        if self._kwiver_config['scale_type_file']:
            with open(self._kwiver_config['scale_type_file'], 'r') as fin:
                for line in fin:
                    # Each line is "<type name> <target area>"; the name may
                    # contain spaces, the area is the last token.
                    parsed_line = line.rstrip().split()
                    if not parsed_line:
                        continue
                    target_area = float(parsed_line[-1])
                    type_str = ' '.join(parsed_line[:-1])
                    self._target_type_scales[type_str] = target_area

        return True
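
The `scale_type_file` format implied by the parser above is one entry per line: the target area is the last whitespace-separated token and everything before it is taken as the class name, so names may contain spaces. A hypothetical file and the parse of one of its lines, purely for illustration:

# Hypothetical scale_type_file contents:
#   rockfish 1200.0
#   flatfish juvenile 450.0
# which would populate _target_type_scales as
#   {'rockfish': 1200.0, 'flatfish juvenile': 450.0}
line = "flatfish juvenile 450.0"
parsed_line = line.split()
target_area = float(parsed_line[-1])   # 450.0
type_str = ' '.join(parsed_line[:-1])  # 'flatfish juvenile'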