def inference(self, img):
    h, w = img.shape[:2]
    img_var = img.astype(np.float32)

    # Run the TF graph: feed the image and get per-pixel class scores.
    semantic_predictions = self.sess.run(self.predictions,
                                         feed_dict={self.input_images: img_var})
    pred = np.squeeze(semantic_predictions[0])

    # Collapse the class scores to a per-pixel class index map.
    pred_cls_idx = np.argmax(pred, axis=2)

    # Convert the index map to Supervisely bitmap figures and wrap them in an annotation.
    res_figures = sly.prediction_to_sly_bitmaps(self.out_class_mapping, pred_cls_idx)
    res_ann = sly.Annotation.new_with_objects((w, h), res_figures)
    return res_ann
def _determine_input_data(self):
    # Read the project structure from disk to know which samples will be processed.
    project_fs = sly.ProjectFS.from_disk_dir_project(self.helper.paths.project_dir)
    logger.info('Project structure has been read. Samples: {}.'.format(
        project_fs.pr_structure.image_cnt))
    self.in_project_fs = project_fs

    self.inf_feeder = sly.InferenceFeederFactory.create(
        self.config, self.helper.in_project_meta, self.train_classes
    )

    # Select post-processing depending on what the inference feeder expects back.
    if self.inf_feeder.expected_result == sly.InfResultsToFeeder.FIGURES:
        # Feeder wants figures: argmax over the class axis, then build bitmaps.
        self._postproc = lambda a, pred: sly.prediction_to_sly_bitmaps(a, np.argmax(pred, axis=2))
    elif self.inf_feeder.expected_result == sly.InfResultsToFeeder.SEGMENTATION:
        # Feeder wants the raw segmentation output: pass it through unchanged.
        self._postproc = lambda a, b: (a, b)
    else:
        raise NotImplementedError()
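# For illustration only: a hypothetical downstream call site for the
# self._postproc callback selected above. 'prob_map' and its shape are
# placeholders, not part of the original code.
#
#     prob_map = np.random.rand(256, 256, 3)   # fake H x W x C class scores
#     result = self._postproc(self.out_class_mapping, prob_map)
#     # FIGURES branch      -> Supervisely bitmap figures (argmax over the class axis)
#     # SEGMENTATION branch -> the (mapping, prob_map) pair passed through unchanged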
def inference(self, img):
    h, w = img.shape[:2]

    # Resize to the network input size and normalize.
    x = cv2.resize(img, tuple(self.input_size_wh))
    x = input_image_normalizer(x)
    x = torch.stack([x], 0)  # add dim #0 (batch size 1)
    x = cuda_variable(x, volatile=True)

    # Forward pass, then per-pixel class probabilities.
    output = self.model(x)
    output = functional.softmax(output, dim=1)
    output = output.data.cpu().numpy()[0]  # from batch to 3d

    # CHW -> HWC, then resize back to the original image size.
    pred = np.transpose(output, (1, 2, 0))
    pred = cv2.resize(pred, (w, h), interpolation=cv2.INTER_LINEAR)

    # Collapse to class indices and convert to Supervisely bitmap figures.
    pred_cls_idx = np.argmax(pred, axis=2)
    res_figures = sly.prediction_to_sly_bitmaps(self.out_class_mapping, pred_cls_idx)
    res_ann = sly.Annotation.new_with_objects((w, h), res_figures)
    return res_ann
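# A minimal usage sketch (assumption: 'model' is an instance of the wrapper
# class above and 'sample.jpg' is a placeholder path, neither is from the
# original code).
import cv2

img = cv2.imread('sample.jpg')   # H x W x 3, BGR as read by OpenCV
ann = model.inference(img)       # sly.Annotation sized (w, h) with one bitmap figure per predicted class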