Example #1
import torch

from model.classifier import Classifier  # import path assumed from the surrounding project
def build_model(cfg, paramsfile):
    """Build the classifier from `cfg` and load weights from `paramsfile`."""
    model = Classifier(cfg)
    model = model.to('cpu')
    ckpt = torch.load(paramsfile, map_location='cpu')
    # Checkpoints may wrap the weights under 'state_dict' or be a bare state dict.
    state_dict = ckpt['state_dict'] if 'state_dict' in ckpt else ckpt
    model.load_state_dict(state_dict)
    if 'step' in ckpt and 'auc_dev_best' in ckpt:
        print(f"Using model '{paramsfile}' at step: {ckpt['step']} "
              f"with AUC: {ckpt['auc_dev_best']}")
    return model.eval()
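
# Usage sketch for build_model. The config path, checkpoint name, and input size
# below are assumptions for illustration, not part of the original example:
if __name__ == "__main__":
    import json
    from easydict import EasyDict as edict

    cfg = edict(json.load(open("config/example.json")))
    model = build_model(cfg, "model_best.pt")
    with torch.no_grad():
        logits, logit_maps = model(torch.zeros(1, 3, 512, 512))
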
Example #2
import json
import os
from io import BytesIO

import cv2
import numpy as np
import pydicom
import torch
from easydict import EasyDict as edict
from skimage.exposure import equalize_adapthist

# Project-local modules; the import paths below are assumptions. The per-class
# logit threshold array `threshs` is likewise defined elsewhere in the original project.
from model.classifier import Classifier
from gradcam import GradCam
from integrated_gradients import IntegratedGradients


class MDAIModel:
    def __init__(self):
        root_path = os.path.dirname(__file__)

        with open(os.path.join(root_path, "config/example.json")) as f:
            cfg = edict(json.load(f))

        self.model = Classifier(cfg)
        self.model.cfg.num_classes = [1, 1, 1, 1, 1, 1]
        self.model._init_classifier()
        self.model._init_attention_map()
        self.model._init_bn()

        if torch.cuda.is_available():
            self.model = self.model.eval().cuda()
        else:
            self.model = self.model.eval().cpu()

        chkpt_path = os.path.join(root_path, "model_best.pt")
        self.model.load_state_dict(
            torch.load(chkpt_path, map_location=lambda storage, loc: storage)
        )

    def predict(self, data):
        """
        The input data has the following schema:

        {
            "instances": [
                {
                    "file": "bytes"
                    "tags": {
                        "StudyInstanceUID": "str",
                        "SeriesInstanceUID": "str",
                        "SOPInstanceUID": "str",
                        ...
                    }
                },
                ...
            ],
            "args": {
                "arg1": "str",
                "arg2": "str",
                ...
            }
        }

        Model scope specifies whether an entire study, series, or instance is given to the model.
        If the model scope is 'INSTANCE', then `instances` will be a single instance (list length of 1).
        If the model scope is 'SERIES', then `instances` will be a list of all instances in a series.
        If the model scope is 'STUDY', then `instances` will be a list of all instances in a study.

        The additional `args` dict supplies values that may be used in a given run.

        For a single instance dict, `file` is the raw binary data representing a DICOM file, and
        can be loaded using: `ds = pydicom.dcmread(BytesIO(instance["file"]))`.

        The results returned by this function should have the following schema:

        [
            {
                "type": "str", // 'NONE', 'ANNOTATION', 'IMAGE', 'DICOM', 'TEXT'
                "study_uid": "str",
                "series_uid": "str",
                "instance_uid": "str",
                "frame_number": "int",
                "class_index": "int",
                "data": {},
                "probability": "float",
                "explanations": [
                    {
                        "name": "str",
                        "description": "str",
                        "content": "bytes",
                        "content_type": "str",
                    },
                    ...
                ],
            },
            ...
        ]

        The DICOM UIDs must be supplied based on the scope of the label attached to `class_index`.
        """
        input_instances = data["instances"]
        input_args = data["args"]

        results = []

        for instance in input_instances:
            tags = instance["tags"]
            ds = pydicom.dcmread(BytesIO(instance["file"]))
            x = ds.pixel_array

            x_orig = x

            # Preprocess: resize, adaptive histogram equalization, rescale to [-1, 1],
            # then stack the grayscale image into 3 channels with a batch dimension.
            x = cv2.resize(x, (1024, 1024))
            x = equalize_adapthist(x.astype(float) / x.max(), clip_limit=0.01)
            x = cv2.resize(x, (512, 512))
            x = x * 2 - 1
            x = np.array([[x, x, x]])  # shape (1, 3, 512, 512)
            x = torch.from_numpy(x).float()
            if torch.cuda.is_available():
                x = x.cuda()
            else:
                x = x.cpu()

            with torch.no_grad():
                logits, logit_maps = self.model(x)
                logits = torch.cat(logits, dim=1).detach().cpu()
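                # Shifting each logit by its per-class threshold before the sigmoid
                # maps logit == threshold to probability 0.5, so the fixed 0.5 cutoff
                # below reproduces the per-class operating points in `threshs`.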
                y_prob = torch.sigmoid(logits - torch.from_numpy(threshs).reshape((1, 6)))
                y_prob = y_prob.cpu().numpy()

            # Enable gradients on the input for the explanation methods below.
            x.requires_grad = True

            y_classes = y_prob >= 0.5
            class_indices = np.where(y_classes.astype("bool"))[1]

            if len(class_indices) == 0:
                # no outputs, return 'NONE' output type
                result = {
                    "type": "NONE",
                    "study_uid": tags["StudyInstanceUID"],
                    "series_uid": tags["SeriesInstanceUID"],
                    "instance_uid": tags["SOPInstanceUID"],
                    "frame_number": None,
                }
                results.append(result)
            else:
                for class_index in class_indices:
                    probability = y_prob[0][class_index]

                    gradcam = GradCam(self.model)
                    gradcam_output = gradcam.generate_cam(x, x_orig, class_index)
                    gradcam_output_buffer = BytesIO()
                    gradcam_output.save(gradcam_output_buffer, format="PNG")

                    intgrad = IntegratedGradients(self.model)
                    intgrad_output = intgrad.generate_integrated_gradients(x, class_index, 5)
                    intgrad_output_buffer = BytesIO()
                    intgrad_output.save(intgrad_output_buffer, format="PNG")

                    result = {
                        "type": "ANNOTATION",
                        "study_uid": tags["StudyInstanceUID"],
                        "series_uid": tags["SeriesInstanceUID"],
                        "instance_uid": tags["SOPInstanceUID"],
                        "frame_number": None,
                        "class_index": int(class_index),
                        "data": None,
                        "probability": float(probability),
                        "explanations": [
                            {
                                "name": "Grad-CAM",
                                "description": "Visualize how parts of the image affects neural network’s output by looking into the activation maps. From _Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization_ (https://arxiv.org/abs/1610.02391)",
                                "content": gradcam_output_buffer.getvalue(),
                                "content_type": "image/png",
                            },
                            {
                                "name": "Integrated Gradients",
                                "description": "Visualize an average of the gradients along the construction of the input towards the decision. From _Axiomatic Attribution for Deep Networks_ (https://arxiv.org/abs/1703.01365)",
                                "content": intgrad_output_buffer.getvalue(),
                                "content_type": "image/png",
                            },
                        ],
                    }
                    results.append(result)

        return results
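
# Usage sketch for the instances-based schema documented in predict(). The DICOM
# file name and UID values are hypothetical; in deployment the server builds this
# payload and calls predict() per request:
if __name__ == "__main__":
    model = MDAIModel()
    with open("example.dcm", "rb") as f:
        payload = {
            "instances": [{
                "file": f.read(),
                "tags": {
                    "StudyInstanceUID": "1.2.3",
                    "SeriesInstanceUID": "1.2.3.4",
                    "SOPInstanceUID": "1.2.3.4.5",
                },
            }],
            "args": {},
        }
    print(model.predict(payload))
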
Example #3
class MDAIModel:
    def __init__(self):
        root_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                 "model")

        with open(os.path.join(root_path, "config/example.json")) as f:
            cfg = edict(json.load(f))

        self.model = Classifier(cfg)
        self.model.cfg.num_classes = [1, 1, 1, 1, 1, 1]
        self.model._init_classifier()
        self.model._init_attention_map()
        self.model._init_bn()

        if torch.cuda.is_available():
            self.model = self.model.eval().cuda()
        else:
            self.model = self.model.eval().cpu()

        chkpt_path = os.path.join(root_path, "model_best.pt")
        self.model.load_state_dict(
            torch.load(chkpt_path, map_location=lambda storage, loc: storage))

    def predict(self, data):
        """
        See https://github.com/mdai/model-deploy/blob/master/mdai/server.py for details on the
        schema of `data` and the required schema of the outputs returned by this function.
        """
        input_files = data["files"]
        input_annotations = data["annotations"]
        input_args = data["args"]

        outputs = []

        for file in input_files:
            if file["content_type"] != "application/dicom":
                continue

            ds = pydicom.dcmread(BytesIO(file["content"]))
            x = ds.pixel_array

            x_orig = x

            # Preprocess: resize, adaptive histogram equalization, rescale to [-1, 1],
            # then stack the grayscale image into 3 channels with a batch dimension.
            x = cv2.resize(x, (1024, 1024))
            x = equalize_adapthist(x.astype(float) / x.max(), clip_limit=0.01)
            x = cv2.resize(x, (512, 512))
            x = x * 2 - 1
            x = np.array([[x, x, x]])  # shape (1, 3, 512, 512)
            x = torch.from_numpy(x).float()
            if torch.cuda.is_available():
                x = x.cuda()
            else:
                x = x.cpu()

            with torch.no_grad():
                logits, logit_maps = self.model(x)
                logits = torch.cat(logits, dim=1).detach().cpu()
                y_prob = torch.sigmoid(logits - torch.from_numpy(threshs).reshape((1, 6)))
                y_prob = y_prob.cpu().numpy()

            # Enable gradients on the input for the explanation methods below.
            x.requires_grad = True

            y_classes = y_prob >= 0.5
            class_indices = np.where(y_classes.astype("bool"))[1]

            if len(class_indices) == 0:
                # no outputs, return 'NONE' output type
                output = {
                    "type": "NONE",
                    "study_uid": str(ds.StudyInstanceUID),
                    "series_uid": str(ds.SeriesInstanceUID),
                    "instance_uid": str(ds.SOPInstanceUID),
                    "frame_number": None,
                }
                outputs.append(output)
            else:
                for class_index in class_indices:
                    probability = y_prob[0][class_index]

                    gradcam = GradCam(self.model)
                    gradcam_output = gradcam.generate_cam(
                        x, x_orig, class_index)
                    gradcam_output_buffer = BytesIO()
                    gradcam_output.save(gradcam_output_buffer, format="PNG")

                    intgrad = IntegratedGradients(self.model)
                    intgrad_output = intgrad.generate_integrated_gradients(
                        x, class_index, 5)
                    intgrad_output_buffer = BytesIO()
                    intgrad_output.save(intgrad_output_buffer, format="PNG")

                    output = {
                        "type": "ANNOTATION",
                        "study_uid": str(ds.StudyInstanceUID),
                        "series_uid": str(ds.SeriesInstanceUID),
                        "instance_uid": str(ds.SOPInstanceUID),
                        "frame_number": None,
                        "class_index": int(class_index),
                        "data": None,
                        "probability": float(probability),
                        "explanations": [
                            {
                                "name": "Grad-CAM",
                                "description": "Visualize how parts of the image affect the neural network’s output by looking into the activation maps. From _Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization_ (https://arxiv.org/abs/1610.02391)",
                                "content": gradcam_output_buffer.getvalue(),
                                "content_type": "image/png",
                            },
                            {
                                "name": "Integrated Gradients",
                                "description": "Visualize an average of the gradients along the construction of the input towards the decision. From _Axiomatic Attribution for Deep Networks_ (https://arxiv.org/abs/1703.01365)",
                                "content": intgrad_output_buffer.getvalue(),
                                "content_type": "image/png",
                            },
                        ],
                    }
                    outputs.append(output)

        return outputs
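
# Usage sketch for the files-based schema of this variant (hypothetical file name;
# see the server.py link in the docstring for the authoritative payload format):
if __name__ == "__main__":
    model = MDAIModel()
    with open("example.dcm", "rb") as f:
        payload = {
            "files": [{"content": f.read(), "content_type": "application/dicom"}],
            "annotations": [],
            "args": {},
        }
    print(model.predict(payload))
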
Example #4
import argparse
import json
import os

import torch
from easydict import EasyDict as edict
from torchsummary import summary  # assumed source of `summary`

# Project-local modules; the import paths below are assumptions.
from model.classifier import Classifier
from pytorch_parser import PytorchParser
parser = argparse.ArgumentParser(description='test converter')
parser.add_argument('model_path', default=None, metavar='MODEL_PATH', type=str,
                    help="Path to the trained models")
args = parser.parse_args()

with open(os.path.join(args.model_path, 'cfg.json')) as f:
    cfg = edict(json.load(f))

model_file = "model/best.pth"
device = torch.device('cpu')  # PyTorch v0.4.0
net = Classifier(cfg)
ckpt = torch.load("model/best.ckpt", map_location='cpu')
net.load_state_dict(ckpt['state_dict'], strict=False)
torch.save(net, model_file)

net.eval()

dummy_input = torch.ones([1, 3, 1024, 1024])

net.to(device)
output = net(dummy_input)  # sanity-check forward pass on CPU

# Print a per-layer summary; torchsummary defaults to CUDA, so run it only when a GPU is available.
if torch.cuda.is_available():
    device = torch.device("cuda")  # PyTorch v0.4.0
    summary(net.to(device), (3, 1024, 1024))

# Convert the saved PyTorch model into Caffe files (the .prototxt path below).
pytorch_parser = PytorchParser(model_file, [3, 1024, 1024])
pytorch_parser.run(model_file)

Model_FILE = model_file + '.prototxt'