Code Example #1
    def classify(self, thresh):
        """
        Given the output scores (C x H x W), returns the label map as a QImage.
        Pixels whose best and second-best class scores differ by less than
        thresh are marked as uncertain.
        """

        # per-pixel winning class and its score
        scoresCopy = self.scores.copy()
        predictions = np.argmax(self.scores, 0)
        mymax = np.max(self.scores, 0)

        # zero out each pixel's winning score so a second max yields the runner-up
        for i in range(self.nclasses):
            scoresCopy[i, predictions == i] = 0.0

        # margin between the best and second-best score; a small margin means
        # the prediction is ambiguous
        delta = mymax - np.max(scoresCopy, 0)
        uncMatrix = delta < thresh

        # uncertain pixels get an extra label, drawn in white
        predictions[uncMatrix] = self.nclasses
        self.label_colors.append([255, 255, 255])

        resimg = np.zeros((predictions.shape[0], predictions.shape[1], 3),
                          dtype='uint8')
        for label_index in range(self.nclasses + 1):
            resimg[predictions ==
                   label_index, :] = self.label_colors[label_index]

        qimg = utils.rgbToQImage(resimg)

        # QImage.scaled() expects integer dimensions
        w = int(qimg.width() / self.scale_factor)
        h = int(qimg.height() / self.scale_factor)
        outimg = qimg.scaled(w, h, Qt.IgnoreAspectRatio, Qt.FastTransformation)
        return outimg
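
The uncertainty test above relies on the margin between a pixel's best and second-best class score: zeroing the winner in a copy of the score volume makes the runner-up recoverable with a second max. A minimal, self-contained sketch of the same trick on made-up scores (3 classes, 2 x 2 pixels, threshold 0.1):

    import numpy as np

    # synthetic C x H x W score volume (3 classes, 2x2 pixels)
    scores = np.array([[[0.9, 0.40], [0.2, 0.1]],
                       [[0.1, 0.35], [0.7, 0.1]],
                       [[0.0, 0.25], [0.1, 0.8]]])

    winner = np.argmax(scores, 0)
    best = np.max(scores, 0)

    # zero out each pixel's winning score to expose the runner-up
    runner_up = scores.copy()
    for c in range(scores.shape[0]):
        runner_up[c, winner == c] = 0.0

    margin = best - np.max(runner_up, 0)
    print(margin < 0.1)  # only the 0.40/0.35/0.25 pixel is uncertain
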
Code Example #2
    def create_histogram(self, list_selected, list_color):

        # collect, per selected class, the areas of its blobs in cm^2
        class_area = []
        for my_class in list_selected:
            my_area = []
            for blob in self.ann.seg_blobs:
                if blob.class_name == my_class:
                    # pixel area -> physical area (scale_factor is assumed to
                    # be in mm/pixel, so /100 converts mm^2 to cm^2)
                    blob_area = blob.area * self.scale_factor * self.scale_factor / 100
                    blob_area = np.around(blob_area, decimals=2)
                    my_area.append(blob_area)

            class_area.append(my_area)

        max_area = np.zeros(len(list_selected))
        sum_area = np.zeros(len(list_selected))

        for i in range(len(list_selected)):
            area_array = np.asarray(class_area[i])
            max_area[i] = max(area_array)
            sum_area[i] = sum(area_array)

        # histogram plot
        total_coverage = sum(sum_area)
        bins = np.arange(0, max(max_area), 100)
        colors = [tuple(np.asanyarray(list_color[i]) / 255) for i in range(len(list_selected))]
        areas = [np.asarray(class_area[i]) for i in range(len(list_selected))]
        patches = [mpatches.Patch(color=colors[i],
                                  label='%.4f m^2 %s' % (sum_area[i] / 10000, list_selected[i]))
                   for i in range(len(list_selected))]

        fig = plt.figure()
        fig.set_size_inches(10, 6.5)
        plt.legend(handles=patches)
        plt.hist(areas, bins, color=colors)
        plt.xlabel("Colonies area (cm^2)")
        plt.ylabel("Number of colonies")

        txt = "Total coverage {:.4f}".format(total_coverage/10000.0) + " m^2"
        if self.year is not None:
            txt += " (" + str(self.year) + ")"

        plt.title(txt)
        #plt.show()

        buf = io.BytesIO()
        fig.savefig(buf, format="png", dpi=180)
        buf.seek(0)
        img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
        buf.close()
        im = cv2.imdecode(img_arr, 1)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        # numpy array to QPixmap
        qimg = utils.rgbToQImage(im)
        qimg = qimg.scaled(self.preview_W, self.preview_H, Qt.KeepAspectRatio, Qt.SmoothTransformation)
        pxmap = QPixmap(qimg)

        self.lblPreview.setPixmap(pxmap)
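
Note how the figure never touches disk: savefig writes the PNG into a BytesIO buffer, cv2.imdecode turns the raw bytes into a BGR array, and a channel swap produces the RGB layout that rgbToQImage expects. The round trip in isolation (a sketch assuming matplotlib, OpenCV, and numpy are available):

    import io
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt

    fig = plt.figure()
    plt.hist([1, 2, 2, 3, 3, 3], bins=3)

    # render the figure into an in-memory PNG
    buf = io.BytesIO()
    fig.savefig(buf, format="png", dpi=180)
    buf.seek(0)
    img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    buf.close()

    im = cv2.imdecode(img_arr, cv2.IMREAD_COLOR)  # decodes to BGR
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)      # convert to RGB
    print(im.shape)  # (H, W, 3) uint8, ready for a QImage conversion
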
Code Example #3
File: Annotation.py  Project: brwfbx/TagLab
    def create_label_map(self, size, labels_info):
        """
        Creates a label map as a QImage and returns it.
        """

        # create a black canvas of the same size as the map
        w = size.width()
        h = size.height()

        imagebox = [0, 0, h, w]
        image = np.zeros([h, w, 3], np.uint8)

        for i, blob in enumerate(self.seg_blobs):
            if not blob.qpath_gitem.isVisible():
                continue

            if blob.class_name == "Empty":
                rgb = [255, 255, 255]
            else:
                class_color = labels_info[blob.class_name]
                rgb = class_color

            mask = blob.getMask().astype(bool)  # bool is required for bitmask indexing
            box = blob.bbox
            (box[2], box[3]) = (box[3] + box[0], box[2] + box[1])
            # box is now starty, startx, endy, endx

            # roi is the intersection of box and imagebox
            roi = [
                max(box[0], imagebox[0]),
                max(box[1], imagebox[1]),
                min(box[2], imagebox[2]),
                min(box[3], imagebox[3])
            ]
            subimage = image[roi[0] - imagebox[0]:roi[2] - imagebox[0],
                             roi[1] - imagebox[1]:roi[3] - imagebox[1]]
            submask = mask[roi[0] - box[0]:roi[2] - box[0],
                           roi[1] - box[1]:roi[3] - box[1]]

            #use the binary mask to assign a color
            subimage[submask] = rgb

            #create 1px border: dilate then subtract the mask.
            border = binary_dilation(submask) & ~submask

            #select only the border over blobs of the same color and draw the border
            samecolor = np.all(subimage == rgb, axis=-1)
            subimage[border & samecolor] = [0, 0, 0]

        labelimg = utils.rgbToQImage(image)
        return labelimg
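
The indexing above is the standard way to paint a mask that may spill past the image border: intersecting the blob box with the image box yields two identically shaped sub-views, one into the image and one into the mask, so the boolean assignment is always valid. A stripped-down sketch of the same bookkeeping:

    import numpy as np

    image = np.zeros((100, 100, 3), np.uint8)
    imagebox = [0, 0, 100, 100]          # starty, startx, endy, endx
    box = [90, 95, 120, 130]             # blob box, partly outside the image
    mask = np.ones((box[2] - box[0], box[3] - box[1]), bool)

    # intersection of the two boxes
    roi = [max(box[0], imagebox[0]), max(box[1], imagebox[1]),
           min(box[2], imagebox[2]), min(box[3], imagebox[3])]

    # matching sub-views into image and mask
    subimage = image[roi[0]:roi[2], roi[1]:roi[3]]
    submask = mask[roi[0] - box[0]:roi[2] - box[0],
                   roi[1] - box[1]:roi[3] - box[1]]

    assert subimage.shape[:2] == submask.shape   # (10, 5) here
    subimage[submask] = [255, 0, 0]              # paint only the overlap
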
Code Example #4
File: Coraline.py  Project: OlegJakushkin/TagLab
def segment(img, mask, l=0, conservative=0.1, grow=0, radius=30):
    if lib is None:
        raise Exception(
            "Coraline library (libcoraline.so, coraline.dll) not found.")

    img = gaussian(img, sigma=1.5, multichannel=False)
    img = img * 255
    img = img.astype(np.uint8)
    # debug output: save the smoothed image for inspection
    qimg = utils.rgbToQImage(img)
    qimg.save("Smoothed.jpg")

    w = img.shape[1]
    h = img.shape[0]
    W = mask.shape[1]
    H = mask.shape[0]
    if (w != W) or (h != H):
        raise ValueError(
            "Image size (%d x %d) does not match mask size (%d x %d)." %
            (w, h, W, H))

    #print(C.c_float(l), l)
    lib.Coraline_segment(C.c_void_p(img.ctypes.data),
                         C.c_void_p(mask.ctypes.data), C.c_int(w), C.c_int(h),
                         C.c_float(l), C.c_float(conservative),
                         C.c_float(grow), C.c_float(radius))
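
The ctypes pattern here is worth isolating: numpy's .ctypes.data exposes the address of the underlying buffer, which is handed to the C side as a void pointer so that Coraline can read the image and update the mask in place. A minimal sketch of the same pattern against a hypothetical library (libexample.so and its process() function are made up for illustration):

    import ctypes as C
    import numpy as np

    # hypothetical shared library exposing:
    #   void process(unsigned char* img, int w, int h);
    lib = C.CDLL("./libexample.so")

    # the array must be C-contiguous for the raw pointer to be meaningful
    img = np.ascontiguousarray(np.zeros((480, 640, 3), dtype=np.uint8))

    lib.process(C.c_void_p(img.ctypes.data),
                C.c_int(img.shape[1]),   # width
                C.c_int(img.shape[0]))   # height
    # the C side writes through the pointer, so img is modified in place
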
Code Example #5
    def run(self, img_map, TILE_SIZE, AGGREGATION_WINDOW_SIZE, AGGREGATION_STEP):
        """
        :param img_map: input map to classify, as a QImage.
        :param TILE_SIZE: Base tile size. This corresponds to the INPUT SIZE of the network.
        :param AGGREGATION_WINDOW_SIZE: Size of the sub-windows to consider for the aggregation.
        :param AGGREGATION_STEP: Step, in pixels, used to compute the different scores.
        :return:
        """

        # create a temporary folder to store the processing
        temp_dir = "temp"
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir)

        # prepare for running
        STEP_SIZE = AGGREGATION_WINDOW_SIZE

        W = img_map.width()
        H = img_map.height()

        # top, left, width, height
        working_area = [0, 0, W, H]

        wa_top = working_area[0]
        wa_left = working_area[1]
        wa_width = working_area[2]
        wa_height = working_area[3]

        if wa_top < AGGREGATION_STEP:
            wa_top = AGGREGATION_STEP

        if wa_left < AGGREGATION_STEP:
            wa_left = AGGREGATION_STEP

        if wa_left + wa_width >= W - AGGREGATION_STEP:
            wa_width = W - AGGREGATION_STEP - wa_left - 1

        if wa_top + wa_height >= H - AGGREGATION_STEP:
            wa_height = H - AGGREGATION_STEP - wa_top - 1

        tile_cols = int(wa_width / AGGREGATION_WINDOW_SIZE) + 1
        tile_rows = int(wa_height / AGGREGATION_WINDOW_SIZE) + 1

        if torch.cuda.is_available():
            device = torch.device("cuda")
            self.net.to(device)
            torch.cuda.synchronize()

        self.net.eval()

        # classification (per-tiles)
        tiles_number = tile_rows * tile_cols

        self.processing_step = 0
        self.total_processing_steps = 19 * tiles_number

        for row in range(tile_rows):

            if self.flagStopProcessing is True:
                break

            for col in range(tile_cols):

                if self.flagStopProcessing is True:
                    break

                scores = np.zeros((9, self.nclasses, TILE_SIZE, TILE_SIZE))

                # 9 overlapping crops: the tile jittered by +/- AGGREGATION_STEP
                k = 0
                for i in range(-1, 2):
                    for j in range(-1, 2):

                        top = wa_top - AGGREGATION_STEP + row * STEP_SIZE + i * AGGREGATION_STEP
                        left = wa_left - AGGREGATION_STEP + col * STEP_SIZE + j * AGGREGATION_STEP
                        cropimg = utils.cropQImage(img_map, [top, left, TILE_SIZE, TILE_SIZE])
                        img_np = utils.qimageToNumpyArray(cropimg)

                        img_np = img_np.astype(np.float32)
                        img_np = img_np / 255.0

                        # H x W x C --> C x H x W
                        img_np = img_np.transpose(2, 0, 1)

                        # Normalization (average subtraction)
                        img_np[0] = img_np[0] - self.average_norm[0]
                        img_np[1] = img_np[1] - self.average_norm[1]
                        img_np[2] = img_np[2] - self.average_norm[2]

                        with torch.no_grad():

                            img_tensor = torch.from_numpy(img_np)
                            input_batch = img_tensor.unsqueeze(0)

                            if torch.cuda.is_available():
                                input_batch = input_batch.to(device)

                            outputs = self.net(input_batch)

                            scores[k] = outputs[0].cpu().numpy()
                            k = k + 1

                            self.processing_step += 1
                            self.updateProgress.emit((100.0 * self.processing_step) / self.total_processing_steps)
                            QCoreApplication.processEvents()


                if self.flagStopProcessing is True:
                    break

                preds_avg = self.aggregateScores(scores, tile_sz=TILE_SIZE,
                                                 center_window_size=AGGREGATION_WINDOW_SIZE,
                                                 step=AGGREGATION_STEP)

                values_t, predictions_t = torch.max(torch.from_numpy(preds_avg), 0)
                preds = predictions_t.cpu().numpy()

                resimg = np.zeros((preds.shape[0], preds.shape[1], 3), dtype='uint8')

                for label_index in range(self.nclasses):
                    resimg[preds == label_index, :] = self.label_colors[label_index]

                tilename = str(row) + "_" + str(col) + ".png"
                filename = os.path.join(temp_dir, tilename)
                utils.rgbToQImage(resimg).save(filename)

                self.processing_step += 1
                self.updateProgress.emit((100.0 * self.processing_step) / self.total_processing_steps)
                QCoreApplication.processEvents()

        # put tiles together
        qimglabel = QImage(W, H, QImage.Format_RGB32)

        xoffset = 0
        yoffset = 0

        painter = QPainter(qimglabel)

        for r in range(tile_rows):
            for c in range(tile_cols):
                tilename = str(r) + "_" + str(c) + ".png"
                filename = os.path.join(temp_dir, tilename)
                qimg = QImage(filename)

                xoffset = wa_left + c * AGGREGATION_WINDOW_SIZE
                yoffset = wa_top + r * AGGREGATION_WINDOW_SIZE

                cut = False
                W_prime = wa_width
                H_prime = wa_height

                if xoffset + AGGREGATION_WINDOW_SIZE > wa_left + wa_width - 1:
                    W_prime = wa_width + wa_left - xoffset - 1
                    cut = True

                if yoffset + AGGREGATION_WINDOW_SIZE > wa_top + wa_height - 1:
                    H_prime = wa_height + wa_top - yoffset - 1
                    cut = True

                if cut is True:
                    qimg2 = qimg.copy(0, 0, W_prime, H_prime)
                    painter.drawImage(xoffset, yoffset, qimg2)
                else:
                    painter.drawImage(xoffset, yoffset, qimg)

        # end the painter before qimglabel is freed, otherwise Qt crashes
        # when the memory is released
        painter.end()

        labelfile = os.path.join(temp_dir, "labelmap.png")
        qimglabel.save(labelfile)

        torch.cuda.empty_cache()
        del self.net
        self.net = None
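
aggregateScores itself is not shown in this excerpt. Assuming an average-style aggregation, the idea is that each of the 9 score maps was computed on a tile shifted by (i, j) * AGGREGATION_STEP, so the center window must be read from a correspondingly shifted position inside each tile before the maps can be averaged. A hypothetical sketch, not the project's actual implementation:

    import numpy as np

    def aggregate_scores_avg(scores, tile_sz, center_window_size, step):
        # scores: 9 x nclasses x tile_sz x tile_sz, one entry per (i, j) jitter
        delta = (tile_sz - center_window_size) // 2
        assert step <= delta, "the shifted window must stay inside the tile"
        acc = np.zeros((scores.shape[1], center_window_size, center_window_size))
        k = 0
        for i in range(-1, 2):
            for j in range(-1, 2):
                # tile k was cropped at (window_top - delta + i*step), so the
                # center window sits at offset (delta - i*step) inside the tile
                top = delta - i * step
                left = delta - j * step
                acc += scores[k, :, top:top + center_window_size,
                              left:left + center_window_size]
                k += 1
        return acc / 9.0
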
Code Example #6
    def run(self,
            TILE_SIZE,
            AGGREGATION_WINDOW_SIZE,
            AGGREGATION_STEP,
            save_scores=False):
        """
        :param TILE_SIZE: Base tile. This corresponds to the INPUT SIZE of the network.
        :param AGGREGATION_WINDOW_SIZE: Size of the center window considered for the aggregation.
        :param AGGREGATION_STEP: Step, in pixels, to calculate the different scores.
        :return:
        """

        # create a temporary folder to store the processing
        if not os.path.exists(self.temp_dir):
            os.mkdir(self.temp_dir)

        # prepare for running
        # DELTA_CROP: margin between the network tile and the center aggregation window
        DELTA_CROP = int((TILE_SIZE - AGGREGATION_WINDOW_SIZE) / 2)
        tile_cols = int(self.wa_width / AGGREGATION_WINDOW_SIZE) + 1
        tile_rows = int(self.wa_height / AGGREGATION_WINDOW_SIZE) + 1

        if torch.cuda.is_available():
            device = torch.device("cuda")
            self.net.to(device)
            torch.cuda.synchronize()

        self.net.eval()

        # classification (per-tiles)
        tiles_number = tile_rows * tile_cols

        self.processing_step = 0
        self.total_processing_steps = 19 * tiles_number

        for row in range(tile_rows):

            if self.flagStopProcessing is True:
                break

            for col in range(tile_cols):

                if self.flagStopProcessing is True:
                    break

                scores = np.zeros((9, self.nclasses, TILE_SIZE, TILE_SIZE))

                # 9 overlapping crops: the tile jittered by +/- AGGREGATION_STEP
                k = 0
                for i in range(-1, 2):
                    for j in range(-1, 2):

                        top = self.wa_top - DELTA_CROP + row * AGGREGATION_WINDOW_SIZE + i * AGGREGATION_STEP
                        left = self.wa_left - DELTA_CROP + col * AGGREGATION_WINDOW_SIZE + j * AGGREGATION_STEP
                        tileimg = utils.cropQImage(
                            self.input_image,
                            [top, left, TILE_SIZE, TILE_SIZE])
                        img_np = utils.qimageToNumpyArray(tileimg)

                        img_np = img_np.astype(np.float32)
                        img_np = img_np / 255.0

                        # H x W x C --> C x H x W
                        img_np = img_np.transpose(2, 0, 1)

                        # Normalization (average subtraction)
                        img_np[0] = img_np[0] - self.average_norm[0]
                        img_np[1] = img_np[1] - self.average_norm[1]
                        img_np[2] = img_np[2] - self.average_norm[2]

                        with torch.no_grad():

                            img_tensor = torch.from_numpy(img_np)
                            input_batch = img_tensor.unsqueeze(0)

                            if torch.cuda.is_available():
                                input_batch = input_batch.to(device)

                            outputs = self.net(input_batch)

                            scores[k] = outputs[0].cpu().numpy()
                            k = k + 1

                            self.processing_step += 1
                            self.updateProgress.emit(
                                (100.0 * self.processing_step) /
                                self.total_processing_steps)
                            QCoreApplication.processEvents()

                if self.flagStopProcessing is True:
                    break

                preds_avg = self.aggregateScores(
                    scores,
                    tile_sz=TILE_SIZE,
                    center_window_size=AGGREGATION_WINDOW_SIZE,
                    step=AGGREGATION_STEP)

                values_t, predictions_t = torch.max(
                    torch.from_numpy(preds_avg), 0)
                preds = predictions_t.cpu().numpy()

                resimg = np.zeros((preds.shape[0], preds.shape[1], 3),
                                  dtype='uint8')
                for label_index in range(self.nclasses):
                    resimg[preds ==
                           label_index, :] = self.label_colors[label_index]

                tilename = str(row) + "_" + str(col) + ".png"
                filename = os.path.join(self.temp_dir, tilename)
                utils.rgbToQImage(resimg).save(filename)

                if save_scores is True:
                    tilename = str(row) + "_" + str(col) + ".dat"
                    filename = os.path.join(self.temp_dir, tilename)
                    with open(filename, 'wb') as fileobject:
                        pkl.dump(preds_avg, fileobject)

                self.processing_step += 1
                self.updateProgress.emit((100.0 * self.processing_step) /
                                         self.total_processing_steps)
                QCoreApplication.processEvents()

        self.assembleTiles(tile_rows,
                           tile_cols,
                           AGGREGATION_WINDOW_SIZE,
                           ass_scores=save_scores)
        torch.cuda.empty_cache()
        del self.net
        self.net = None
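
When save_scores is enabled, each tile's aggregated score map is pickled next to its PNG, which makes post-hoc analysis easy. A short sketch of reading one back (temp_dir and the tile indices are illustrative; the code above uses self.temp_dir):

    import os
    import pickle as pkl

    temp_dir = "temp"  # assumed location
    row, col = 0, 0
    with open(os.path.join(temp_dir, "%d_%d.dat" % (row, col)), "rb") as f:
        preds_avg = pkl.load(f)

    print(preds_avg.shape)  # nclasses x window x window score array
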