Code example #1
File: buddies.py Project: snaiperskaya96/sdaps
    def prepare_mask(self):
        img = self.obj.sheet.get_page_image(self.obj.page_number)
        width, height = self.obj.width, self.obj.height

        matrix = list(img.recognize.matrix)
        # Remove any offset from the matrix
        matrix[4] = 0
        matrix[5] = 0
        matrix = cairo.Matrix(*matrix)

        px_width, px_height = matrix.transform_distance(width, height)
        px_width, px_height = int(math.ceil(px_width)), int(
            math.ceil(px_height))

        surf = cairo.ImageSurface(cairo.FORMAT_A1, px_width, px_height)
        cr = cairo.Context(surf)
        cr.set_source_rgba(0, 0, 0, 0)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()

        # Move to center and apply matrix
        cr.translate(0.5 * px_width, 0.5 * px_height)
        cr.transform(matrix)

        cr.set_source_rgba(0, 0, 0, 1)

        line_width = 1 / 72.0 * 25.4
        cr.set_line_width(line_width)

        matrix.invert()
        xoff, yoff = matrix.transform_distance(px_width / 2.0, px_height / 2.0)
        xoff = xoff - width / 2
        yoff = yoff - height / 2

        return cr, surf, line_width, width, height, xoff, yoff
Code example #2
File: buddies.py Project: bgeneto/sdaps
    def prepare_mask(self):
        img = self.obj.sheet.get_page_image(self.obj.page_number)
        width, height = self.obj.width, self.obj.height

        matrix = list(img.recognize.matrix)
        # Remove any offset from the matrix
        matrix[4] = 0
        matrix[5] = 0
        matrix = cairo.Matrix(*matrix)

        px_width, px_height = matrix.transform_distance(width, height)
        px_width, px_height = int(math.ceil(px_width)), int(math.ceil(px_height))

        surf = cairo.ImageSurface(cairo.FORMAT_A1, px_width, px_height)
        cr = cairo.Context(surf)
        cr.set_source_rgba(0, 0, 0, 0)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()

        # Move to center and apply matrix
        cr.translate(0.5 * px_width, 0.5 * px_height)
        cr.transform(matrix)

        cr.set_source_rgba(0, 0, 0, 1)

        line_width = 1 / 72.0 * 25.4
        cr.set_line_width(line_width)

        matrix.invert()
        xoff, yoff = matrix.transform_distance(px_width / 2.0, px_height / 2.0)
        xoff = xoff - width / 2
        yoff = yoff - height / 2

        return cr, surf, line_width, width, height, xoff, yoff
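Both prepare_mask() variants above hinge on two cairo.Matrix operations: transform_distance(), which applies only the scale/rotation part of the matrix (translation is ignored), and invert(), which reverses the mapping in place. A minimal standalone sketch of those calls, using an illustrative millimetre-to-pixel scale factor that is not taken from sdaps:

import cairo

# Illustrative scale only: roughly 300 dpi expressed as pixels per millimetre.
px_per_mm = 300.0 / 25.4
matrix = cairo.Matrix(px_per_mm, 0, 0, px_per_mm, 0, 0)

# transform_distance() ignores the translation components (x0, y0), so it
# converts sizes, as prepare_mask() does for the box dimensions.
px_w, px_h = matrix.transform_distance(3.5, 3.5)   # a hypothetical 3.5 mm box

# invert() reverses the mapping in place: pixel distances back to millimetres.
matrix.invert()
mm_w, mm_h = matrix.transform_distance(px_w, px_h)  # ~3.5 mm again

prepare_mask() additionally zeroes the matrix offsets (matrix[4] and matrix[5]) before constructing the cairo.Matrix, so only scale and rotation survive when the pixel dimensions are computed.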
Code example #3
File: buddies.py Project: snaiperskaya96/sdaps
    def recognize(self):
        img = self.obj.sheet.get_page_image(self.obj.page_number)

        if img is None or img.recognize.matrix is None:
            self.obj.sheet.valid = 0
            return

        surf, xoff, yoff = self.get_outline_mask()

        matrix, covered = img.recognize.correction_matrix_masked(
            self.obj.x, self.obj.y, surf)
        # Calculate some sort of quality for the checkbox position
        if covered < defs.image_line_coverage:
            pos_quality = 0
        else:
            pos_quality = min(covered + 0.2, 1)

        x, y = matrix.transform_point(self.obj.x, self.obj.y)
        width, height = matrix.transform_distance(self.obj.width,
                                                  self.obj.height)
        self.obj.data.x = x + xoff
        self.obj.data.y = y + yoff
        self.obj.data.width = width
        self.obj.data.height = height

        # The debug struct will be filled in if debugging is enabled in the
        # C library. This is done by the boxgallery script currently.
        self.debug = {}

        mask, xoff, yoff = self.get_inner_mask()
        x, y = self.obj.data.x, self.obj.data.y
        x, y = x + xoff, y + yoff

        x, y = img.recognize.matrix.transform_point(x, y)
        x, y = int(x), int(y)

        remove_line_width = 1.2 * 25.4 / 72.0
        remove_line_width_px = max(
            img.recognize.matrix.transform_distance(remove_line_width,
                                                    remove_line_width))

        coverage = img.recognize.get_masked_coverage(mask, x, y)
        self.obj.data.metrics['coverage'] = coverage
        self.debug['coverage'] = image.get_debug_surface()

        # Remove 3 lines of width 1.2 pt (about 5 px).
        coverage = img.recognize.get_masked_coverage_without_lines(
            mask, x, y, remove_line_width_px, 3)
        self.obj.data.metrics['cov-lines-removed'] = coverage
        self.debug['cov-lines-removed'] = image.get_debug_surface()

        count, coverage = img.recognize.get_masked_white_area_count(
            mask, x, y, 0.05, 1.0)
        self.obj.data.metrics['cov-min-size'] = coverage
        self.debug['cov-min-size'] = image.get_debug_surface()

        state = 0
        quality = -1
        # Iterate the ranges
        for metric, value in self.obj.data.metrics.iteritems():
            metric = defs.checkbox_metrics[
                self.obj.sheet.survey.defs.checkmode][metric]

            for lower, upper in zip(metric[:-1], metric[1:]):
                if value >= lower[0] and value <= upper[0]:
                    # Interpolate quality value
                    if lower[0] != upper[0]:
                        metric_quality = lower[2] + (upper[2] - lower[2]) * (
                            value - lower[0]) / (upper[0] - lower[0])
                    else:
                        metric_quality = lower[2]

                    if metric_quality > quality:
                        state = lower[1]
                        quality = metric_quality

        self.obj.data.state = state
        self.obj.data.quality = min(quality, pos_quality)
Code example #4
File: buddies.py Project: bgeneto/sdaps
    def recognize(self):
        img = self.obj.sheet.get_page_image(self.obj.page_number)

        if img is None or img.recognize.matrix is None:
            self.obj.sheet.valid = 0
            return

        surf, xoff, yoff = self.get_outline_mask()

        matrix, covered = img.recognize.correction_matrix_masked(
            self.obj.x, self.obj.y,
            surf
        )
        # Calculate some sort of quality for the checkbox position
        if covered < defs.image_line_coverage:
            pos_quality = 0
        else:
            pos_quality = min(covered + 0.2, 1)

        x, y = matrix.transform_point(self.obj.x, self.obj.y)
        width, height = matrix.transform_distance(self.obj.width, self.obj.height)
        self.obj.data.x = x + xoff
        self.obj.data.y = y + yoff
        self.obj.data.width = width
        self.obj.data.height = height

        # The debug struct will be filled in if debugging is enabled in the
        # C library. This is done by the boxgallery script currently.
        self.debug = {}

        mask, xoff, yoff = self.get_inner_mask()
        x, y = self.obj.data.x, self.obj.data.y
        x, y = x + xoff, y + yoff

        x, y = img.recognize.matrix.transform_point(x, y)
        x, y = int(x), int(y)

        remove_line_width = 1.2 * 25.4 / 72.0
        remove_line_width_px = max(img.recognize.matrix.transform_distance(remove_line_width, remove_line_width))

        coverage = img.recognize.get_masked_coverage(mask, x, y)
        self.obj.data.metrics['coverage'] = coverage
        self.debug['coverage'] = image.get_debug_surface()

        # Remove 3 lines of width 1.2 pt (about 5 px).
        coverage = img.recognize.get_masked_coverage_without_lines(mask, x, y, remove_line_width_px, 3)
        self.obj.data.metrics['cov-lines-removed'] = coverage
        self.debug['cov-lines-removed'] = image.get_debug_surface()

        count, coverage = img.recognize.get_masked_white_area_count(mask, x, y, 0.05, 1.0)
        self.obj.data.metrics['cov-min-size'] = coverage
        self.debug['cov-min-size'] = image.get_debug_surface()

        state = 0
        quality = -1
        # Iterate the ranges
        for metric, value in self.obj.data.metrics.iteritems():
            metric = defs.checkbox_metrics[self.obj.sheet.survey.defs.checkmode][metric]

            for lower, upper in zip(metric[:-1], metric[1:]):
                if value >= lower[0] and value <= upper[0]:
                    # Interpolate quality value
                    if lower[0] != upper[0]:
                        metric_quality = lower[2] + (upper[2] - lower[2]) * (value - lower[0]) / (upper[0] - lower[0])
                    else:
                        metric_quality = lower[2]

                    if metric_quality > quality:
                        state = lower[1]
                        quality = metric_quality

        self.obj.data.state = state
        self.obj.data.quality = min(quality, pos_quality)
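The metric loop at the end of recognize() performs a piecewise-linear interpolation over tables of (metric value, state, quality) tuples taken from defs.checkbox_metrics. The sketch below isolates that lookup as a standalone function; the table values are hypothetical examples for illustration, not the real sdaps defaults:

# A standalone sketch of the quality lookup used in recognize() above.
# Each table entry is (metric value, state, quality); within a segment the
# quality is interpolated linearly, and the best-scoring segment wins.
def interpolate_quality(value, table):
    state, quality = 0, -1
    for lower, upper in zip(table[:-1], table[1:]):
        if lower[0] <= value <= upper[0]:
            if lower[0] != upper[0]:
                q = lower[2] + (upper[2] - lower[2]) * (value - lower[0]) / (upper[0] - lower[0])
            else:
                q = lower[2]
            if q > quality:
                state, quality = lower[1], q
    return state, quality

# Hypothetical example table: low coverage means unchecked (state 0),
# higher coverage means checked (state 1).
example_table = [(0.0, 0, 1.0), (0.3, 1, 0.5), (0.6, 1, 1.0), (1.0, 1, 1.0)]
print(interpolate_quality(0.45, example_table))  # -> (1, 0.75)

As in the original loop, the reported state is the one attached to the lower bound of the best-scoring segment (lower[1]), and the final quality is capped later by the positional quality derived from the outline coverage.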