Example #1
def comp_pixel_ternary(image):  # 3x3 and 2x2 pixel cross-correlation within image
    # orthogonal comp
    orthdy__ = image[1:] - image[:-1]       # vertical
    orthdx__ = image[:, 1:] - image[:, :-1] # horizontal

    # compute gdert__
    p__ = (image[:-2, :-2] + image[:-2, 1:-1] + image[1:-1, :-2] + image[1:-1, 1:-1]) * 0.25
    dy__ = (orthdy__[:-1, 1:-1] + orthdy__[:-1, :-2]) * 0.5
    dx__ = (orthdx__[1:-1, :-1] + orthdx__[:-2, :-1]) * 0.5
    g__ = ma.hypot(dy__, dx__)
    gdert__ = ma.stack((p__, g__, dy__, dx__))

    # diagonal comp
    diag1__ = image[2:, 2:] - image[:-2, :-2]
    diag2__ = image[2:, :-2] - image[:-2, 2:]

    # compute rdert__
    p3__ = image[1:-1, 1:-1]
    dy3__ = (orthdy__[1:, 1:-1] + orthdy__[:-1, 1:-1]) * 0.25 +\
            (diag1__ + diag2__) * 0.125
    dx3__ = (orthdx__[1:-1, 1:] + orthdx__[1:-1, :-1]) * 0.25 + \
            (diag1__ - diag2__) * 0.125
    g3__ = ma.hypot(dy3__, dx3__)
    rdert__ = ma.stack((p3__, g3__, dy3__, dx3__))

    # gdert__ = comp_2x2(image)  # cross-compare four adjacent pixels diagonally
    # rdert__ = comp_3x3(image)  # compare each pixel to 8 rim pixels

    return gdert__, rdert__
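A minimal usage sketch for the function above (not part of the original; it assumes numpy imported as np and numpy.ma as ma, as elsewhere in these examples):

import numpy as np
import numpy.ma as ma

image = ma.masked_array(np.random.rand(6, 8))  # small grey-scale test frame
gdert__, rdert__ = comp_pixel_ternary(image)
print(gdert__.shape, rdert__.shape)  # (4, 4, 6) (4, 4, 6): (p, g, dy, dx) stacked over the 4x6 central grid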
Example #2
def comp_a(dert__, rng):
    """
    Compute and compare a over predetermined range.
    """
    # Unpack dert__:
    if len(dert__) in (5, 12): # idert or full dert with m.
        i__, g__, m__, dy__, dx__ = dert__[:5]
    else: # idert or full dert without m.
        i__, g__, dy__, dx__ = dert__[:4]

    if len(dert__) > 10: # if ra+:
        a__ = dert__[-7:-5] # Computed angle (use reverse indexing to avoid m check).
        day__ = dert__[-4:-2] # Accumulated day__.
        dax__ = dert__[-2:] # Accumulated dax__.
    else: # if fa:
        # Compute angles:
        a__ = ma.stack((dy__, dx__), axis=0) / g__
        a__.mask = g__.mask

        # Initialize dax, day:
        day__, dax__ = [ma.zeros((2,) + i__.shape) for _ in range(2)]

    # Compute angle differences:
    da__ = translated_operation(a__, rng, angle_diff)
    comp_field = central_slice(rng)


    # Decompose and add to corresponding day and dax:
    day__[comp_field] = (da__ * Y_COEFFS[rng]).mean(axis=-1)
    dax__[comp_field] = (da__ * X_COEFFS[rng]).mean(axis=-1)

    # Apply mask:
    msq = np.ones(a__.shape, dtype=int) # Rim mask.
    msq[comp_field] = a__.mask[comp_field] + da__.mask.sum(axis=-1) # Summed d mask.
    imsq = msq.nonzero()
    day__[imsq] = dax__[imsq] = ma.masked # Apply mask.

    # Compute ga:
    ga__ = ma.hypot(
        ma.arctan2(*day__),
        ma.arctan2(*dax__)
    )[np.newaxis, ...] * SCALER_ga

    try: # dert with m is more common:
        return ma.concatenate( # Concatenate on the first dimension.
            (
                ma.stack((i__, g__, m__, dy__, dx__), axis=0),
                a__, ga__, day__, dax__,
            ),
            axis=0,
        )
    except NameError: # m doesn't exist:
        return ma.concatenate(  # Concatenate on the first dimension.
            (
                ma.stack((i__, g__, dy__, dx__), axis=0),
                a__, ga__, day__, dax__,
            ),
            axis=0,
        )
Example #3
def comp_a(gdert__, rng):
    """
    Compute and compare a over predetermined range.
    """
    # Unpack dert__:
    try:
        g, gg, m, dy, dx = gdert__
    except ValueError:  # Initial dert doesn't contain m.
        g, gg, dy, dx = gdert__

    # Initialize dax, day:
    day, dax = [ma.zeros((2, ) + g.shape) for _ in range(2)]

    # Compute angles:
    a = ma.stack((dy, dx), axis=0) / gg
    a.mask = gg.mask

    # Compute angle differences:
    da = translated_operation(a, rng, angle_diff)
    comp_field = central_slice(rng)

    # Decompose and add to corresponding day and dax:
    day[comp_field] = (da * Y_COEFFS[rng]).mean(axis=-1)
    dax[comp_field] = (da * X_COEFFS[rng]).mean(axis=-1)

    # Apply mask:
    msq = np.ones(a.shape, dtype=int)  # Rim mask.
    msq[comp_field] = a.mask[comp_field] + da.mask.sum(axis=-1)  # Summed d mask.
    imsq = msq.nonzero()
    day[imsq] = dax[imsq] = ma.masked  # Apply mask.

    # Compute ga:
    ga = ma.hypot(ma.arctan2(*day), ma.arctan2(*dax))[np.newaxis, ...] * SCALER_ga

    try:
        return ma.concatenate(  # Concatenate on the first dimension.
            (
                ma.stack((g, gg, m), axis=0),
                ga,
                day,
                dax,
            ),
            axis=0,
        )
    except NameError:  # m doesn't exist.
        return ma.concatenate(  # Concatenate on the first dimension.
            (
                ma.stack((g, gg), axis=0),
                a,
                ga,
                day,
                dax,
            ),
            axis=0,
        )
Example #4
    def _calc_cddcwd(self, values, threshold, calc_type):
        """ Calculates the maximum number of consecutive days with daily values (precipitation) < threshold (CDD) or >= threshold (CWD); the threshold is typically 1 mm.
        """

        if calc_type == 'cdd':
            cmp_func = operator.lt
        elif calc_type == 'cwd':
            cmp_func = operator.ge
        else:
            self.logger.error('Unknown calculation type: %s. Aborting!', calc_type)
            raise ValueError

        data_shape = values.shape[1:]
        cnt = ma.zeros(data_shape)
        max_cnt = ma.zeros(data_shape)
        for arr in values:
            mask = cmp_func(arr, threshold)
            cnt += mask  # Count consecutive days.
            cnt *= mask  # Reset counter where the condition is not met.
            max_cnt = ma.max(ma.stack((max_cnt, cnt)), axis=0)

        max_cnt.mask = values[0].mask  # Restore mask from the original data array.

        return max_cnt
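A minimal sketch of the consecutive-run trick used above (hypothetical scalar data; the real method does the same element-wise on 2D masked arrays): multiplying the counter by the boolean condition resets it wherever the condition fails, so max_cnt ends up holding the longest run.

import operator

daily_precip = [0.0, 2.0, 3.5, 0.2, 1.0, 1.2, 1.1, 0.0]  # one grid cell, mm/day
cnt = max_cnt = 0.0
for value in daily_precip:
    wet = operator.ge(value, 1.0)  # CWD condition: precipitation >= 1 mm
    cnt = (cnt + wet) * wet        # grows on wet days, resets to 0 on dry days
    max_cnt = max(max_cnt, cnt)
print(max_cnt)  # 3.0 -> longest wet spell is 3 consecutive days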
Example #5
def comp_g(dert__, rng):
    """
    Compare g over predetermined range.
    """
    # Unpack dert__:
    g, m, dy, dx = dert__

    # Compare gs:
    d = translated_operation(g, rng, op.sub)
    comp_field = central_slice(rng)

    # Decompose and add to corresponding dy and dx:
    dy[comp_field] += (d * Y_COEFFS[rng]).sum(axis=-1)
    dx[comp_field] += (d * X_COEFFS[rng]).sum(axis=-1)

    # Compute ms:
    m[comp_field] += translated_operation(g, rng, ma.minimum).sum(axis=-1)

    # Apply mask:
    msq = np.ones(g.shape, dtype=int)  # Rim mask.
    msq[comp_field] = g.mask[comp_field] + d.mask.sum(axis=-1)  # Summed d mask.
    imsq = msq.nonzero()
    m[imsq] = dy[imsq] = dx[imsq] = ma.masked  # Apply mask.

    # Compute gg:
    gg = ma.hypot(dy, dx) * SCALER_g[rng]

    return ma.stack((g, gg, m, dy, dx), axis=0)  # ma.stack() for extra array dimension.
Example #6
    def calc_rxndays(self, values, time_grid, threshold):
        """ Calculates Rxnday
        Arguments:
            values -- array of total precipitation
            time_grid -- time grid
            threshold -- number of days (n)
        Returns: Rxnday values
        """

        # We assume the values cover one month at some fixed time step.
        # First, sum the first 'threshold' days.
        # Then slide a window along the time grid,
        #  subtracting one day on the left and adding one day on the right.
        queue = deque()
        nday_sum = None
        max_sum = None
        it_all_data = groupby(zip(values, time_grid),
                              key=lambda x: (x[1].day, x[1].month))
        for _, one_day_group in it_all_data:  # Iterate over daily groups.
            daily_sum = self._calc_daily_sum(one_day_group)
            queue.append(daily_sum)  # Store daily sums in a queue.
            if nday_sum is None:
                nday_sum = ma.zeros(daily_sum.shape)
            nday_sum += daily_sum  # Additionally sum daily sums.
            if len(queue) > threshold:  # When 'threshold' days are summed...
                nday_sum -= queue.popleft()  # ...subtract one 'left-most' daily sum from the n-day sum.
            if len(queue) == threshold:
                if max_sum is None:
                    max_sum = deepcopy(nday_sum)
                else:
                    max_sum = ma.max(ma.stack((max_sum, nday_sum)), axis=0)  # Search for maximum value.

        return max_sum
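The same sliding-window idea on plain scalars (hypothetical daily sums, not part of the source): keep the last n daily sums in a deque and track the maximum n-day total.

from collections import deque

daily_sums = [5.0, 1.0, 0.0, 7.0, 3.0, 2.0]  # hypothetical daily precipitation totals
n = 3
queue, nday_sum, max_sum = deque(), 0.0, None
for daily in daily_sums:
    queue.append(daily)
    nday_sum += daily
    if len(queue) > n:
        nday_sum -= queue.popleft()  # drop the left-most day from the window
    if len(queue) == n:
        max_sum = nday_sum if max_sum is None else max(max_sum, nday_sum)
print(max_sum)  # 12.0 -> the wettest 3-day window is days 4-6 (7.0 + 3.0 + 2.0)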
Example #7
def comp_angle(a__):
    """Compare angles."""

    # handle mask
    if isinstance(a__, ma.masked_array):
        a__.data[a__.mask] = np.nan
        a__.mask = ma.nomask

    # comparison
    da__, a__ = translated_operation(a__, rng=rng, operator=angle_diff)

    # sum within kernels
    day__ = (da__ * Y_COEFFS[rng]).sum(axis=-1)
    dax__ = (da__ * X_COEFFS[rng]).sum(axis=-1)

    # compute gradient magnitudes (how fast angles are changing)
    ga__ = np.hypot(np.arctan2(*day__), np.arctan2(*dax__))

    # pack into dert
    dert__ = ma.stack((*a__, ga__, *day__, *dax__), axis=0)

    # handle mask
    dert__.mask = np.isnan(dert__.data)

    return dert__
Example #8
    def bbox(self, header: PoseHeader):
        data = ma.transpose(self.data, axes=POINTS_DIMS)

        # Split data by components, `ma` doesn't support ".split"
        components = []
        idx = 0
        for component in header.components:
            components.append(data[list(range(idx, idx + len(component.points)))])
            idx += len(component.points)

        boxes = [
            ma.stack([ma.min(c, axis=0), ma.max(c, axis=0)])
            for c in components
        ]
        boxes_cat = ma.concatenate(boxes)
        if type(boxes_cat.mask) == np.bool_:  # Sometimes, it doesn't concatenate the mask...
            boxes_mask = ma.concatenate([b.mask for b in boxes])
            boxes_cat = ma.array(boxes_cat, mask=boxes_mask)

        new_data = ma.transpose(boxes_cat, axes=POINTS_DIMS)

        confidence_mask = np.split(new_data.mask, [-1], axis=3)[0]
        confidence_mask = np.squeeze(confidence_mask, axis=-1)
        confidence = np.where(confidence_mask, 0, 1)

        return NumPyPoseBody(self.fps, new_data, confidence)
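A minimal sketch of the per-component bounding-box step (toy 2D points, assuming numpy.ma imported as ma): ma.min/ma.max skip masked points, so a missing point does not inflate the box.

import numpy as np
import numpy.ma as ma

points = ma.masked_array(
    [[1.0, 5.0], [3.0, 2.0], [9.0, 9.0]],
    mask=[[False, False], [False, False], [True, True]],  # last point is missing
)
box = ma.stack([ma.min(points, axis=0), ma.max(points, axis=0)])
print(box)  # [[1.0 2.0] [3.0 5.0]] -- the masked point is ignored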
Example #9
def comp_g_old(dert__):  # cross-comp of g in 2x2 kernels, between derts in ma.stack dert__

    g__, dy__, dx__ = dert__[[3, 4, 5]]  # g, dy, dx -> local i, idy, idx
    g__[ma.where(g__ == 0)] = 1  # replace 0 values with 1 to avoid error, not needed in high-g blobs?

    g0__, dy0__, dx0__ = g__[:-1, :-1], dy__[:-1, :-1], dx__[:-1, :-1]  # top left
    g1__, dy1__, dx1__ = g__[:-1, 1:], dy__[:-1, 1:], dx__[:-1, 1:]     # top right
    g2__, dy2__, dx2__ = g__[1:, 1:], dy__[1:, 1:], dx__[1:, 1:]        # bottom right
    g3__, dy3__, dx3__ = g__[1:, :-1], dy__[1:, :-1], dx__[1:, :-1]     # bottom left

    sin0__ = dy0__ / g0__
    cos0__ = dx0__ / g0__
    sin1__ = dy1__ / g1__
    cos1__ = dx1__ / g1__
    sin2__ = dy2__ / g2__
    cos2__ = dx2__ / g2__
    sin3__ = dy3__ / g3__
    cos3__ = dx3__ / g3__
    '''
    cosine of difference between diagonally opposite angles, in vector representation
    '''
    cos_da0__ = (cos2__ * cos0__) + (sin2__ * sin0__)  # top left to bottom right
    cos_da1__ = (cos3__ * cos1__) + (sin3__ * sin1__)  # top right to bottom left

    dgy__ = ((g3__ + g2__) - (g0__ * cos_da0__ + g1__ * cos_da1__))
    # y-decomposed cosine difference between gs
    dgx__ = ((g1__ + g2__) - (g0__ * cos_da0__ + g3__ * cos_da1__))
    # x-decomposed cosine difference between gs

    gg__ = np.hypot(dgy__, dgx__)  # gradient of gradient

    mg0__ = np.minimum(g0__, g2__) * cos_da0__  # g match = min(g, _g) *cos(da)
    mg1__ = np.minimum(g1__, g3__) * cos_da1__
    mg__ = mg0__ + mg1__

    g__ = g__[:-1, :-1]  # remove last row and column to align with derived params
    dy__ = dy__[:-1, :-1]
    dx__ = dx__[:-1, :-1]  # -> idy, idx to compute cos for comp rg

    # no longer needed: g__.mask = dy__.mask = dx__.mask = gg__.mask?
    '''
    next comp_rg will use g, dy, dx     
    next comp_gg will use gg, dgy, dgx  
    '''
    return ma.stack((g__, dy__, dx__, gg__, dgy__, dgx__, mg__))
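A quick check of the vector form of the angle difference used above (a sketch, not from the source): with sin = dy/g and cos = dx/g, cos(a - _a) = cos*_cos + sin*_sin.

import numpy as np

a, _a = np.deg2rad(30.0), np.deg2rad(75.0)
sin, cos = np.sin(a), np.cos(a)
_sin, _cos = np.sin(_a), np.cos(_a)
cos_da = cos * _cos + sin * _sin           # vector form, as in cos_da0__ / cos_da1__ above
print(np.isclose(cos_da, np.cos(a - _a)))  # True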
Example #10
    def read_v0_0(cls, header: PoseHeader, reader: BufferReader):
        fps, _frames = reader.unpack(ConstStructs.double_ushort)

        _dims = max([len(c.format) for c in header.components]) - 1
        _points = sum([len(c.points) for c in header.components])

        frames_d = []
        frames_c = []
        for _ in range(_frames):
            _people = reader.unpack(ConstStructs.ushort)
            people_d = []
            people_c = []
            for pid in range(_people):
                reader.advance(ConstStructs.short)  # Skip Person ID
                person_d = []
                person_c = []
                for component in header.components:
                    points = np.array(
                        reader.unpack_numpy(
                            ConstStructs.float,
                            (len(component.points), len(component.format))))
                    dimensions, confidence = np.split(points, [-1], axis=1)
                    boolean_confidence = np.where(confidence > 0, 0, 1)  # To create the mask
                    mask = np.column_stack(tuple([boolean_confidence] * (len(component.format) - 1)))

                    person_d.append(ma.masked_array(dimensions, mask=mask))
                    person_c.append(np.squeeze(confidence, axis=-1))

                if pid == 0:
                    people_d.append(ma.concatenate(person_d))
                    people_c.append(np.concatenate(person_c))

            # In case no person, should all be zeros
            if len(people_d) == 0:
                people_d.append(np.zeros((_points, _dims)))
                people_c.append(np.zeros(_points))

            frames_d.append(ma.stack(people_d))
            frames_c.append(np.stack(people_c))

        return cls(fps, ma.stack(frames_d), ma.stack(frames_c))
Example #11
def comp_a(dert__, fga):  # cross-comp of a or aga in 2x2 kernels
    '''
    if fga: dert = (g, gg, dgy, dgx, gm, ?(iga, iday, idax))
    else:   dert = (i, g, dy, dx, ?m)
    '''
    dert__ = shape_check(dert__)  # remove derts of incomplete kernels
    i__, g__, dy__, dx__ = dert__[0:4]

    if fga:  # input is adert
        ga__, day__, dax__ = dert__[4], dert__[5:7], dert__[7:9]
        a__ = [day__[0], day__[1], dax__[0], dax__[1]] / ga__
    else:
        a__ = [dy__, dx__] / g__  # similar to calc_a

    # each shifted a in 2x2 kernel
    a__topleft = a__[:, :-1, :-1]
    a__topright = a__[:, :-1, 1:]
    a__botright = a__[:, 1:, 1:]
    a__botleft = a__[:, 1:, :-1]

    # diagonal angle differences:
    sin_da0__, cos_da0__ = angle_diff(a__topleft, a__botright, fga)
    sin_da1__, cos_da1__ = angle_diff(a__topright, a__botleft, fga)

    ma__ = np.hypot(sin_da0__ + 1, cos_da0__ + 1) + np.hypot(sin_da1__ + 1, cos_da1__ + 1)
    # ma = inverse angle match = SAD: convert sin and cos da to 0->2 range

    day__ = (-sin_da0__ - sin_da1__), (cos_da0__ + cos_da1__)
    # angle change in y, sines are sign-reversed because da0 and da1 are top-down, no reversal in cosines

    dax__ = (-sin_da0__ + sin_da1__), (cos_da0__ + cos_da1__)
    # angle change in x, positive sign is right-to-left, so only sin_da0__ is sign-reversed
    '''
    sin(-θ) = -sin(θ), cos(-θ) = cos(θ): 
    sin(da) = -sin(-da), cos(da) = cos(-da) => (sin(-da), cos(-da)) = (-sin(da), cos(da))
    '''
    ga__ = np.hypot(np.arctan2(*day__), np.arctan2(*dax__))
    # angle gradient, a scalar, to evaluate for comp_aga

    adert__ = ma.stack((i__[:-1, :-1],  # for summation in Dert
                        g__[:-1, :-1],  # for summation in Dert
                        dy__[:-1, :-1],  # passed on as idy
                        dx__[:-1, :-1],  # passed on as idx  # no use for m__[:-1, :-1]?
                        ga__,
                        *day__,
                        *dax__,
                        ma__,
                        cos_da0__,
                        cos_da1__
                        ))
    '''
    next comp_g will use g, cos_da0__, cos_da1__, dy, dx (passed to comp_rg as idy, idx)
    next comp_a will use ga, day, dax  # comp_aga
    '''
    return adert__
Example #12
def comp_2x2(image):
    """Deprecated."""
    dy__ = (image[1:-1, 1:-1] + image[1:-1, :-2]
            - image[:-2, 1:-1] - image[:-2, :-2]) * 0.5
    dx__ = (image[1:-1, 1:-1] + image[:-2, 1:-1]
            - image[1:-1, :-2] - image[:-2, :-2]) * 0.5
    # sum pixel values and reconstruct central pixel as their average:
    p__ = (image[:-2, :-2] + image[:-2, 1:-1]
           + image[1:-1, :-2] + image[1:-1, 1:-1]) * 0.25
    g__ = np.hypot(dy__, dx__)  # compute gradients per kernel, converted to 0-255 range
    return ma.stack((p__, g__, dy__, dx__))
Example #13
def comp_3x3(image):

    # initialization
    dy__ = np.zeros((image.shape[0] - 2, image.shape[1] - 2))
    dx__ = np.zeros((image.shape[0] - 2, image.shape[1] - 2))

    # start at the 2nd pixel and end at the 2nd-to-last pixel:
    # the 1st and last pixels don't have the full 8 surrounding pixels
    for y in range(image.shape[0] - 2):
        for x in range(image.shape[1] - 2):

            # 3x3 kernel, given 9 pixels below:
            #  p1 p2 p3
            #  p8 c1 p4
            #  p7 p6 p5
            # directional differences, d = p - c
            # dx = sum(8 directional differences * x coefficients)
            # dy = sum(8 directional differences * y coefficients)
            # p__ = central pixel, which is c1

            # get difference in 8 directions = directional pixels - center pixel
            d_top_left = (image[y][x] - image[y + 1][x + 1])
            d_top = (image[y][x + 1] - image[y + 1][x + 1])
            d_top_right = (image[y][x + 2] - image[y + 1][x + 1])
            d_right = (image[y + 1][x + 2] - image[y + 1][x + 1])
            d_bottom_right = (image[y + 2][x + 2] - image[y + 1][x + 1])
            d_bottom = (image[y + 2][x + 1] - image[y + 1][x + 1])
            d_bottom_left = (image[y + 2][x] - image[y + 1][x + 1])
            d_left = (image[y + 1][x] - image[y + 1][x + 1])


            dy__[y,x] =   (d_top_left     * YCOEF[0]) +\
                          (d_top          * YCOEF[1]) +\
                          (d_top_right    * YCOEF[2]) +\
                          (d_right        * YCOEF[3]) +\
                          (d_bottom_right * YCOEF[4]) +\
                          (d_bottom       * YCOEF[5]) +\
                          (d_bottom_left  * YCOEF[6]) +\
                          (d_left         * YCOEF[7])

            dx__[y,x] =   (d_top_left     * XCOEF[0]) +\
                          (d_top          * XCOEF[1]) +\
                          (d_top_right    * XCOEF[2]) +\
                          (d_right        * XCOEF[3]) +\
                          (d_bottom_right * XCOEF[4]) +\
                          (d_bottom       * XCOEF[5]) +\
                          (d_bottom_left  * XCOEF[6]) +\
                          (d_left         * XCOEF[7])

    p__ = image[1:-1, 1:-1]  # central pixels
    g__ = np.hypot(dy__, dx__)  # compute gradients per kernel, converted to 0-255 range

    return ma.stack((p__, g__, dy__, dx__))
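A worked single-kernel example of the scheme described in the comments above (hand-written 3x3 patch; YCOEF/XCOEF are not defined in this snippet, the values below are the ones listed in the other comp_3x3 variants in these examples):

import numpy as np

YCOEF = np.array([-0.5, -1, -0.5, 0, 0.5, 1, 0.5, 0])  # clockwise from top-left
XCOEF = np.array([-0.5, 0, 0.5, 1, 0.5, 0, -0.5, -1])

patch = np.array([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0],
                  [7.0, 8.0, 9.0]])
center = patch[1, 1]
rim = np.array([patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
                patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]])
d = rim - center                 # 8 directional differences, d = p - c
dy, dx = (d * YCOEF).sum(), (d * XCOEF).sum()
print(dy, dx, np.hypot(dy, dx))  # 12.0 4.0 12.649...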
Example #14
def translated_array(a, rng):
    """
    Like translated_operation, but without applying operation to slices
    """
    out = ma.stack([a[ts] for ts in TRANSLATING_SLICES_[rng]])

    # Rearrange axes:
    for dim1, dim2 in pairwise(range(out.ndim)):
        out = out.swapaxes(dim1, dim2)

    return out
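The pairwise swapaxes loop above just walks the stacking axis to the end of the shape; a small equivalence sketch (hypothetical array; pairwise here is itertools.pairwise, Python 3.10+):

import numpy as np
from itertools import pairwise

out = np.arange(8 * 5 * 6).reshape(8, 5, 6)  # e.g. 8 translated slices of a 5x6 array
moved = out
for dim1, dim2 in pairwise(range(moved.ndim)):
    moved = moved.swapaxes(dim1, dim2)
print(moved.shape)                                     # (5, 6, 8)
print(np.array_equal(moved, np.moveaxis(out, 0, -1)))  # True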
Example #15
def comp_pixel_old(image):  # 2x2 pixel cross-correlation within image

    dy__ = image[1:] - image[:-1]        # orthogonal vertical comp
    dx__ = image[:, 1:] - image[:, :-1]  # orthogonal horizontal comp

    p__ = image[:-1, :-1]  #  top-left pixel
    mean_dy__ = (dy__[:, 1:] + dy__[:, :-1]) * 0.5  # mean dy per kernel
    mean_dx__ = (dx__[1:, :] + dx__[:-1, :]) * 0.5  # mean dx per kernel
    g__ = ma.hypot(mean_dy__, mean_dx__)  # central gradient of four rim pixels
    dert__ = ma.stack((p__, g__, mean_dy__, mean_dx__))

    return dert__
Example #16
def comp_g(dert__):  # add fga if processing in comp_ga is different?
    """
    Cross-comp of g or ga in 2x2 kernels, between derts in ma.stack dert__:
    input dert  = (i, g, dy, dx, ga, dyy, dxy, dyx, dxx, ma, cos_da0, cos_da1)
    output dert = (g, gg, dgy, dgx, gm, ga, day, dax, dy, dx)
    """

    dert__ = shape_check(dert__)  # remove derts of incomplete kernels
    g__, cos_da0__, cos_da1__ = dert__[[1, -2, -1]]  # top dimension of numpy stack must be a list

    cos_da0__ = cos_da0__[:-1, :-1]
    cos_da1__ = cos_da1__[:-1, :-1]

    g_topleft__ = g__[:-1, :-1]
    g_topright__ = g__[:-1, 1:]
    g_bottomleft__ = g__[1:, :-1]
    g_bottomright__ = g__[1:, 1:]

    dgy__ = ((g_bottomleft__ + g_bottomright__) -
             (g_topleft__ * cos_da0__ + g_topright__ * cos_da1__))
    # y-decomposed cosine difference between gs

    dgx__ = ((g_topright__ + g_bottomright__) -
             (g_topleft__ * cos_da0__ + g_bottomleft__ * cos_da1__))
    # x-decomposed cosine difference between gs

    gg__ = np.hypot(dgy__, dgx__)  # gradient of gradient

    mg0__ = np.minimum(g_topleft__, (g_bottomright__ * cos_da0__))  # g match = min(g, _g*cos(da))
    mg1__ = np.minimum(g_topright__, (g_bottomleft__ * cos_da1__))
    mg__ = mg0__ + mg1__

    gdert = ma.stack((g__[:-1, :-1],  # remove last row and column to align with derived params
                      gg__,
                      dgy__,
                      dgx__,
                      mg__,
                      dert__[4][:-1, :-1],  # ga__
                      dert__[5][:-1, :-1],  # dayy
                      dert__[6][:-1, :-1],  # daxy
                      dert__[7][:-1, :-1],  # dayx
                      dert__[8][:-1, :-1],  # daxx
                      dert__[9][:-1, :-1],  # ma__
                      dert__[2][:-1, :-1],  # idy__
                      dert__[3][:-1, :-1]  # idx__
                      ))
    '''
    next comp_r will use g, idy, idx   # comp_rg
    next comp_a will use ga, day, dax  # comp_agg, also dgy__, dgx__ as idy, idx?
    '''
    return gdert
Example #17
def comp_pixel(image):  # comparison between pixel and its neighbours within kernel, for the whole image

    # Initialize variables:
    if kwidth == 2:

        # Compare:
        dy__ = (image[1:, 1:] - image[:-1, 1:]) + (image[1:, :-1] - image[:-1, :-1]) * 0.5
        dx__ = (image[1:, 1:] - image[1:, :-1]) + (image[:-1, 1:] - image[:-1, :-1]) * 0.5

        # Sum pixel values:
        p__ = (image[:-1, :-1] + image[:-1, 1:] + image[1:, :-1] + image[1:, 1:]) * 0.25

    else:
        ycoef = np.array([-0.5, -1, -0.5, 0, 0.5, 1, 0.5, 0])
        xcoef = np.array([-0.5, 0, 0.5, 1, 0.5, 0, -0.5, -1])

        # Compare by subtracting centered image from translated image:
        d___ = np.array(list(
            map(lambda trans_slices: image[trans_slices] - image[1:-1, 1:-1],
                [
                    (slice(None, -2), slice(None, -2)),
                    (slice(None, -2), slice(1, -1)),
                    (slice(None, -2), slice(2, None)),
                    (slice(1, -1), slice(2, None)),
                    (slice(2, None), slice(2, None)),
                    (slice(2, None), slice(1, -1)),
                    (slice(2, None), slice(None, -2)),
                    (slice(1, -1), slice(None, -2)),
                ]
            )
        )).swapaxes(0, 2).swapaxes(0, 1)

        # Decompose differences:
        dy__ = (d___ * ycoef).sum(axis=2)
        dx__ = (d___ * xcoef).sum(axis=2)

        # Sum pixel values:
        p__ = image[1:-1, 1:-1]

    # Compute gradient magnitudes per kernel:
    g__ = np.hypot(dy__, dx__) * 0.354801226089485
    #  no m__ = MAX_G - g__, immediate vm = -vg - Ave

    return ma.around(ma.stack((p__, g__, dy__, dx__), axis=0))
Example #18
def comp_pixel_m(image):  # current version of 2x2 pixel cross-correlation within image

    # following four slices provide inputs to a sliding 2x2 kernel:
    topleft__ = image[:-1, :-1]
    topright__ = image[:-1, 1:]
    botleft__ = image[1:, :-1]
    botright__ = image[1:, 1:]

    dy__ = ((botleft__ + botright__) - (topleft__ + topright__))  # same as diagonal from left
    dx__ = ((topright__ + botright__) - (topleft__ + botleft__))  # same as diagonal from right
    g__ = np.hypot(dy__, dx__)  # gradient per kernel

    # inverse match = SAD: measure of variation within kernel
    m__ = abs(topleft__ - botright__) + abs(topright__ - botleft__)

    return ma.stack((topleft__, g__, dy__, dx__, m__))
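A worked 2x2 kernel for the function above (hand-picked pixel values, just to illustrate the dy/dx/g/m arithmetic; assumes numpy as np and numpy.ma as ma):

import numpy as np
import numpy.ma as ma

image = np.array([[1.0, 2.0],
                  [4.0, 3.0]])  # one 2x2 kernel
dert__ = comp_pixel_m(image)
print(dert__[:, 0, 0])  # [1.0 4.0 4.0 0.0 4.0] -> topleft p, g, dy, dx, m (SAD)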
Example #19
def comp_r(dert__, fig):

    i__, g__, dy__, dx__ = dert__[:]
    dy__ = dy__[:, rng:-rng, rng:-rng]
    dx__ = dx__[:, rng:-rng, rng:-rng]

    # comparison
    d__ = translated_operation(i__, rng=rng, operator=op.sub)

    # sum within kernels
    dy__ += (d__ * Y_COEFFS[rng]).sum(axis=-1)
    dx__ += (d__ * X_COEFFS[rng]).sum(axis=-1)

    # compute gradient magnitudes
    g__ = ma.hypot(dy__, dx__)

    return ma.stack((i__, g__, dy__, dx__))
Example #20
def comp_3x3(image):
    """Deprecated."""
    d___ = np.array(  # subtract centered image from translated image:
        [image[ts2] - image[ts1] for ts1, ts2 in TRANSLATING_SLICES_PAIRS_3x3]
    ).swapaxes(0, 2).swapaxes(0, 1)
    # 3rd dimension: sequence of differences between pairs of
    # diametrically opposed pixels corresponding to:
    #          |--(clockwise)--+              |--(clockwise)--+
    # YCOEF: -0.5    -1  -0.5  ¦  XCOEF:    -0.5   0    0.5   ¦
    #          0           0   ¦             -1          1    ¦
    #         0.5     1   0.5  ¦            -0.5   0    0.5   ¦
    #                    <<----+                        <<----+
    # Decompose differences into dy and dx, same as Gy and Gx in conventional edge detection operators:
    dy__ = (d___ * YCOEF).sum(axis=2)
    dx__ = (d___ * XCOEF).sum(axis=2)
    p__ = image[1:-1, 1:-1]
    g__ = np.hypot(dy__, dx__)  # compute gradients per kernel, converted to 0-255 range

    return ma.stack((p__, g__, dy__, dx__))
Example #21
def comp_pixel(image):  # 3x3 or 2x2 pixel cross-correlation within image

    if kwidth == 2:  # cross-compare four adjacent pixels diagonally:

        dy__ = (image[1:, 1:] - image[:-1, 1:]) + (image[1:, :-1] - image[:-1, :-1]) * 0.5
        dx__ = (image[1:, 1:] - image[1:, :-1]) + (image[:-1, 1:] - image[:-1, :-1]) * 0.5

        # or no coef: distance 1.41 * angle .705 -> 1? and conversion only for extended kernel, if centered?
        # sum pixel values and reconstruct central pixel as their average:

        p__ = (image[:-1, :-1] + image[:-1, 1:] + image[1:, :-1] + image[1:, 1:]) * 0.25

    else:  # kwidth == 3, compare central pixel to 8 rim pixels, current default option

        ycoef = np.array([-0.5, -1, -0.5, 0, 0.5, 1, 0.5, 0])
        xcoef = np.array([-0.5, 0, 0.5, 1, 0.5, 0, -0.5, -1])

        d___ = np.array(list(  # subtract centered image from translated image:
            map(lambda trans_slices: image[trans_slices] - image[1:-1, 1:-1],
                [
                    (slice(None, -2), slice(None, -2)),
                    (slice(None, -2), slice(1, -1)),
                    (slice(None, -2), slice(2, None)),
                    (slice(1, -1), slice(2, None)),
                    (slice(2, None), slice(2, None)),
                    (slice(2, None), slice(1, -1)),
                    (slice(2, None), slice(None, -2)),
                    (slice(1, -1), slice(None, -2)),
                ]
            )
        )).swapaxes(0, 2).swapaxes(0, 1)

        # Decompose differences into dy and dx, same as Gy and Gx in conventional edge detection operators:

        dy__ = (d___ * ycoef).sum(axis=2)
        dx__ = (d___ * xcoef).sum(axis=2)

        p__ = image[1:-1, 1:-1]

    g__ = np.hypot(dy__, dx__) * 0.354801226089485  # compute gradients per kernel, converted to 0-255 range

    return ma.around(ma.stack((p__, g__, dy__, dx__), axis=0))
Example #22
def comp_pixel(image):  # comparison of central pixel to rim pixels in 2x2 or 3x3 kernel, for the whole image

    if kwidth == 2:
        # Cross-compare four adjacent pixels:
        dy__ = (image[1:, 1:] - image[:-1, 1:]) + (image[1:, :-1] - image[:-1, :-1]) * 0.5
        dx__ = (image[1:, 1:] - image[1:, :-1]) + (image[:-1, 1:] - image[:-1, :-1]) * 0.5
        # Sum pixel values:
        p__ = (image[:-1, :-1]
               + image[:-1, 1:]
               + image[1:, :-1]
               + image[1:, 1:]) * 0.25
    else:
        ycoef = np.array([-0.5, -1, -0.5, 0, 0.5, 1, 0.5, 0])
        xcoef = np.array([-0.5, 0, 0.5, 1, 0.5, 0, -0.5, -1])

        # Compare by subtracting centered image from translated image:
        d___ = np.array(list(
            map(lambda trans_slices: image[trans_slices] - image[1:-1, 1:-1],
                [
                    (slice(None, -2), slice(None, -2)),
                    (slice(None, -2), slice(1, -1)),
                    (slice(None, -2), slice(2, None)),
                    (slice(1, -1), slice(2, None)),
                    (slice(2, None), slice(2, None)),
                    (slice(2, None), slice(1, -1)),
                    (slice(2, None), slice(None, -2)),
                    (slice(1, -1), slice(None, -2)),
                ]
            )
        )).swapaxes(0, 2).swapaxes(0, 1)

        # Decompose differences:
        dy__ = (d___ * ycoef).sum(axis=2)
        dx__ = (d___ * xcoef).sum(axis=2)

        p__ = image[1:-1, 1:-1]

    # Compute gradients per kernel:
    g__ = np.hypot(dy__, dx__) * 0.354801226089485  # no m__ = MAX_G - g__

    return ma.around(ma.stack((p__, g__, dy__, dx__), axis=0))
Example #23
def comp_3x3_loop(image):
    buff_ = deque()  # buffer for derts
    Y, X = image.shape

    for p_ in image:  # loop through rows
        for p in p_:  # loop through each pixel
            dx = dy = 0  # uni-lateral differences
            # loop through buffers:
            for k, xcoeff, ycoeff in zip(
                [0, X - 1, X, X + 1],  # indices of _dert
                [0.25, -0.125, 0, 0.125],  # x axis coefficient
                [0, 0.125, 0.25, 0.125]):  # y axis coefficient
                try:
                    _p, _dy, _dx = buff_[k]  # unpack buff_[k]
                    d = p - _p  # compute difference
                    dx_buff = d * xcoeff  # decompose difference
                    dy_buff = d * ycoeff
                    dx += dx_buff  # accumulate fuzzy difference over the kernel
                    _dx += dx_buff
                    dy += dy_buff
                    _dy += dy_buff

                    buff_[k] = _p, _dy, _dx  # repack buff_[k]

                except TypeError:  # buff_[k] is None
                    pass
                except IndexError:  # k >= len(buff_)
                    break

            buff_.appendleft((p, dy, dx))  # initialize dert with uni-lateral differences
        buff_.appendleft(None)  # add empty dert at the end of each row

    # reshape data and compute g (temporary, to perform tests)
    p__, dy__, dx__ = np.array([buff for buff in reversed(buff_) if buff is not None])\
        .reshape(Y, X, 3).swapaxes(1, 2).swapaxes(0, 1)[:, 1:-1, 1:-1]

    g__ = ma.hypot(dy__, dx__)

    return ma.stack((p__, g__, dy__, dx__))
Example #24
def comp_2x2(image):

    # initialize empty array
    dx__ = np.zeros((
        image.shape[0] - 1,
        image.shape[1] - 1,
    ))
    dy__ = np.zeros((
        image.shape[0] - 1,
        image.shape[1] - 1,
    ))
    p__ = np.zeros((
        image.shape[0] - 1,
        image.shape[1] - 1,
    ))

    # slide across each pixel, compute dy,dx and reconstruct central pixel
    for y in range(image.shape[0] - 1):
        for x in range(image.shape[1] - 1):

            # 2x2 kernel, given 4 pixels below:
            #  p1 p2
            #  p3 p4
            # dx  = ((p2-p3) + (p4-p1)) /2 (get mean of diagonal difference)
            # dy  = ((p4-p1) + (p3-p2)) /2 (get mean of diagonal difference)
            # p__ = (p1+p2+p3+p4) /4

            dx__[y,x] = ((image[y,x+1] - image[y+1,x]) + \
                         (image[y+1,x+1] - image[y,x]))/2

            dy__[y,x] =   ((image[y+1,x+1] - image[y,x]) + \
                           (image[y+1,x] - image[y,x+1]))/2

            p__[y,x] = ((image[y,x] + image[y+1,x]) + \
                       (image[y,x+1] + image[y+1,x+1]))/4

    g__ = np.hypot(dy__, dx__)  # compute gradients per kernel, converted to 0-255 range

    return ma.stack((p__, g__, dy__, dx__))
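Plugging numbers into the kernel formulas from the comments above (p1..p4 hand-picked; assumes numpy as np and numpy.ma as ma):

import numpy as np
import numpy.ma as ma

image = np.array([[1.0, 2.0],   # p1 p2
                  [3.0, 4.0]])  # p3 p4
p__, g__, dy__, dx__ = comp_2x2(image)
print(p__[0, 0], dy__[0, 0], dx__[0, 0], g__[0, 0])  # 2.5 2.0 1.0 2.236...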
Example #25
def comp_pixel(image):  # 2x2 pixel cross-correlation within image, as in edge detection operators

    # input slices into sliding 2x2 kernel, each slice is a shifted 2D frame of grey-scale pixels:
    topleft__ = image[:-1, :-1]
    topright__ = image[:-1, 1:]
    bottomleft__ = image[1:, :-1]
    bottomright__ = image[1:, 1:]

    Gy__ = (bottomleft__ + bottomright__) - (topleft__ + topright__)  # same as decomposition of two diagonal differences into Gy
    Gx__ = (topright__ + bottomright__) - (topleft__ + bottomleft__)  # same as decomposition of two diagonal differences into Gx

    G__ = np.hypot(Gy__, Gx__)  # central gradient per kernel, between its four vertex pixels
    # why ma?
    return ma.stack((topleft__, G__, Gy__, Gx__))  # tuple of 2D arrays per param of dert (derivatives' tuple)
Example #26
def comp_g(dert__, odd):
    g__ = dert__[0]
    a__ = dert__[1:]

    # loop through each pair of comparands in a kernel
    dgy__ = ma.zeros(np.subtract(g__.shape, rng))
    dgx__ = ma.zeros(np.subtract(g__.shape, rng))
    for x_coeff, y_coeff, (ts, _ts) in zip(X_COEFFS[rng], Y_COEFFS[rng],
                                           TRANSLATING_SLICES_PAIRS_[rng]):
        # find angle differences
        da__ = angle_diff(a__[ts], a__[_ts])

        # compute dg: dg = g - _g * cos(da) at each position
        dg__ = g__[ts] - g__[_ts] * da__[1]

        # accumulate dgy, dgx
        dgx__ += dg__ * x_coeff
        dgy__ += dg__ * y_coeff

    gg__ = ma.hypot(dgy__, dgx__)

    return ma.stack((g__, gg__, dgy__, dgx__))
Example #27
def write_trace_species_raster(data, profile, outname):
    """
    Takes a list of numpy arrays (each representing a trace species),
    a profile dictionary and an output filename.
    It sums the values of the stacked array along axis=0, i.e. across the pixels
    "lying on top of each other", and writes this aggregate 2D array as a new raster.
    Effectively this represents the cumulative contribution of all species masked as "trace" in each pixel.
    """

    #  Stack the input data
    all_layers = ma.stack(data)

    #  Add up all the pixels lying "on top of" each other in the stack
    layers_sum = all_layers.sum(axis=0)

    print(
        f'\n Writing output raster with summed trace species contributions: {outname}\n'
    )

    #  Write the data
    with rio.open(outname, 'w', **profile) as dest:
        dest.write(layers_sum.filled(-9999))

    return
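A minimal sketch of the stack-and-sum step (toy masked layers, assuming numpy.ma imported as ma): ma.stack keeps the per-layer masks and the sum along axis=0 skips masked cells.

import numpy.ma as ma

layer_a = ma.masked_array([[1.0, 2.0]], mask=[[False, True]])
layer_b = ma.masked_array([[3.0, 4.0]], mask=[[False, False]])
layers_sum = ma.stack([layer_a, layer_b]).sum(axis=0)
print(layers_sum)                # [[4.0 4.0]] -- the masked cell does not contribute
print(layers_sum.filled(-9999))  # what would be written to the raster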
Example #28
    def _run(self, calc_mode):
        """ Main method of the class. Reads data arrays, process them and returns results. """

        self.logger.info('Started!')
        self.logger.info('Calculation mode: %s', calc_mode)

        # Get inputs
        input_uids = self._data_helper.input_uids()
        assert input_uids, '(CalcBasicStat::run) No input arguments!'

        # Get parameters
        parameters = None
        if len(input_uids) == MAX_N_INPUT_ARGUMENTS:
            parameters = self._data_helper.get(input_uids[INPUT_PARAMETERS_INDEX])

        # Check parameters
        if calc_mode not in parameters:
            self.logger.error('Error! No parameter \'%s\' in module parameters! Check task-file!', calc_mode)
            raise ValueError

        # Get outputs
        output_uids = self._data_helper.output_uids()
        assert output_uids, '(CalcBasicStat::run) No output arguments!'

        # Get time segments and levels
        time_segments = self._data_helper.get_segments(input_uids[0])
        vertical_levels = self._data_helper.get_levels(input_uids[0])

        # Get input data info and pass through units to the result description.
        data_info = self._data_helper.get_data_info(input_uids[0])
        input_description = data_info['description']
        result_description = {
            '@name': input_description['@name'],
            '@units': input_description['@units']
        }

        # Make a shortcut to the desired statistical function for segment and final processing.
        if calc_mode == 'timeMean':
            seg_stat_func = ma.mean
            final_stat_func = ma.mean
            final_title = 'Average of ' + input_description['@title']
        elif calc_mode == 'timeMin':
            seg_stat_func = ma.min
            final_stat_func = ma.min
            final_title = 'Minimum of ' + input_description['@title']
        elif calc_mode == 'timeMax':
            seg_stat_func = ma.max
            final_stat_func = ma.max
            final_title = 'Maximum of ' + input_description['@title']
        elif calc_mode == 'timeMeanPrec':
            seg_stat_func = ma.sum
            final_stat_func = ma.mean
            final_title = 'Average sum of ' + input_description['@title']

        for level in vertical_levels:
            all_segments_data = []
            for segment in time_segments:
                one_segment_time_grid = []

                # Get data
                result = self._data_helper.get(input_uids[0],
                                               segments=segment,
                                               levels=level)

                # Daily statistics.
                if parameters[calc_mode] == 'day':
                    one_segment_data = []
                    data_time_iter = itertools.zip_longest(
                        result['data'][level][segment['@name']]['@values'],
                        result['data'][level][segment['@name']]['@time_grid'])
                    for date_key, group in itertools.groupby(
                            data_time_iter, key=self._data_time_key):
                        group_data = []
                        for data, _ in group:
                            group_data.append(data)
                        group_data = ma.stack(group_data)
                        one_segment_time_grid.append(date_key)

                        # Calculate time statistics for the current time group (day)
                        one_segment_data.append(
                            seg_stat_func(group_data, axis=0))

                    one_segment_data = ma.stack(one_segment_data)

                # Calculate time statistics for a current time segment
                if (parameters[calc_mode] == 'data') or (parameters[calc_mode] == 'segment'):
                    if len(result['data'][level][segment['@name']]['@time_grid']) > 1:
                        one_segment_data = seg_stat_func(
                            result['data'][level][segment['@name']]['@values'],
                            axis=0)
                    else:
                        one_segment_data = result['data'][level][segment['@name']]['@values']
                    mid_time = result['data'][level][segment['@name']]['@time_grid'][0] + \
                               (result['data'][level][segment['@name']]['@time_grid'][-1] - \
                                result['data'][level][segment['@name']]['@time_grid'][0]) / 2
                    one_segment_time_grid.append(mid_time)

                # For segment-wise averaging send to the output current time segment results
                # or store them otherwise.
                if (parameters[calc_mode] == 'day') or (parameters[calc_mode] == 'segment'):
                    self._data_helper.put(output_uids[0],
                                          values=one_segment_data,
                                          level=level,
                                          segment=segment,
                                          longitudes=result['@longitude_grid'],
                                          latitudes=result['@latitude_grid'],
                                          times=one_segment_time_grid,
                                          fill_value=result['@fill_value'],
                                          meta=result['meta'],
                                          description=result_description)
                elif parameters[calc_mode] == 'data':
                    all_segments_data.append(one_segment_data)

            # For data-wise analysis, analyse the per-segment results :)
            if parameters[calc_mode] == 'data':
                data_out = final_stat_func(ma.stack(all_segments_data), axis=0)

                # Make a global segment covering all input time segments
                full_range_segment = copy(time_segments[0])  # Take the beginning of the first segment...
                full_range_segment['@ending'] = time_segments[-1]['@ending']  # ...and the end of the last one.
                full_range_segment['@name'] = 'GlobalSeg'  # Give it a new name.

                # Correct title and name
                if final_title is not None:
                    result_description['@title'] = final_title

                self._data_helper.put(output_uids[0],
                                      values=data_out,
                                      level=level,
                                      segment=full_range_segment,
                                      longitudes=result['@longitude_grid'],
                                      latitudes=result['@latitude_grid'],
                                      fill_value=result['@fill_value'],
                                      meta=result['meta'],
                                      description=result_description)

        self.logger.info('Finished!')
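A minimal sketch of the daily grouping used in the 'day' branch above (hypothetical timestamps and scalar values, with a stand-in for self._data_time_key; the real code groups whole 2D masked fields the same way):

import itertools
from datetime import datetime
import numpy.ma as ma

values = [1.0, 2.0, 3.0, 4.0]
time_grid = [datetime(2000, 1, 1, 0), datetime(2000, 1, 1, 12),
             datetime(2000, 1, 2, 0), datetime(2000, 1, 2, 12)]
data_time_iter = itertools.zip_longest(values, time_grid)
daily_stats = []
for date_key, group in itertools.groupby(data_time_iter, key=lambda pair: pair[1].date()):
    group_data = ma.stack([data for data, _ in group])
    daily_stats.append(group_data.mean(axis=0))  # seg_stat_func for 'timeMean'
print(daily_stats)  # [1.5, 3.5] -- one value per day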
Example #29
    def run(self):
        """ Main method of the class. Reads data arrays, process them and returns results. """

        self.logger.info('Started!')

        # Get inputs
        input_uids = self._data_helper.input_uids()
        assert input_uids, 'Error! No input arguments!'

        # Get parameters
        parameters = None
        if len(input_uids) == MAX_N_INPUT_ARGUMENTS:  # If parameters are given.
            parameters = self._data_helper.get(input_uids[INPUT_PARAMETERS_INDEX])
        threshold = float(self._get_parameter('Threshold', parameters, DEFAULT_VALUES))
        calc_mode = self._get_parameter('Mode', parameters, DEFAULT_VALUES)

        self.logger.info('Threshold: %s', threshold)
        self.logger.info('Calculation mode: %s', calc_mode)

        # Get outputs
        output_uids = self._data_helper.output_uids()
        assert output_uids, 'Error! No output arguments!'

        # Get time segments and levels
        time_segments = self._data_helper.get_segments(input_uids[DATA_UID])
        vertical_levels = self._data_helper.get_levels(input_uids[DATA_UID])

        data_func = ma.max  # For calc_mode == 'data' we calculate max over all segments.

        # Set result units.
        result_description = deepcopy(self._data_helper.get_data_info(input_uids[0])['description'])
        result_description['@title'] = 'Count of days with precipitation >= {} mm'.format(threshold)
        result_description['@units'] = 'days'

        # Main loop
        for level in vertical_levels:
            all_segments_data = []
            for segment in time_segments:
                # Read data
                data = self._data_helper.get(input_uids[DATA_UID],
                                             segments=segment,
                                             levels=level)
                values = data['data'][level][segment['@name']]['@values']
                time_grid = data['data'][level][segment['@name']]['@time_grid']

                one_segment_data = self.calc_rnnmm(values, time_grid,
                                                   threshold)

                # For segment-wise averaging send to the output current time segment results
                # or store them otherwise.
                if calc_mode == 'segment':
                    self._data_helper.put(output_uids[0],
                                          values=one_segment_data,
                                          level=level,
                                          segment=segment,
                                          longitudes=data['@longitude_grid'],
                                          latitudes=data['@latitude_grid'],
                                          fill_value=data['@fill_value'],
                                          description=result_description,
                                          meta=data['meta'])
                elif calc_mode == 'data':
                    all_segments_data.append(one_segment_data)
                else:
                    self.logger.error('Error! Unknown calculation mode: \'%s\'', calc_mode)
                    raise ValueError

            # For data-wise analysis, analyse the per-segment results :)
            if calc_mode == 'data':
                data_out = data_func(ma.stack(all_segments_data), axis=0)

                # Make a global segment covering all input time segments
                full_range_segment = deepcopy(time_segments[0])  # Take the beginning of the first segment...
                full_range_segment['@ending'] = time_segments[-1]['@ending']  # ...and the end of the last one.
                full_range_segment['@name'] = 'GlobalSeg'  # Give it a new name.

                self._data_helper.put(output_uids[0],
                                      values=data_out,
                                      level=level,
                                      segment=full_range_segment,
                                      longitudes=data['@longitude_grid'],
                                      latitudes=data['@latitude_grid'],
                                      fill_value=data['@fill_value'],
                                      meta=data['meta'],
                                      description=result_description)

        self.logger.info('Finished!')
Example #30
            swe_per_level.append(masked_data)

        if parameter == 191 and level_type == 'sfc' and level in [721, 722, 723]:
            print('Found SR, layer {}'.format(level))
            raw_data = grib_message['values']
            masked_data = ma.masked_where(
                raw_data == grib_message['missingValue'], raw_data)

            sr_per_level.append(masked_data)

print('Snow depth calculation...')

swe_all_levels = ma.stack(swe_per_level)
sr_all_levels = ma.stack(sr_per_level)

snow_depth = ma.sum(swe_all_levels / sr_all_levels, axis=0)

print('Generating output GRIB')

if out_message is not None:
    missing_value = out_message['missingValue']
    out_message['values'] = snow_depth.filled(fill_value=missing_value)
    out_message['level'] = 721  # FIXME: correct level?
    out_message['indicatorOfParameter'] = 141  # FIXME: correct parameter id?

    with open(outfile_snow_depth, 'wb') as out_file:
        out_message.write(out_file)