Example #1
def buffered_bounded_lemire_uint8(bitgen, rng, bcnt, buf):
    """
    Generates a random unsigned 8 bit integer bounded
    within a given interval using Lemire's rejection.

    The buffer acts as storage for a 32 bit integer
    drawn from the associated BitGenerator so that
    multiple integers of smaller bitsize can be generated
    from a single draw of the BitGenerator.
    """
    # Note: `rng` must not be 0xFF; if it were, `rng_excl` would wrap around
    # to zero.
    rng_excl = uint8(rng) + uint8(1)

    assert (rng != 0xFF)

    # Generate a scaled random number.
    n, bcnt, buf = buffered_uint8(bitgen, bcnt, buf)
    m = uint16(n * rng_excl)

    # Rejection sampling to remove any bias
    leftover = m & 0xFF

    if (leftover < rng_excl):
        # `rng_excl` is a simple upper bound for `threshold`.
        threshold = ((uint8(UINT8_MAX) - rng) % rng_excl)

        while (leftover < threshold):
            n, bcnt, buf = buffered_uint8(bitgen, bcnt, buf)
            m = uint16(n * rng_excl)
            leftover = m & 0xFF

    return m >> 8, bcnt, buf
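
The function above keeps the high byte of the 16-bit product n * rng_excl and rejects draws whose low byte falls below 256 % rng_excl, which removes the bias that a plain modulo reduction would introduce. A minimal pure-Python sketch of the same step, using random.getrandbits in place of the BitGenerator:

import random

def lemire_bounded_uint8(rng):
    """Unbiased integer in [0, rng] (rng < 255) via Lemire's rejection."""
    rng_excl = rng + 1                 # exclusive upper bound
    threshold = 256 % rng_excl         # smallest acceptable low byte
    while True:
        n = random.getrandbits(8)      # one 8-bit draw
        m = n * rng_excl               # 16-bit product
        if (m & 0xFF) >= threshold:    # accept: the high byte is then uniform
            return m >> 8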
Example #2
 def __init__(self):
     self.piece = int64(0)
     self.pattern = np.zeros((4, 2), dtype=np.uint8)
     self.status1 = np.zeros((4, 2), dtype=np.uint8)
     self.status4 = np.zeros(2, dtype=np.uint8)
     self.adj1 = uint8(0)
     self.adj2 = uint8(0)
Example #3
    def mainloop(tables):
        link_libs()
        eta_ret = 0

        uettp_initial
        init_llvm
        beforeloop_code
        if READER.size > 4:
            SYNCRate_pspr = READER[4]  # TODO: unhack
        while True:
            AbsTime_ps = VCHN_next(ptr_VCHN, ptr_fileid, ptr_chn)
            chn = scalar_chn[0]
            fileid = scalar_fileid[0]
            if AbsTime_ps == 9223372036854775807:  # full stop
                break
            looping
            if INTERRUPT:
                eta_ret += INTERRUPT
                break
            if fileid < num_rslot:
                controller_rfile_time = FileReader_pop_event(
                    ptr_READER, nb.uint8(fileid), ptr_chn_next)
                if controller_rfile_time == 9223372036854775807:  # early stop
                    break
                else:
                    eta_ret += POOL_update(ptr_VCHN, nb.int64(controller_rfile_time),
                                           nb.uint8(fileid), nb.uint8(scalar_chn_next[0]))
        deinit
        return eta_ret
Example #4
def rotatedCubeOverlap(gt, dt, hAxis):
    # cube to box
    gbox = cuda.local.array((5, ), dtype=numba.float32)
    dbox = cuda.local.array((5, ), dtype=numba.float32)
    tInd = 0
    for ind in range(4):
        tInd += (tInd % 3 == hAxis)
        gbox[ind], dbox[ind] = gt[tInd], dt[tInd]
        tInd += 1
    gbox[-1], dbox[-1] = gt[-1], dt[-1]  # rot
    # box to vertex
    gv = cuda.local.array((8, ), dtype=numba.float32)
    box2vertex(gbox, gv)
    dv = cuda.local.array((8, ), dtype=numba.float32)
    box2vertex(dbox, dv)
    # obtain vertex of intersection area
    interVertex, vertexNum = cuda.local.array((16, ), dtype=numba.float32), numba.uint8(0)
    vertexNum = numba.uint8(vertexInInter(gv, dv, interVertex, vertexNum, True))
    vertexNum = numba.uint8(vertexInInter(dv, gv, interVertex, vertexNum, False))
    vertexNum = numba.uint8(cuspOnSide(gv, dv, interVertex, vertexNum))
    if vertexNum:
        # sort inner points
        sortVertex(interVertex, vertexNum)
        # calculate intersection area
        boxInter = calInterArea(interVertex, vertexNum)
        # calculate intersection volume
        gHoffset, dHoffset = gt[hAxis + 3] / 2, dt[hAxis + 3] / 2
        hInter = max(0, min(gt[hAxis] + gHoffset, dt[hAxis] + dHoffset) - max(gt[hAxis] - gHoffset, dt[hAxis] - dHoffset))
        cubeInter = boxInter * hInter
    else:
        cubeInter = 0.
    return cubeInter
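
The height term hInter above is simply the overlap of two 1-D intervals given their centres gt[hAxis], dt[hAxis] and full extents gt[hAxis + 3], dt[hAxis + 3]; as a standalone sketch:

def interval_overlap(center_a, extent_a, center_b, extent_b):
    """Overlap length of two 1-D intervals, mirroring the hInter computation above."""
    top = min(center_a + extent_a / 2, center_b + extent_b / 2)
    bottom = max(center_a - extent_a / 2, center_b - extent_b / 2)
    return max(0.0, top - bottom)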
Example #5
 def __init__(self, x=uint8(0), y=uint8(0), value=int32(0)):
     """
     @type self: OXMove
     @type x: np.uint8
     @type y: np.uint8
     @type value: np.int32
     """
     self.x = x
     self.y = y
     self.value = value
Example #6
def morton_encode_nb(coords):
    """
    >>> x, y = np.arange(8), np.arange(8)
    >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
    >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 8, 8)

    For the sake of clarity, let's inspect these values

    >>> with np.printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}):
    ...     print(inp)
    [[[0.000 1.000 2.000 3.000 4.000 5.000 6.000 7.000]
      [1.000 1.414 2.236 3.162 4.123 5.099 6.083 7.071]
      [2.000 2.236 2.828 3.606 4.472 5.385 6.325 7.280]
      [3.000 3.162 3.606 4.243 5.000 5.831 6.708 7.616]
      [4.000 4.123 4.472 5.000 5.657 6.403 7.211 8.062]
      [5.000 5.099 5.385 5.831 6.403 7.071 7.810 8.602]
      [6.000 6.083 6.325 6.708 7.211 7.810 8.485 9.220]
      [7.000 7.071 7.280 7.616 8.062 8.602 9.220 9.899]]]

    We rearrange them according to the Morton encoding, and then reshape
    the resulting array into the same dimensions as the chunks. You can see
    that (for the most part) it groups similar frequency ranges close to
    each other (TODO: perhaps this isn't the best visualization...)

    >>> with np.printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}):
    ...     print(morton_encode_nb(inp)[0].reshape(8, 8))
    [[0.000 1.000 1.000 1.414 2.000 3.000 2.236 3.162]
     [2.000 2.236 3.000 3.162 2.828 3.606 3.606 4.243]
     [4.000 5.000 4.123 5.099 6.000 7.000 6.083 7.071]
     [4.472 5.385 5.000 5.831 6.325 7.280 6.708 7.616]
     [4.000 4.123 5.000 5.099 4.472 5.000 5.385 5.831]
     [6.000 6.083 7.000 7.071 6.325 6.708 7.280 7.616]
     [5.657 6.403 6.403 7.071 7.211 8.062 7.810 8.602]
     [7.211 7.810 8.062 8.602 8.485 9.220 9.220 9.899]]

    :param coords: coords is [BS, chunk1, chunk2, chunk3...]. All chunks
        must have the same size!
    :return: rearranged array of size [BS, product of chunk sizes]
    """
    assert len(coords.shape) <= 9
    # assert all(coord == coords[1] for coord in coords[1:])
    n = len(coords.shape) - 1

    bs = coords.shape[0]
    chunk_size = nb.uint8(coords.shape[n])
    total_chunk_size = nb.int64(chunk_size ** n)

    chunk_bits = log2i(chunk_size - 1)

    ind = np.zeros(n, dtype=nb.uint8)

    output = np.zeros((bs, total_chunk_size), dtype=coords.dtype)

    for index, x in np.ndenumerate(coords):
        for i in range(n):
            ind[i] = np.uint8(index[1 + i])
        morton_offset = encode_single_coord(ind, chunk_bits)
        output[index[0], morton_offset] = x

    return output
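
For reference, Morton (Z-order) encoding interleaves the bits of the chunk indices; a minimal 2-D sketch, independent of the encode_single_coord helper used above (whose bit ordering may differ):

def morton_2d(x, y, bits=3):
    """Interleave the low `bits` bits of x and y into a Z-order index."""
    code = 0
    for b in range(bits):
        code |= ((x >> b) & 1) << (2 * b)        # x bits go to even positions
        code |= ((y >> b) & 1) << (2 * b + 1)    # y bits go to odd positions
    return code

# morton_2d(3, 5) == 0b100111 == 39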
Example #7
def buffered_uint8(bitgen, bcnt, buf):
    if not bcnt:
        buf = next_uint32(bitgen)
        bcnt = 3
    else:
        buf >>= 8
        bcnt -= 1

    return uint8(buf), bcnt, buf
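
Each 32-bit draw supplies four bytes: bcnt counts how many bytes are still left in buf, and every call either refills the buffer (bcnt = 3) or shifts the next byte into the low position. A plain-Python sketch of the same buffering, assuming any next_uint32 source:

import random

def next_uint32():
    return random.getrandbits(32)

def buffered_uint8_py(bcnt, buf):
    if not bcnt:
        buf = next_uint32()   # refill: four fresh bytes
        bcnt = 3              # three more bytes remain after this one
    else:
        buf >>= 8             # expose the next byte
        bcnt -= 1
    return buf & 0xFF, bcnt, buf

# eight bytes consume exactly two 32-bit draws
bcnt, buf = 0, 0
for _ in range(8):
    byte, bcnt, buf = buffered_uint8_py(bcnt, buf)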
Example #8
def _rotate_nullbit(nullbit):
    # TODO: Arrow assumes little endian. Detect big endian machines and modify
    #       rotation direction.
    nullbit = (nullbit << 1) & 255

    # Have we looped?
    if nullbit == 0:
        return numba.uint8(1)

    return nullbit
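
Starting from 1, repeated calls walk a single set bit through positions 0 to 7 and wrap back to 1 after the eighth call, which is what the little-endian TODO above refers to. A quick pure-Python check (dropping the numba.uint8 cast):

def rotate_nullbit(nullbit):
    nullbit = (nullbit << 1) & 255
    return 1 if nullbit == 0 else nullbit

bits = [1]
for _ in range(8):
    bits.append(rotate_nullbit(bits[-1]))
# bits == [1, 2, 4, 8, 16, 32, 64, 128, 1]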
Example #9
def _read_boolean(position, block):
    """Read a single byte whose value is either 0 (false) or 1 (true).

    Returns:
        Tuple[int, numba.uint8]:
            (new position, boolean)

    """
    # We store bool as a bit array. Return 0xff so that we can bitwise AND with
    # the mask that says which bit to write to.
    value = numba.uint8(0xff if block[position] != 0 else 0)
    return (position + 1, value)
Example #10
def median_filter(pixels,m,n):
	sub_img=cuda.shared.array(shape=kernel_size,dtype=uint8) #A shared array to store the elements of the kernel
	block_id_x=cuda.blockIdx.x
	block_id_y=cuda.blockIdx.y
	thread_id_x=cuda.threadIdx.x
	thread_id_y=cuda.threadIdx.y
	Memory_position_x=block_id_x+thread_id_x-1
	Memory_position_y=block_id_y+thread_id_y-1
	linear_index=thread_id_x*kernel_width+thread_id_y
	isVisible = not (Memory_position_x < 0 or Memory_position_x > m or Memory_position_y < 0 or Memory_position_y > n)
	if not isVisible:
		sub_img[linear_index]=0
	else:
		sub_img[linear_index]=pixels[Memory_position_x,Memory_position_y]

	cuda.syncthreads()  # wait for every thread to finish writing its pixel into shared memory
	if thread_id_x==1 and thread_id_y==1:
		for x in range(len(sub_img)-1,0,-1):
			for i in range(x):
				if sub_img[i]>sub_img[i+1]:
					temp=sub_img[i]
					sub_img[i]=sub_img[i+1]
					sub_img[i+1]=temp
		median=sub_img[int((kernel_size+1)/2)-1] if (kernel_size)%2==1 else uint8((sub_img[int((kernel_size)/2)-1]+sub_img[int((kernel_size)/2)])/2)
		pixels[Memory_position_x,Memory_position_y]=median
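
Assuming kernel_width is 3 (so kernel_size is 9), a plain-NumPy host-side reference with the same zero fill for out-of-range positions could look like the hypothetical helper below; it is only meant for checking the kernel output, not for use on the device.

import numpy as np

def median_filter_reference(pixels):
    """3x3 median with zero padding, matching the kernel's out-of-range fill."""
    padded = np.pad(pixels, 1, mode="constant", constant_values=0)
    out = np.empty_like(pixels)
    for r in range(pixels.shape[0]):
        for c in range(pixels.shape[1]):
            out[r, c] = np.median(padded[r:r + 3, c:c + 3])
    return out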
Example #11
                            fy=image_scale,
                            interpolation=cv2.INTER_AREA)
        if filter_size > 0:
            im = cv2.boxFilter(im, -1, (filter_size, filter_size))
        if color_invert:
            im = 255 - im
        if clip > 0:
            im = np.maximum(im, clip) - clip

        if self.set_diagnostic == "filtered":
            self.diagnostic_image = im

        return NodeOutput([], im)


@vectorize([uint8(float32, uint8)])
def negdif(xf, y):
    """

    Parameters
    ----------
    x :

    y :


    Returns
    -------

    """
    x = np.uint8(xf)
Example #12
import math
import numpy as np
import numba
from numba import cuda
from numba.cuda import random as ncrand


@cuda.jit(numba.uint8(numba.uint8), device=True, inline=True)
def hamming(n):
    """
    Hamming weight, i.e. number of set bits

    :param n: np.uint8
    :return:  np.uint8
    """
    # recursively divide in two, combining sums by bit shifting and adding
    n = (n & np.uint8(85)) + ((n >> 1) & np.uint8(85))  # 85=01010101b
    n = (n & np.uint8(51)) + ((n >> 2) & np.uint8(51))  # 51=00110011b
    n = (n & np.uint8(15)) + ((n >> 4) & np.uint8(15))  # 15=00001111b
    return n


@cuda.jit(device=True, inline=True)
def calc_single_interaction_energy(index, spins, shape_shifts,
                                   coupling_indices, coupling_constants):
    energy = 0.0
    num_dim = shape_shifts.size
    num_neighbors = coupling_constants.size
    for n_index in range(num_neighbors):
        other_index = 0
        shift = 0
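
The hamming helper above is the classic SWAR popcount: each step adds neighbouring bit groups in parallel using the masks 0b01010101, 0b00110011 and 0b00001111. A host-side NumPy check of the same masks (a hypothetical helper, separate from the CUDA device function):

import numpy as np

def hamming_ref(n):
    n = (n & np.uint8(85)) + ((n >> 1) & np.uint8(85))
    n = (n & np.uint8(51)) + ((n >> 2) & np.uint8(51))
    n = (n & np.uint8(15)) + ((n >> 4) & np.uint8(15))
    return n

assert all(hamming_ref(np.uint8(v)) == bin(v).count("1") for v in range(256))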
Example #13
def numba_decompress_blocks(input, block_size, last_block_size, block_ends,
                            output):
    num_blocks = len(block_ends)

    for p in numba.prange(num_blocks):
        if p == 0:
            i = numba.uint64(0)
        else:
            i = numba.uint64(block_ends[p - numba.uint(1)])

        block_end = numba.uint64(block_ends[p])
        j = numba.uint64(block_size * p)

        if (p == (num_blocks - numba.uint8(1))):
            end = j + numba.uint64(last_block_size)
        else:
            end = j + numba.uint64(block_size)

        while ((j < end) and (i < block_end)):
            t1 = numba.uint16((input[i] & 0xF0) >> 4)  # literal run length (high nibble of the token)
            t2 = numba.uint16((input[i] & 0x0F) + 4)   # match length (low nibble + minimum match of 4)
            i += numba.uint8(1)

            if (t1 == 15):
                while input[i] == 255:
                    t1 += numba.uint8(input[i])
                    i += numba.uint8(1)

                t1 += numba.uint8(input[i])
                i += numba.uint8(1)

            for n in range(t1):
                output[j] = input[i]
                i += numba.uint8(1)
                j += numba.uint8(1)

            if (j >= end): break

            off = numba.uint16(input[i]) + (numba.uint16(input[i + 1]) << 8)
            i += numba.uint8(2)

            if (t2 == 19):
                while input[i] == 255:
                    t2 += numba.uint8(input[i])
                    i += numba.uint8(1)

                t2 += numba.uint8(input[i])
                i += numba.uint8(1)

            for n in range(t2):
                output[j] = output[j - off]
                j += numba.uint8(1)
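
The token layout decoded above (literal run length in the high nibble, match length = low nibble + 4, both extended by 255-valued bytes, and a 16-bit little-endian match offset) resembles the LZ4 block format. A hand-built single block, assuming the function is compiled with numba.njit(parallel=True) as the prange suggests:

import numpy as np

# token 0x13: 1 literal byte, then a (3 + 4)-byte match at offset 1 -> eight copies of "a"
compressed = np.array([0x13, ord("a"), 0x01, 0x00], dtype=np.uint8)
output = np.zeros(8, dtype=np.uint8)
numba_decompress_blocks(compressed, 8, 8, np.array([4], dtype=np.int64), output)
# output now holds eight copies of ord("a")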
Example #14
 def __init__(self, x=uint8(0), y=uint8(0)):
     self.x = x
     self.y = y
Example #15
def invert(img: MyImage):
    return MyImage(uint8(255) - img.array)
Example #16
def _get_quadric_coefficients(dem: np.ndarray,
                              resolution: float,
                              fill_method: str = "median",
                              edge_method: str = "nearest") -> np.ndarray:
    """
    Run the pixel-wise analysis in parallel.

    See the xdem.terrain.get_quadric_coefficients() docstring for more info.
    """
    # Rename the resolution to be consistent with the ArcGIS reference.
    L = resolution

    # Allocate the output.
    output = np.empty((9, ) + dem.shape, dtype=dem.dtype) + np.nan

    # Convert the string to a number (fewer bytes to compare each iteration)
    if fill_method == "median":
        fill_method_n = numba.uint8(0)
    elif fill_method == "mean":
        fill_method_n = numba.uint8(1)
    elif fill_method == "none":
        fill_method_n = numba.uint8(2)

    if edge_method == "nearest":
        edge_method_n = numba.uint8(0)
    elif edge_method == "wrap":
        edge_method_n = numba.uint8(1)
    elif edge_method == "none":
        edge_method_n = numba.uint8(2)

    # Loop over every pixel concurrently.
    for i in numba.prange(dem.size):
        # Derive its associated row and column index.
        col = i % dem.shape[1]
        row = int(i / dem.shape[1])

        # Extract the pixel and its 8 immediate neighbours.
        # If the border is reached, just duplicate the closest neighbour to obtain 9 values.
        Z = np.empty((9, ), dtype=dem.dtype)
        count = 0

        # If edge_method == "none", validate that it's not near an edge. If so, leave the nans without filling.
        if edge_method_n == 2:
            if (row < 1) or (row > (dem.shape[0] - 2)) or (col < 1) or (
                    col > (dem.shape[1] - 2)):
                continue

        for j in range(-1, 2):
            for k in range(-1, 2):
                # Here the "nearest" edge_method is performed.
                if edge_method_n == 0:
                    row_indexer = min(max(row + k, 0), dem.shape[0] - 1)
                    col_indexer = min(max(col + j, 0), dem.shape[1] - 1)
                elif edge_method_n == 1:
                    row_indexer = (row + k) % dem.shape[0]
                    col_indexer = (col + j) % dem.shape[1]
                Z[count] = dem[row_indexer, col_indexer]
                count += 1

        # Get a mask of all invalid (nan or inf) values.
        invalids = ~np.isfinite(Z)
        n_invalid = np.count_nonzero(invalids)

        # Skip the pixel if it and all of its neighbours are invalid
        if np.all(invalids):
            continue

        if np.count_nonzero(invalids) > 0:
            if fill_method_n == 0:
                # Fill all non-finite values with the most common value.
                Z[invalids] = np.nanmedian(Z)
            elif fill_method_n == 1:
                # Fill all non-finite values with the mean.
                Z[invalids] = np.nanmean(Z)
            elif fill_method_n == 2:
                # Skip the pixel if any of its neighbours are nan.
                continue
            else:
                # This should not occur.
                pass

        # Assign the A, B, C, D etc., factors to the output. This ugly syntax is needed to make parallel numba happy.
        output[0, row,
               col] = ((Z[0] + Z[2] + Z[6] + Z[8]) / 4 -
                       (Z[1] + Z[3] + Z[5] + Z[7]) / 2 + Z[4]) / (L**4)  # A
        output[1, row, col] = ((Z[0] + Z[2] - Z[6] - Z[8]) / 4 -
                               (Z[1] - Z[7]) / 2) / (L**3)  # B
        output[2, row, col] = ((-Z[0] + Z[2] - Z[6] + Z[8]) / 4 +
                               (Z[3] - Z[5]) / 2) / (L**3)  # C
        output[3, row, col] = ((Z[3] + Z[5]) / 2 - Z[4]) / (L**2)  # D
        output[4, row, col] = ((Z[1] + Z[7]) / 2 - Z[4]) / (L**2)  # E
        output[5, row, col] = (-Z[0] + Z[2] + Z[6] - Z[8]) / (4 * L**2)  # F
        output[6, row, col] = (-Z[3] + Z[5]) / (2 * L)  # G
        output[7, row, col] = (Z[1] - Z[7]) / (2 * L)  # H
        output[8, row, col] = Z[4]  # I

    return output
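
The nine output planes hold the per-pixel quadric coefficients A through I; index 8 is simply the centre elevation Z[4]. A hypothetical call, assuming the function is compiled with numba.njit(parallel=True) as the prange loop implies:

import numpy as np

dem = np.random.rand(6, 7).astype("float32")
coeffs = _get_quadric_coefficients(dem, 2.0, "median", "nearest")
assert coeffs.shape == (9, 6, 7)
assert np.allclose(coeffs[8], dem)  # plane I is the centre elevation itself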
Example #17
import time

import numba
import numpy as np


@numba.jit(numba.uint8(numba.complex64, numba.complex64))
def cnt(z, c):
    k = 0
    while k < 100:
        z = z * z + c
        if z.real**2 + z.imag**2 > 4:
            break
        k += 1
    return k


@numba.jit
def mand(M, N):
    init_z = complex(0.0, 0.0)
    grid = np.empty((M, N), dtype=np.uint8)
    xs = np.linspace(-2, 2, N)
    ys = np.linspace(-2, 2, M)
    for j, y in enumerate(ys):
        for i, x in enumerate(xs):
            grid[j, i] = cnt(init_z, complex(x, y))
    return grid


def main():
    s = time.time()
Example #18
from doppelspeller.common import (
    get_ground_truth, get_train_data, get_test_data, get_words_counter, get_data_for_one_title
)

LOGGER = logging.getLogger(__name__)

DATA_TYPE_MAPPING = {
    c.DATA_TYPE_TRAIN: get_train_data,
    c.DATA_TYPE_TEST: get_test_data,
    c.DATA_TYPE_SINGLE: get_data_for_one_title,
}
WORD_ENCODING_ZEROS = [0] * s.MAX_CHARACTERS_ALLOWED_IN_THE_TITLE
WORD_COUNTER_ZEROS = [0] * s.NUMBER_OF_WORDS_FEATURES


@numba.njit(numba.uint8(numba.uint8[:], numba.uint8[:]), fastmath=True)
def fast_levenshtein_ratio(sequence, sequence_to_compare_against):
    """
    Returns the Levenshtein ratio for encoded string sequences. For example, the string "coolblue bv" is converted into:
        - np.array([4, 16, 16, 13, 3, 13, 22, 6, 1, 3, 23])
    """
    length_x = sequence.shape[0]
    length_y = sequence_to_compare_against.shape[0]
    total_length = length_x + length_y

    if length_x > length_y:
        length_x, length_y = length_y, length_x
        sequence, sequence_to_compare_against = sequence_to_compare_against, sequence

    size_x = length_x + 1
    size_y = length_y + 1
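
The snippet above is cut off before the dynamic-programming table is filled in. For reference, a compact pure-Python edit distance (the distance underlying such a ratio, not the jitted routine itself) can be written as:

def levenshtein_distance(a, b):
    """Classic two-row dynamic-programming edit distance."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1,                 # deletion
                            curr[j - 1] + 1,             # insertion
                            prev[j - 1] + (ca != cb)))   # substitution
        prev = curr
    return prev[-1]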
Example #19
            gray_scale = expand(gray_scale)
            return gray_scale, boxes, meta_image
        kernel = np.ones((5, 5), np.uint8)
        gray_scale = augment_boxes(gray_scale, boxes.astype(np.int32), kernel,
                                   self.dialation_prob)
        # toc2 = time.time() - tic - toc1
        # print('Agument: %4.3f' % toc2)
        # gray_scale = np.repeat(gray_scale[:,:, np.newaxis], 3, axis=2)
        gray_scale = expand(gray_scale)
        q = 0.4 + 0.2 * np.random.rand()
        gray_scale = np.array(q * gray_scale + (1 - q) * image, dtype=np.uint8)
        # print('expand: %4.3f' % toc3)
        return gray_scale, boxes, meta_image


@jit(uint8(uint8, int32, uint8, float32), nogil=True)
def augment_boxes(gray_scale, boxes, kernel, prob):
    for b in boxes:
        img = gray_scale[b[1]:b[3], b[0]:b[2]]
        p = np.random.rand()
        if p < prob and img.shape[0] > 1 and img.shape[1] > 1:
            # if img.shape[0] > 1 and img.shape[1] > 1:
            augmented = cv2.dilate(img, kernel=kernel, iterations=1)
        else:
            augmented = img
        gray_scale[b[1]:b[3], b[0]:b[2]] = augmented
    return gray_scale


@jit(uint8(uint8), nogil=True)
def expand(gray_scale):
Example #20
def mask_nonskin(im_src, mask):
    return uint8((mask//255)*im_src)
Example #21
# Cr <= (-1.15*Cb)+301.75 and
# Cr <= (-2.2857*Cb)+432.85
import DatasetPrepare

import cv2
import matplotlib.pyplot as plt
import os
import PIL
from numba import vectorize,float64,uint8,guvectorize

# backSub = cv2.createBackgroundSubtractorMOG2()

# @vectorize([uint8[:,:](uint8[:,:],uint8[:,:],uin8[:,:])])
# def mask_pixel1(pixel):
#     return mask_pixel(pixel[0],pixel[1],pixel[2])
@vectorize([uint8(uint8,uint8,uint8,uint8,uint8,uint8,uint8,uint8,uint8)])
def mask_pixel(B,G,R,H,S,V,Y,Cr,Cb):
    # B,G,R = pixel_bgr
    # H,S,V = pixel_hsv
    # Y,Cr,Cb = pixel_ycrcb
    if (
        (0.0 <= H <= 50.0 and 0.23 <= S <= 0.68 and\
        R > 95 and G > 40 and B > 20 and R > G and R > B and abs(R - G) > 15)\
        or\
        (R > 95 and G > 40 and B > 20 and R > G and R > B and abs(R - G )> 15 and Cr > 135 and\
        Cb > 85 and Y > 80 and Cr <= (1.5862*Cb)+20 and Cr>=(0.3448*Cb)+76.2069 and\
        Cr >= (-4.5652*Cb)+234.5652 and\
        Cr <= (-1.15*Cb)+301.75 and\
        Cr <= (-2.2857*Cb)+432.85)
        # R > 10
    ):
Example #22
# Cr >= (-4.5652*Cb)+234.5652 and
# Cr <= (-1.15*Cb)+301.75 and
# Cr <= (-2.2857*Cb)+432.85

import cv2
import matplotlib.pyplot as plt
import os
import PIL
from numba import vectorize, float64, uint8


# @vectorize([uint8[:,:](uint8[:,:],uint8[:,:],uin8[:,:])])
# def mask_pixel1(pixel):
#     return mask_pixel(pixel[0],pixel[1],pixel[2])
@vectorize(
    [uint8(uint8, uint8, uint8, uint8, uint8, uint8, uint8, uint8, uint8)])
def mask_pixel(B, G, R, H, S, V, Y, Cr, Cb):
    # B,G,R = pixel_bgr
    # H,S,V = pixel_hsv
    # Y,Cr,Cb = pixel_ycrcb
    if (
        (0.0 <= H <= 50.0 and 0.23 <= S <= 0.68 and\
        R > 95 and G > 40 and B > 20 and R > G and R > B and abs(R - G) > 15)\
        or\
        (R > 95 and G > 40 and B > 20 and R > G and R > B and abs(R - G )> 15 and Cr > 135 and\
        Cb > 85 and Y > 80 and Cr <= (1.5862*Cb)+20 and Cr>=(0.3448*Cb)+76.2069 and\
        Cr >= (-4.5652*Cb)+234.5652 and\
        Cr <= (-1.15*Cb)+301.75 and\
        Cr <= (-2.2857*Cb)+432.85)
        # R > 10
    ):
Example #23
from numba import jit, uint8

@jit(uint8(uint8, uint8))
def scan(state: int, next_char: int) -> int:
	if state == 0 and next_char == 67: state = 1
	elif state == 1 and next_char == 111: state = 2
	elif state == 2 and next_char == 110: state = 3
	elif state == 3 and next_char == 116: state = 4
	elif state == 4 and next_char == 101: state = 5
	elif state == 5 and next_char == 110: state = 6
	elif state == 6 and next_char == 116: state = 7
	elif state == 7 and next_char == 45: state = 8
	elif state == 8 and next_char == 84: state = 9
	elif state == 9 and next_char == 121: state = 10
	elif state == 10 and next_char == 112: state = 11
	return state
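
The transition table above advances one state per matching byte of the ASCII prefix "Content-Typ" (67 = 'C', 111 = 'o', ..., 112 = 'p') and otherwise leaves the state unchanged. A quick host-side check:

state = 0
for ch in b"Content-Type":
    state = scan(state, ch)
assert state == 11  # the 11-byte prefix "Content-Typ" was matched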