Example #1
def shuffle(im, lb, num, rotate=False):
    # Split a square RGB image and its label map into a num x num grid of blocks,
    # shuffle the blocks with a random permutation (the "key map"), and optionally
    # rotate each image block by a random multiple of 90 degrees (the rotation
    # index of each block is recorded in lbl_rotn).
    map = {}
    rows = cols = num
    blk_size = im.shape[0] // rows

    img_blks = view_as_blocks(im, block_shape=(blk_size, blk_size, 3)).reshape(
        (-1, blk_size, blk_size, 3))
    lbl_blks = view_as_blocks(lb, block_shape=(blk_size, blk_size)).reshape(
        (-1, blk_size, blk_size))

    img_shuff = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.uint8)
    lbl_shuff = np.zeros((lb.shape[0], lb.shape[1]), dtype=np.uint8)
    lbl_rotn = np.zeros((lb.shape[0] // blk_size, lb.shape[1] // blk_size),
                        dtype=np.uint8)

    a = np.arange(rows * rows, dtype=np.uint8)
    b = np.random.permutation(a)

    map = {k: v for k, v in zip(a, b)}
    print("Key Map:-\n" + str(map))

    for i in range(0, rows):
        for j in range(0, cols):
            x, y = i * blk_size, j * blk_size
            if rotate:
                rot_val = random.randrange(0, 4)
                lbl_rotn[i, j] = rot_val
                img_shuff[x:x + blk_size, y:y + blk_size] = np.rot90(
                    img_blks[map[i * rows + j]], rot_val)
                lbl_shuff[x:x + blk_size,
                          y:y + blk_size] = lbl_blks[map[i * rows + j]]
            else:
                img_shuff[x:x + blk_size,
                          y:y + blk_size] = img_blks[map[i * rows + j]]
                lbl_shuff[x:x + blk_size,
                          y:y + blk_size] = lbl_blks[map[i * rows + j]]
    return img_shuff, lbl_shuff, lbl_rotn
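The snippet above assumes numpy (np), random, and skimage.util.view_as_blocks are already imported and that im is a square RGB image whose side is divisible by num. A minimal standalone sketch of the same block-shuffle idea, built on a synthetic image so it can be run on its own:

import numpy as np
from skimage.util import view_as_blocks

num = 4                                              # 4 x 4 grid of blocks
im = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
blk = im.shape[0] // num
blocks = view_as_blocks(im, (blk, blk, 3)).reshape(-1, blk, blk, 3)

perm = np.random.permutation(num * num)              # plays the role of the "key map"
shuffled = np.zeros_like(im)
for idx, src in enumerate(perm):
    r, c = divmod(idx, num)
    shuffled[r * blk:(r + 1) * blk, c * blk:(c + 1) * blk] = blocks[src]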
Example #2
def shuffle_image(im, num):
    map = {}
    rows = cols = num
    blk_size = im.shape[0] // rows

    img_blks = view_as_blocks(im, block_shape=(blk_size, blk_size, 3)).reshape(
        (-1, blk_size, blk_size, 3))
    print("img_blks.shape: ", img_blks.shape)

    img_shuff = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.uint8)
    print("img_shuff.shape: ", img_shuff.shape)

    a = np.arange(rows * rows, dtype=np.uint8)
    b = np.random.permutation(a)

    map = {k: v for k, v in zip(a, b)}
    print("Key Map:-\n" + str(map))

    for i in range(0, rows):
        for j in range(0, cols):
            x, y = i * blk_size, j * blk_size
            img_shuff[x:x + blk_size,
                      y:y + blk_size] = img_blks[map[i * rows + j]]

    shuf_img_blks = view_as_blocks(img_shuff,
                                   block_shape=(blk_size, blk_size,
                                                3)).reshape((-1, blk_size,
                                                             blk_size, 3))
    print("shuf_img_blks.shape: ", shuf_img_blks.shape)

    return img_shuff, img_blks, map, blk_size, shuf_img_blks
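shuffle_image returns the key map, block size, and block views precisely so the shuffle can be undone later. A sketch of that inverse step, under the same assumption of a square image whose side is divisible by num (unshuffle is a hypothetical helper name, not part of the original code):

import numpy as np

def unshuffle(img_shuff, key_map, blk_size, rows):
    # The block at shuffled position p holds original block key_map[p],
    # so copy it back to the position given by key_map[p].
    restored = np.zeros_like(img_shuff)
    for i in range(rows):
        for j in range(rows):
            p = i * rows + j
            x, y = i * blk_size, j * blk_size
            r, c = divmod(int(key_map[p]), rows)
            restored[r * blk_size:(r + 1) * blk_size,
                     c * blk_size:(c + 1) * blk_size] = img_shuff[x:x + blk_size,
                                                                  y:y + blk_size]
    return restored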
Example #3
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. Compute the gradient image in x and y directions (already done for you)
    2. Compute gradient histograms for each cell
    3. Flatten block of histograms into a 1D feature vector
        Here, we treat the entire patch of histograms as our block
    4. Normalize flattened block
        Normalization makes the descriptor more robust to lighting variations

    Args:
        patch: grayscale image patch of shape (H, W)
        pixels_per_cell: size of a cell with shape (M, N)

    Returns:
        block: 1D patch descriptor array of shape ((H*W*n_bins)/(M*N))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi).astype(int) % 180

    # Group entries of G and theta into cells of shape pixels_per_cell, (M, N)
    #   G_cells.shape = theta_cells.shape = (H//M, W//N)
    #   G_cells[0, 0].shape = theta_cells[0, 0].shape = (M, N)
    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    # For each cell, keep track of gradient histogram of size n_bins
    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    for i in range(rows):
        for j in range(cols):
            G_patch = G_cells[i, j]
            theta_patch = theta_cells[i, j]

            for m in range(pixels_per_cell[0]):
                for n in range(pixels_per_cell[1]):
                    bin_idx = int(theta_patch[m, n] / degrees_per_bin)
                    # print(theta_patch[m, n], bin_idx)
                    cells[i, j, bin_idx] += G_patch[m, n]

    block = cells.flatten()
    block = block / np.linalg.norm(block)

    return block
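A quick way to sanity-check the hog_descriptor defined above (assuming numpy, skimage.filters, and view_as_blocks are imported so the function runs): the docstring's shape formula says a 16x16 patch with 8x8 cells and 9 bins yields a 36-element descriptor.

import numpy as np

patch = np.random.rand(16, 16)              # synthetic grayscale patch
desc = hog_descriptor(patch, pixels_per_cell=(8, 8))
print(desc.shape)                           # expected: (36,) == (16*16*9)/(8*8)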
Example #4
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. Compute the gradient image in x and y directions (already done for you)
    2. Compute gradient histograms for each cell
    3. Flatten block of histograms into a 1D feature vector
        Here, we treat the entire patch of histograms as our block
    4. Normalize flattened block
        Normalization makes the descriptor more robust to lighting variations

    Args:
        patch: grayscale image patch of shape (H, W)
        pixels_per_cell: size of a cell with shape (M, N)

    Returns:
        block: 1D patch descriptor array of shape ((H*W*n_bins)/(M*N))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    # Group entries of G and theta into cells of shape pixels_per_cell, (M, N)
    #   G_cells.shape = theta_cells.shape = (H//M, W//N)
    #   G_cells[0, 0].shape = theta_cells[0, 0].shape = (M, N)
    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    # For each cell, keep track of gradient histogram of size n_bins
    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    # YOUR CODE HERE
    for i in range(rows):
        for j in range(cols):
            cell_hist = np.histogram(theta_cells[i, j].flatten(),
                                     bins=n_bins,
                                     range=(0, 180),
                                     weights=G_cells[i, j].flatten())[0]
            cells[i, j, :] = cell_hist
    # normalize the HoG
    cells = (cells - np.mean(cells)) / np.std(cells, ddof=1)
    block = cells.flatten()
    # YOUR CODE HERE

    return block
Example #5
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
        'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
        'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]
    cells = np.zeros((rows, cols, n_bins))
    o = pixels_per_cell[0]
    p = pixels_per_cell[1]
    block = []
    for i in range(0, rows):
        for j in range(0, cols):
            # Flatten this cell's angles and magnitudes
            x_theta = theta_cells[i][j].flatten()
            y_G = G_cells[i][j].flatten()
            # Map each angle in [0, 180] to its bin index
            x_theta = (x_theta // degrees_per_bin).astype(int) % n_bins
            # A fresh histogram per cell; reusing a single array would
            # accumulate counts across cells
            b = np.zeros(n_bins)
            for m in range(0, o * p):
                b[x_theta[m]] += y_G[m]
            block.append(b)

    block = np.array(block)
    # simple_descriptor (defined elsewhere in the assignment) normalizes the
    # stacked histograms into the final feature vector
    block = simple_descriptor(block)
    # YOUR CODE HERE

    return block
Example #6
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    ### YOUR CODE HERE
    for i in range(rows):
        for j in range(cols):
            for i1 in range(pixels_per_cell[0]):
                for j1 in range(pixels_per_cell[1]):
                    Gt = G_cells[i, j, i1, j1]
                    Tt = theta_cells[i, j, i1, j1]
                    bin = int(Tt // degrees_per_bin) % 9
                    cells[i, j, bin] += Gt
    mean = np.mean(cells)
    std = np.std(cells)
    if std == 0:
        std = 1
    cells = (cells - mean) / std
    block = cells.flatten()
    pass
    ### YOUR CODE HERE

    return block
Example #7
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
        'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
        'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    # YOUR CODE HERE
    for i in range(rows):
        for j in range(cols):
            idxs = theta_cells[i, j] // degrees_per_bin
            idxs[idxs == 9] = 8
            idxs = idxs.astype(int)
            for m in range(G_cells.shape[2]):
                for n in range(G_cells.shape[3]):
                    cells[i, j, idxs[m, n]] += G_cells[i, j, m, n]
            # idxs = idxs.astype(int)
            # cells[i, j, idxs] += G_cells[i, j, idxs]

    # Normalize across block
    cells = (cells - np.mean(cells)) / np.std(cells)
    block = np.ravel(cells)
    # YOUR CODE HERE

    return block
Example #8
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)

    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))
    # Compute histogram per cell
    ### YOUR CODE HERE
    for i in range(G_cells.shape[0]):
        for j in range(G_cells.shape[1]):
            for k in range(G_cells.shape[2]):
                for l in range(G_cells.shape[3]):
                    # Plain integer division maps the unsigned angle to its bin;
                    # % n_bins guards the rare theta == 180.0 edge case.
                    index = int(theta_cells[i, j, k, l] / degrees_per_bin) % n_bins
                    cells[i, j, index] += G_cells[i, j, k, l]

    block = cells.flatten()
    block = block / np.linalg.norm(block, ord=2)
    #    block = (block - np.min(block)) / (np.max(block) - np.min(block))
    ### YOUR CODE HERE

    return block
Example #9
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. Compute the gradient image in x and y directions (already done for you)
    2. Compute gradient histograms for each cell
    3. Flatten block of histograms into a 1D feature vector
        Here, we treat the entire patch of histograms as our block
    4. Normalize flattened block
        Normalization makes the descriptor more robust to lighting variations

    Args:
        patch: grayscale image patch of shape (H, W)
        pixels_per_cell: size of a cell with shape (M, N)

    Returns:
        block: 1D patch descriptor array of shape ((H*W*n_bins)/(M*N))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    g_C = view_as_blocks(G, block_shape=pixels_per_cell)
    t_C = view_as_blocks(theta, block_shape=pixels_per_cell)
    r = g_C.shape[0]
    c = g_C.shape[1]

    cells = np.zeros((r, c, n_bins))

    ### YOUR CODE HERE
    for i in range(r):
        for j in range(c):
            for z in range(pixels_per_cell[0]):
                for t in range(pixels_per_cell[1]):
                    d_lower = t_C[i][j][z][t] % degrees_per_bin
                    division_ratio = float(d_lower) / degrees_per_bin
                    lBN = (int(np.trunc(
                        t_C[i][j][z][t] / degrees_per_bin))) % 9
                    uBN = (lBN + 1) % 9
                    cells[i][j][uBN] += (division_ratio * g_C[i][j][z][t])
                    cells[i][j][lBN] += ((1 - division_ratio) *
                                         g_C[i][j][z][t])
            cells[i][j] = cells[i][j] / np.linalg.norm(cells[i][j])
    block = cells.flatten()
    ### YOUR CODE HERE

    return block
Example #10
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))
    # Compute histogram per cell
    ### YOUR CODE HERE
    m = pixels_per_cell[0]
    n = pixels_per_cell[1]
    for i in np.arange(rows):
        for j in np.arange(cols):
            G_cells[i][j] = G[m * i:m * (i + 1), n * j:n * (j + 1)]
            theta_cells[i][j] = theta[m * i:m * (i + 1), n * j:n * (j + 1)]
            G_cells_v = G_cells[i][j].flatten()
            theta_cells_v = ((theta_cells[i][j] // 20) %
                             9).flatten().astype(int)
            for k in np.arange(n_bins):
                cells[i][j][k] = G_cells_v[theta_cells_v == k].sum()
            cells[i][j] = (cells[i][j] - np.mean(cells[i][j])) / np.std(
                cells[i][j])
    block = cells.flatten()
    ### YOUR CODE HERE

    return block
Example #11
def hog_descriptor(patch, pixels_per_cell=(8,8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi + 1e-5) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    ### YOUR CODE HERE
    for r in range(rows):
        for c in range(cols):
            bin_id = np.int32((theta_cells[r, c] // degrees_per_bin).flatten())
            G_flat = G_cells[r, c].flatten()
            # print (theta_cells, bin_id)
            for i,b  in enumerate(bin_id):
                cells[r, c, b] += G_flat[i]

    block = cells.flatten()
    block /= np.linalg.norm(block)
    # block /= np.sum(np.abs(block))
    # variance = np.sum(np.var(cells, axis = (0,1)))
    # block = block / np.sqrt(variance)
    ### YOUR CODE HERE

    return block
Example #12
def hog_descriptor(patch, pixels_per_cell=(8,8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)
   
    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    for r in range(rows):
        for c in range(cols):
            
            G = G_cells[r,c]
            theta = theta_cells[r,c]
            bin_num = theta//degrees_per_bin
            
            for i in range(pixels_per_cell[0]):
                for j in range(pixels_per_cell[1]):
                    cells[r, c, int(bin_num[i, j])%9] += G[i, j]
                    
    flat = cells.flatten()
    
    block = flat/np.linalg.norm(flat)
    
    return block
Example #13
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(
        G,
        block_shape=pixels_per_cell)  # 4-D view: an m x n grid of cells, each cell of shape (x, y)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]
    cells = np.zeros((rows, cols, n_bins))
    # Compute histogram per cell
    for i in range(0, rows):
        for j in range(0, cols):
            for n_bin in range(0, n_bins):
                cells[i, j, n_bin] = np.sum(
                    G_cells[i, j] *
                    ((theta_cells[i, j] >= n_bin * degrees_per_bin) &
                     (theta_cells[i, j] < (n_bin + 1) * degrees_per_bin)))
    block = cells.flatten()
    block = block - block.mean()
    if block.std() < 1e-5:
        pass  # if the variance is zero, leave the block as-is
    else:
        block /= block.std()

    return block
Example #14
def hog_descriptor(patch, pixels_per_cell=(8,8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)
   
    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]
    
    cells = np.zeros((rows, cols, n_bins))
   
    block = []
    # Compute histogram per cell
    ### YOUR CODE HERE
    for ci in range(rows):
        for cj in range(cols):
            cell = G_cells[ci,cj]
            cellTheta = theta_cells[ci,cj]
            for pi in range(cell.shape[0]):
                for pj in range(cell.shape[1]):
                    tbin = (cellTheta[pi,pj] // degrees_per_bin) % n_bins
                    cells[ci,cj,int(tbin)] += cell[pi,pj]
            cells[ci,cj,:] = cells[ci,cj,:] / np.sum(cells[ci,cj,:])
    block = cells.flatten()
    ### YOUR CODE HERE
    
    return block
Example #15
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
        'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
        'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    # YOUR CODE HERE
    M = G_cells.shape[2]
    N = G_cells.shape[3]
    for i in range(rows):
        for j in range(cols):
            for m in range(M):
                for n in range(N):
                    # The unsigned angle in [0, 180) maps directly to a bin;
                    # wrap with % n_bins in case theta rounds to exactly 180.
                    deg = int(theta_cells[i, j, m, n] // degrees_per_bin) % n_bins
                    cells[i, j, deg] += G_cells[i, j, m, n]
    block = np.reshape(cells, newshape=-1)
    # YOUR CODE HERE

    return block
Example #16
    def __load__(self, id_name):
        image_path = os.path.join(self.path, id_name,
                                  "lattice_light_sheet") + ".tif"
        mask_path = os.path.join(self.path, id_name, "truth") + ".tif"

        latticeMovieImage = skimage.external.tifffile.imread(image_path)
        latticeMovieImage = latticeMovieImage[:self.image_size, :self.
                                              image_size, :self.image_size]

        #Standardizing globally
        #image_mean = np.mean(latticeMovieImage, axis=(0, 1, 2), keepdims=True)
        #image_std = np.std(latticeMovieImage,  axis=(0, 1, 2), keepdims=True)
        #latticeMovieImage = (latticeMovieImage - image_mean) / image_std

        lattice_patches = view_as_blocks(latticeMovieImage,
                                         block_shape=(self.patch_size,
                                                      self.patch_size,
                                                      self.patch_size))
        lattice_patches = lattice_patches.reshape(
            int((self.image_size / self.patch_size)**3), self.patch_size,
            self.patch_size, self.patch_size)
        lattice_patches = np.expand_dims(lattice_patches, axis=-1)

        mask_image = skimage.external.tifffile.imread(mask_path)
        mask_image = mask_image[:self.image_size, :self.image_size, :self.
                                image_size]
        #mask_image = np.expand_dims(mask_image, axis=-1)

        mask = np.zeros((self.image_size, self.image_size, self.image_size))
        mask = np.maximum(mask, mask_image)

        #TODO Check if view_as_blocks gives all possible blocks
        mask_patches = view_as_blocks(mask,
                                      block_shape=(self.patch_size,
                                                   self.patch_size,
                                                   self.patch_size))
        mask_patches = mask_patches.reshape(
            int((self.image_size / self.patch_size)**3), self.patch_size,
            self.patch_size, self.patch_size)
        mask_patches = np.expand_dims(mask_patches, axis=-1)

        #lattice_patches = lattice_patches/(2/3*65535.0)
        #BOTTOM LINE COMMENTED FOR PSNR5 Data. UNCOMMENT FOR FUTURE USE and use 255.0
        mask_patches = mask_patches / 255.0

        weight_patches = np.zeros((self.patch_size**3, 2))
        weight_patches[:, 0] = 0.005
        weight_patches[:, 1] = 0.995
        weight_patches = np.squeeze(np.sum(weight_patches, axis=-1))

        #print(np.count_nonzero(mask == 1.0)/1000000.0)

        return lattice_patches, mask_patches, weight_patches
Example #17
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'
    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))
    # Compute histogram per cell
    ### YOUR CODE HERE
    for i in range(rows):
        for j in range(cols):
            hist, _ = np.histogram(a=theta_cells[i, j],
                                   bins=n_bins,
                                   range=(0, 180),
                                   weights=G_cells[i, j])
            cells[i, j] = hist

    # follow `simple_descriptor`, describe the patch by normalizing the image values
    # into a standard normal distribution (having mean of 0 and standard deviation of 1)
    cells = (cells - np.mean(cells)) / np.std(cells)
    block = cells.flatten()
    ### YOUR CODE HERE

    return block
Example #18
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    print(G_cells)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    ### YOUR CODE HERE
    for i in range(rows):
        for j in range(cols):
            # Weight each orientation bin by the gradient magnitudes in the cell
            hist, _ = np.histogram(theta_cells[i, j],
                                   bins=n_bins,
                                   range=(0, 180),
                                   weights=G_cells[i, j])
            cells[i, j] = hist

    # Flatten the block of histograms and L2-normalize it
    block = cells.flatten()
    norm = np.linalg.norm(block)
    if norm > 0:
        block = block / norm
    ### YOUR CODE HERE

    return block


# matches.remove([362, 56])    # long diagonal, top
#     matches.remove([404,304])  # long diagonal, bottom
Example #19
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. compute the gradient image in x and y (already done for you)
    2. compute gradient histograms
    3. normalize across block 
    4. flattening block into a feature vector

    Args:
        patch: grayscale image patch of shape (h, w)
        pixels_per_cell: size of a cell with shape (m, n)

    Returns:
        block: 1D array of shape ((h*w*n_bins)/(m*n))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    cells = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell; build Gr from the block view so that each
    # length m*n row really holds one cell (reshaping G directly mixes cells)
    Gr = G_cells.reshape((rows, cols, pixels_per_cell[0] * pixels_per_cell[1]))
    cells = np.apply_along_axis(
        lambda x: np.histogram(x, bins=n_bins, density=True)[0],
        axis=2,
        arr=Gr)

    block = cells.flatten()
    block = block / np.sum(block)

    return block
Example #20
def hog_descriptor(patch, pixels_per_cell=(8, 8)):
    """
    Generating hog descriptor by the following steps:

    1. Compute the gradient image in x and y directions (already done for you)
    2. Compute gradient histograms for each cell
    3. Flatten 3D matrix of histograms into a 1D feature vector.
    4. Normalize flattened histogram feature vector by L2 norm
       Normalization makes the descriptor more robust to lighting variations

    Args:
        patch: grayscale image patch of shape (H, W)
        pixels_per_cell: size of a cell with shape (M, N)

    Returns:
        block: 1D patch descriptor array of shape ((H*W*n_bins)/(M*N))
    """
    assert (patch.shape[0] % pixels_per_cell[0] == 0),\
                'Heights of patch and cell do not match'
    assert (patch.shape[1] % pixels_per_cell[1] == 0),\
                'Widths of patch and cell do not match'

    n_bins = 9
    degrees_per_bin = 180 // n_bins

    Gx = filters.sobel_v(patch)
    Gy = filters.sobel_h(patch)

    # Unsigned gradients
    G = np.sqrt(Gx**2 + Gy**2)
    theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 180

    # Group entries of G and theta into cells of shape pixels_per_cell, (M, N)
    #   G_cells.shape = theta_cells.shape = (H//M, W//N)
    #   G_cells[0, 0].shape = theta_cells[0, 0].shape = (M, N)
    G_cells = view_as_blocks(G, block_shape=pixels_per_cell)
    theta_cells = view_as_blocks(theta, block_shape=pixels_per_cell)
    rows = G_cells.shape[0]
    cols = G_cells.shape[1]

    # For each cell, keep track of gradient histogram of size n_bins
    hists = np.zeros((rows, cols, n_bins))

    # Compute histogram per cell
    ### YOUR CODE HERE
    for i in range(rows):
        for j in range(cols):
            # Accumulate gradient magnitudes into the orientation bin of each pixel
            bin_idx = (theta_cells[i, j] // degrees_per_bin).astype(int) % n_bins
            for b in range(n_bins):
                hists[i, j, b] = G_cells[i, j][bin_idx == b].sum()

    # Flatten the 3D histogram array and normalize by its L2 norm
    block = hists.flatten()
    norm = np.linalg.norm(block)
    if norm > 0:
        block = block / norm
    ### YOUR CODE HERE

    return block
Example #21
def load_data_make_jpeg(folder):
    entrynumber = 0
    list = glob.glob(folder)
    for entry in list:
        entrynumber += 1
        img_size = (256, 256, 3)
        img_new = io.imread(entry)

        shape = img_new.shape
        height = shape[0] // 256
        height = height * 256
        width = shape[1] // 256
        width = width * 256

        img_new = img_new[:height, :width, :3]
        img_new_w = view_as_blocks(img_new, img_size)
        img_new_w = np.uint8(img_new_w)

        r = 0
        for i in range(img_new_w.shape[0]):
            for j in range(img_new_w.shape[1]):
                A = np.zeros((img_size[0], img_size[1], 3))
                A[:, :, :] = img_new_w[i, j, :, :]
                A = np.uint8(A)
                imageio.imwrite(
                    '/home/diego/Desktop/Output/' + str(entrynumber) + '-' +
                    str(r) + '.png', A)
                r += 1
Example #22
def test_view_as_blocks_2D_array():

    A = np.arange(4 * 4).reshape(4, 4)
    B = view_as_blocks(A, (2, 2))
    assert_equal(B[0, 1], np.array([[2, 3],
                                   [6, 7]]))
    assert_equal(B[1, 0, 1, 1], 13)
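The test relies on the block-view shape rule: view_as_blocks(A, block_shape) returns an array whose shape is the block-grid shape followed by the block shape, and B[i, j] is the (i, j) block of A. A small standalone sketch of that rule, assuming only numpy and scikit-image:

import numpy as np
from skimage.util import view_as_blocks

A = np.arange(4 * 4).reshape(4, 4)
B = view_as_blocks(A, (2, 2))
assert B.shape == (2, 2, 2, 2)              # a 2x2 grid of 2x2 blocks
assert (B[0, 1] == A[0:2, 2:4]).all()       # block (0, 1) is the top-right quarter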
Example #23
def downscale_all(img, mask, edges, downscale):
    """
    Downscales all the inputs by a given scale.
    :param img: A sparse  flow map - h x w x 2
    :param mask: A binary mask  - h x w x 1
    :param edges:An edges map  - h x w x 1
    :param downscale: Downscaling factor.
    :return: the downscaled versions of the inputs
    """
    from skimage.util.shape import view_as_blocks
    from skimage.transform import rescale
    img[:, :, 0][mask == -1] = np.nan
    img[:, :, 1][mask == -1] = np.nan

    img = img[:(img.shape[0] -
                (img.shape[0] % downscale)), :(img.shape[1] -
                                               (img.shape[1] % downscale)), :]

    blocks = view_as_blocks(img, (downscale, downscale, 2))
    img = np.nanmean(blocks, axis=(-2, -3, -4))

    mask = np.ones_like(img)
    mask[np.isnan(img)] = -1
    mask = mask[:, :, 0]
    img[np.isnan(img)] = 0

    if edges is not None:
        edges = edges[:(edges.shape[0] - (edges.shape[0] % downscale)), :(
            edges.shape[1] - (edges.shape[1] % downscale))]
        edges = rescale(edges, 1 / float(downscale), preserve_range=True)

    return img, mask, edges
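The core of downscale_all is the NaN-aware block mean: invalid pixels are first set to NaN, then np.nanmean over the block axes of the view averages only the valid samples in each downscale x downscale window, per flow channel. A standalone sketch of just that step, with made-up sizes:

import numpy as np
from skimage.util import view_as_blocks

downscale = 2
flow = np.random.rand(4, 4, 2)
flow[0, 0, :] = np.nan                                     # pretend this pixel is masked out
blocks = view_as_blocks(flow, (downscale, downscale, 2))   # shape (2, 2, 1, 2, 2, 2)
coarse = np.nanmean(blocks, axis=(-2, -3, -4))             # shape (2, 2, 2) downscaled flow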
Example #24
def get_patch(image_data, patch_shape):

    # image_data.shape: (300, 240, 416, 3)
    n_image = image_data.shape[0]

    # block_hight: 15, block_width: 26
    (block_hight, block_width) = get_block_shape(image_data.shape, patch_shape)

    # n_patch: 15 * 26 -> 390
    n_patch = block_hight * block_width

    # Initialize patch_data (390*300, 16, 16, 3)
    patch_data = np.empty([n_patch * n_image] + list(patch_shape),
                          dtype=np.float32)

    # block_reshape: (390, 16, 16, 3)
    block_reshape = [n_patch] + list(patch_shape)
    for i in range(n_image):
        # Get each patch block of a image, and organize the patch block to (390, 16, 16, 3)
        block = view_as_blocks(image_data[i],
                               block_shape=patch_shape).reshape(block_reshape)

        # Add patch from a patch block to the patch_data one by one
        for j in range(n_patch):
            patch_data[i * n_patch + j] = block[j]

    return patch_data
Example #25
def downscale_all(sparse_flow, mask, edges, downscale):
    """
    Downscales all the inputs by a given scale.
    :param sparse_flow: A sparse  flow map - h x w x 2
    :param mask: A binary mask  - h x w x 1
    :param edges:An edges map  - h x w x 1
    :param downscale: Downscaling factor.
    :return: the downscaled versions of the inputs
    """
    sparse_flow[:, :, 0][mask == -1] = np.nan
    sparse_flow[:, :, 1][mask == -1] = np.nan

    sparse_flow = sparse_flow[:(sparse_flow.shape[0] -
                                (sparse_flow.shape[0] % downscale)), :(
                                    sparse_flow.shape[1] -
                                    (sparse_flow.shape[1] % downscale)), :]

    blocks = view_as_blocks(sparse_flow, (downscale, downscale, 2))
    sparse_flow = np.nanmean(blocks, axis=(-2, -3, -4))

    mask = np.ones_like(sparse_flow)
    mask[np.isnan(sparse_flow)] = -1
    mask = mask[:, :, 0]
    sparse_flow[np.isnan(sparse_flow)] = 0

    if edges is not None:
        edges = edges[:(edges.shape[0] - (edges.shape[0] % downscale)), :(
            edges.shape[1] - (edges.shape[1] % downscale))]
        edges = rescale(edges, 1 / float(downscale), preserve_range=True)

    return sparse_flow, mask, edges
Example #26
def shuffle(im, inverse=False):

    # Configure block size, rows and columns
    blk_size = 56
    rows = np.uint8(im.shape[0] / blk_size)
    cols = np.uint8(im.shape[1] / blk_size)

    # Create a block view on image
    img_blks = view_as_blocks(im,
                              block_shape=(blk_size, blk_size, 3)).squeeze()
    img_shuff = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.uint8)

    # Secret key maps
    map = {0: 2, 1: 0, 2: 3, 3: 1}
    inv_map = {v: k for k, v in map.items()}

    # Perform block shuffling
    for i in range(0, rows):
        for j in range(0, cols):
            x, y = i * blk_size, j * blk_size
            if (inverse):
                img_shuff[x:x + blk_size,
                          y:y + blk_size] = img_blks[inv_map[i], inv_map[j]]
            else:
                img_shuff[x:x + blk_size, y:y + blk_size] = img_blks[map[i],
                                                                     map[j]]

    return img_shuff
Example #27
def shuffle(im, num, rotate=False):
    im = torch2numpy(im.permute(1, 2, 0))

    rows = cols = num
    blk_size = im.shape[0] // rows

    img_blocks = view_as_blocks(im,
                                block_shape=(blk_size, blk_size, 3)).reshape(
                                    (-1, blk_size, blk_size, 3))
    img_shuffle = np.zeros((im.shape[0], im.shape[1], 3))

    a = np.arange(rows * rows, dtype=np.uint8)
    b = np.random.permutation(a)

    map = {k: v for k, v in zip(a, b)}

    for i in range(0, rows):
        for j in range(0, cols):
            x, y = i * blk_size, j * blk_size
            if rotate:
                rot_val = random.randrange(0, 4)
                img_shuffle[x:x + blk_size, y:y + blk_size] = np.rot90(
                    img_blocks[map[i * rows + j]], rot_val)
            else:
                img_shuffle[x:x + blk_size,
                            y:y + blk_size] = img_blocks[map[i * rows + j]]
    img_shuffle = torch.FloatTensor(img_shuffle).permute(0, 1, 2)
    return img_shuffle
Example #28
 def setupPatches(self, dims):
     self.dims = dims
     self.im = mpimg.imread(self.inputfile)
     self.block_shape = (self.dims[0], self.dims[1], self.im.shape[2]) #height, width
     margin=np.mod(self.im.shape,self.block_shape)
     self.im_crop = self.im[:(self.im.shape-margin)[0],:(self.im.shape-margin)[1],:(self.im.shape-margin)[2]]
     self.view = view_as_blocks(self.im_crop, self.block_shape)
Example #29
def load_data_make_jpeg():

    img_size = (256, 256, 3)
    img_new = io.imread(
        '/home/diego/MPFI/Images/041719 JCFFRIL 07 Wt3 profile 13 Spiny top montage color for Diego.tif'
    )
    #img_ref  = io.imread('/home/diego/MPFI/Images/CycleGAN_onlyGreen/Unlabels/onlygreen_2.png')
    img_new = img_new[:9984, :16128, :3]
    #img_ref = img_ref[:7424,:5632,:3]

    img_new_w = view_as_blocks(img_new, img_size)
    img_new_w = np.uint8(img_new_w)
    #img_ref_w = view_as_blocks(img_ref, img_size)
    #img_ref_w = np.uint8(img_ref_w)
    r = 0
    for i in range(img_new_w.shape[0]):
        for j in range(img_new_w.shape[1]):

            A = np.zeros((img_size[0], img_size[1], 3))
            #B = np.zeros((img_size[0], img_size[1], 3))

            A[:, :, :] = img_new_w[i, j, :, :]
            #B[:,:,:] = img_ref_w[i,j,:,:]
            A = np.uint8(A)
            #B = np.uint8(B)
            imageio.imwrite(
                '/home/diego/MPFI/Datasets/profile13_numbered/' + str(r) +
                '.png', A)
            #imageio.imwrite('/home/diego/MPFI/Datasets/OnlyGreen/trainB/'+ str(r) + '.png', B)
            r += 1
Example #30
def load_data_make_jpeg(folder):
    list = glob.glob(folder)
    for entry in list:
        img_size = (256, 256, 3)
        img_new = io.imread(entry)
        img_new = (img_new / 256).astype('uint8')
        shape = img_new.shape
        height = shape[0] // 256
        height256 = height * 256
        width = shape[1] // 256
        width256 = width * 256

        img_new = img_new[:height256, :width256, :3]
        img_new_w = view_as_blocks(img_new, img_size)
        img_new_w = np.uint8(img_new_w)
        imageio.imwrite('/home/diego/Desktop/Output_Final/' + 'CroppedVersion' + '.png', img_new)
        r = 0
        for i in range(img_new_w.shape[0]):
            for j in range(img_new_w.shape[1]):
                A = np.zeros((img_size[0], img_size[1], 3))
                A[:, :, :] = img_new_w[i, j, :, :]
                A = np.uint8(A)
                imageio.imwrite('/home/diego/Desktop/Output/' + str(r) + '.png', A)
                r += 1
    return width, height
Example #31
def test_view_as_blocks_3D_array():

    A = np.arange(4 * 4 * 6).reshape(4, 4, 6)
    B = view_as_blocks(A, (1, 2, 2))
    assert_equal(B.shape, (4, 2, 3, 1, 2, 2))
    assert_equal(B[2:, 0, 2],
                 np.array([[[[52, 53], [58, 59]]], [[[76, 77], [82, 83]]]]))
Example #32
def test_view_as_blocks_3D_array():
    A = np.arange(4 * 4 * 6).reshape(4, 4, 6)
    B = view_as_blocks(A, (1, 2, 2))
    assert_equal(B.shape, (4, 2, 3, 1, 2, 2))
    assert_equal(B[2:, 0, 2], np.array([[[[52, 53],
                                          [58, 59]]],
                                        [[[76, 77],
                                          [82, 83]]]]))
Example #33
def small_spectrogram_max_pooling(spec):
    """
    Using code adapted from:
    http://scikit-image.org/docs/dev/auto_examples/plot_view_as_blocks.html
    """

    spec = force_spectrogram_length(spec, 384)
    im_norm = (spec - spec.mean()) / spec.var()

    view = view_as_blocks(im_norm, (32, 32))
    flatten_view = view.reshape(view.shape[0], view.shape[1], -1)

    return np.max(flatten_view, axis=2).flatten()
Example #34
def getFeature():
    cap = cv2.VideoCapture('/home/xingzhong/Videos/heat.mkv')
    fn = 0
    ret, iframe = cap.read()
    H, W, _ = iframe.shape
    gmask = globalMask(iframe)
    nznsLeft = deque(maxlen=25)
    nznsCenter = deque(maxlen=25)
    nznsRight = deque(maxlen=25)
    features = []
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        # if fn >= 100000: break
        fn += 1

        frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        ret, threshColor = cv2.threshold(
            frameHSV[:, :, 0], 30, 179, cv2.THRESH_BINARY_INV)
        #threshColorMask = cv2.bitwise_and(frame, frame, mask = threshColor)
        #import ipdb; ipdb.set_trace()
        #imgs = view_as_blocks(threshColorMask, block_shape = (H/9, W/16, 3)).reshape(-1, H/9, W/16, 3)
        # Integer block shape (H and W must be divisible by 9 and 16 here)
        imgs = view_as_blocks(
            threshColor, block_shape=(H // 9, W // 16)).reshape(-1, H // 9, W // 16)
        nzns = [np.count_nonzero(x) / float(x.size) for x in imgs]
        #import ipdb; ipdb.set_trace()
        #nznLeft  = 3 * np.count_nonzero(threshColorMask[:, :W/3]) / float(threshColorMask.size)
        #nznCenter = 3 * np.count_nonzero(threshColorMask[:, W/3:2*W/3]) / float(threshColorMask.size)
        #nznRight = 3 * np.count_nonzero(threshColorMask[:, -W/3:]) / float(threshColorMask.size)
            # nznsLeft.append(nznLeft)
            # nznsCenter.append(nznCenter)
            # nznsRight.append(nznRight)
        features.append(nzns)
        if fn % 100 == 0:
            cv2.putText(frame, "#f %d" %
                        fn, (10, 30), FONT, 1, (255, 255, 255), 1, cv2.LINE_AA)
            #cv2.putText(threshColorMask, "%d"%(int(100*nznLeft)) ,(W/6, 100), FONT, 1,(255,255,255),1,cv2.LINE_AA)
            #cv2.putText(threshColorMask, "%d"%(int(100*nznCenter)) ,(W/2, 100), FONT, 1,(255,255,255),1,cv2.LINE_AA)
            #cv2.putText(threshColorMask, "%d"%(int(100*nznRight)) ,(5*W/6, 100), FONT, 1,(255,255,255),1,cv2.LINE_AA)
            mask = np.array(nzns).reshape(9, 16)
            cv2.imshow('mask', mask)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    pickle.dump(features, open("segment_features_full.p", "wb"))
    cap.release()
    cv2.destroyAllWindows()
Example #35
def _downsample(array, factors, sum=True):
    """Performs downsampling with integer factors.

    Parameters
    ----------
    array : ndarray
        Input n-dimensional array.
    factors: tuple
        Tuple containing downsampling factor along each axis.
    sum : bool
        If True, downsampled element is the sum of its corresponding
        constituent elements in the input array. Default is True.

    Returns
    -------
    array : ndarray
        Downsampled array with same number of dimensions as that of input
        array.

    """

    pad_size = []
    if len(factors) != array.ndim:
        raise ValueError("'factors' must have the same length "
                         "as 'array.shape'")
    else:
        for i in range(len(factors)):
            if array.shape[i] % factors[i] != 0:
                pad_size.append(factors[i] - (array.shape[i] % factors[i]))
            else:
                pad_size.append(0)

    for i in range(len(pad_size)):
        array = _pad_asymmetric_zeros(array, pad_size[i], i)

    out = view_as_blocks(array, factors)
    block_shape = out.shape

    if sum:
        for i in range(len(block_shape) // 2):
            out = out.sum(-1)
    else:
        for i in range(len(block_shape) // 2):
            out = out.mean(-1)
    return out
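When the factors divide the array shape evenly, the padding step is a no-op and the function reduces to summing (or averaging) over the trailing block axes. A small worked example of that case, assuming only numpy and scikit-image:

import numpy as np
from skimage.util import view_as_blocks

a = np.arange(16).reshape(4, 4)
blocks = view_as_blocks(a, (2, 2))          # shape (2, 2, 2, 2)
pooled = blocks.sum(axis=(-1, -2))          # 2x2 block sums: [[10, 18], [42, 50]]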
Example #36
def frequency_max_pooling(spec, normalise=True):
    """
    Using code adapted from:
    http://scikit-image.org/docs/dev/auto_examples/plot_view_as_blocks.html
    """

    if normalise:
        im_norm = (spec - spec.mean()) / spec.var()
    else:
        im_norm = spec

    view = view_as_blocks(im_norm, (8, spec.shape[1]))
    flatten_view = view.reshape(view.shape[0], view.shape[1], -1)

    A = np.max(flatten_view, axis=2).flatten()
    B = np.var(flatten_view, axis=2).flatten()
    C = np.mean(flatten_view, axis=2).flatten()

    return np.hstack((A, B, C))
Example #37
    def get_subvolume(self, box_zyx, scale=0):
        """
        Extract the subvolume, specified in new (scaled) coordinates from the
        original volume service, then scale result accordingly before returning it.
        """
        true_scale = scale + self.scale_delta
        
        if true_scale in self.original_volume_service.available_scales:
            # The original source already has the data at the necessary scale.
            return self.original_volume_service.get_subvolume( box_zyx, true_scale )

        # Start with the closest scale we've got
        base_scales = np.array(self.original_volume_service.available_scales)
        i_best = np.abs(base_scales - true_scale).argmin()
        best_base_scale = base_scales[i_best]
        
        delta_from_best = true_scale - best_base_scale

        if delta_from_best > 0:
            orig_box_zyx = box_zyx * 2**delta_from_best
            orig_data = self.original_volume_service.get_subvolume(orig_box_zyx, best_base_scale)

            if self.dtype == np.uint64:
                # Assume that uint64 means labels.
                downsampled_data, _ = downsample_labels_3d( orig_data, 2**self.scale_delta )
            else:
                downsampled_data = downsample_raw( orig_data, self.scale_delta )[-1]
            return downsampled_data
        else:
            upsample_factor = int(2**-delta_from_best)
            orig_box_zyx = downsample_box(box_zyx, np.array(3*(upsample_factor,)))
            orig_data = self.original_volume_service.get_subvolume(orig_box_zyx, best_base_scale)

            orig_shape = np.array(orig_data.shape)
            upsampled_data = np.empty( orig_shape * upsample_factor, dtype=self.dtype )
            v = view_as_blocks(upsampled_data, 3*(upsample_factor,))
            v[:] = orig_data[:,:,:,None, None, None]

            relative_box = box_zyx - upsample_factor*orig_box_zyx[0]
            requested_data = upsampled_data[box_to_slicing(*relative_box)]

            # Force contiguous so caller doesn't have to worry about it.
            return np.asarray(requested_data, order='C')
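The upsampling branch above writes through a block view: each coarse voxel is broadcast into the corresponding upsample_factor^3 block of the output array. A standalone sketch of that trick (assuming a contiguous destination array, so the block view shares its memory):

import numpy as np
from skimage.util import view_as_blocks

src = np.arange(8).reshape(2, 2, 2)
up = np.empty((4, 4, 4), dtype=src.dtype)
v = view_as_blocks(up, (2, 2, 2))            # a (2, 2, 2) grid of 2x2x2 blocks
v[:] = src[:, :, :, None, None, None]        # broadcast each voxel into its block
assert (up[0:2, 0:2, 0:2] == src[0, 0, 0]).all()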
Example #38
def demo_upsample_nearest():
    a = sp.misc.lena() / 1.
    a.shape = a.shape[:2] + (1,)
    print(a.shape)
    #print np.tile(a, 2).shape
    #a = np.dstack((a, -a))
    N = 96
    a = np.tile(a, N)
    a[:,:,95] = -a[:,:,95]

    #r = np.tile(a, (2, 2, 1))
    #np.kron(a, np.ones((2,2,1))).shape

    # -- loop
    #a = a[:, :, 0].reshape(256, 256, 1)
    #r = np.empty((1024, 1024, 1))
    #r[0::2, 0::2] = a
    #r[0::2, 1::2] = a
    #r[1::2, 0::2] = a
    #r[1::2, 1::2] = a

    # -- block view
    r = np.empty((1024, 1024, N))
    b = view_as_blocks(r, (2, 2, 1))
    print(b.shape)

    a2 = a.reshape(a.shape + (1, 1, 1))
    #a[:, :, :, np.newaxis, np.newaxis, np.newaxis]
    b[:] = a2
    #import IPython; ipshell = IPython.embed; ipshell(banner1='ipshell')


    #b2 = b.swapaxes(1, 3).reshape(r.shape)
    b2 = b.transpose((0, 3, 1, 4, 2, 5)).reshape(r.shape)

    pl.matshow(b2[:,:,0])
    pl.matshow(b2[:,:,1])
    pl.matshow(b2[:,:,95])
    pl.show()
Example #39
def extract_blocks(img_gray):
  patch_sizes = [128, 256]
  result = []
  for size in patch_sizes:
    blocks = view_as_blocks(img_gray, (size, size))
    for row in range(blocks.shape[0]):
      for col in range(blocks.shape[1]):
        block = blocks[row, col]
        pred = rc.predict(block)
        if pred is None:
          continue
        pred_prob = rc.predict_prob(block)[0]
        top1 = numpy.argsort(pred_prob)[-1:][0]
        top1_prob = pred_prob[top1]
        tops = numpy.argsort(pred_prob)[-5:]
        tops = tops[::-1]
        result.append((top1_prob, pred[0], row, col, size, block))
        #print "Size", size, "Prediction:", pred, "Argmax:", numpy.argmax(pred_prob), "Class:", classes[numpy.argmax(pred_prob)]
        #for idx, top in enumerate(tops):
        #  print "", idx+1, ": ", classes[top], " : ", pred_prob[top]
        #print "="*80
  return result
Example #40
 if 'zz_blank' in fname:
     continue
 out_fname = fname + ext
 if path.exists(out_fname):
     continue
 arr = misc.imread(fname, flatten=True)
 arr = misc.imresize(arr, in_shape).astype('float32')
 arr -= arr.min()
 arr /= arr.max()
 farr = model.transform(
     arr,
     pad_apron=pad_apron, interleave_stride=interleave_stride
     )
 if reduce is not None:
     assert block is not None
     farr_b = view_as_blocks(farr, block + (1,))
     farr_br = farr_b.reshape(farr_b.shape[:3] + (-1,))
     if reduce == 'mean':
         farr_brm = farr_br.mean(-1)
     elif reduce == 'min':
         farr_brm = farr_br.min(-1)
     elif reduce == 'max':
         farr_brm = farr_br.max(-1)
     elif reduce == 'median':
         farr_brm = np.median(farr_br, axis=-1)
     else:
         raise ValueError("'%s' reduce not understood" % reduce)
     farr = farr_brm
 print(farr.shape, out_fname)
 np.save(out_fname, farr)
 end = time.time()
Example #41
def test_view_as_blocks_block_not_a_tuple():
    A = np.arange(10)
    with testing.raises(TypeError):
        view_as_blocks(A, [5])
Example #42
def test_view_as_blocks_negative_shape():
    A = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(A, (-2,))
Example #43
def test_view_as_blocks_block_too_large():
    A = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(A, (11,))
Example #44
def test_view_as_blocks_1D_array_wrong_block_shape():
    A = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(A, (3,))
Example #45
def test_view_as_blocks_1D_array():

    A = np.arange(10)
    B = view_as_blocks(A, (5,))
    assert_equal(B, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
Example #46
def test_view_as_blocks_block_not_a_tuple():

    A = np.arange(10)
    view_as_blocks(A, [5])
Example #47
def test_view_as_blocks_wrong_block_dimension():

    A = np.arange(10)
    view_as_blocks(A, (2, 2))
Example #48
def test_view_as_blocks_1D_array_wrong_block_shape():

    A = np.arange(10)
    view_as_blocks(A, (3,))
Example #49
def test_view_as_blocks_block_too_large():

    A = np.arange(10)
    view_as_blocks(A, (11,))
Example #50
def test_view_as_blocks_negative_shape():

    A = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(A, (-2,))
Example #51
0
def test_view_as_blocks_wrong_block_dimension():
    A = np.arange(10)
    with testing.raises(ValueError):
        view_as_blocks(A, (2, 2))
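# A quick, runnable illustration (not part of the test suite above) of why these
# tests expect errors: block_shape must be a tuple of positive ints whose entries
# exactly tile the array along every dimension.
import numpy as np
from skimage.util import view_as_blocks

A = np.arange(10)
print(view_as_blocks(A, (5,)).shape)    # (2, 5): 10 is divisible by 5
try:
    view_as_blocks(A, (3,))             # 10 % 3 != 0
except ValueError as err:
    print("ValueError:", err)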
def main():
    print "Load data"

    sourceHeight = 'height_masked_final.asc'
    sourceBiomass = 'ndvi_masked_final.asc'

    sourceCoverageModel = 'testCov.txt'
    sourceHeightModel = 'heightModel.txt'
    sourceLCC = "LCC.asc"

    heightGrid = numpy.loadtxt(sourceHeight, skiprows=6)
    heightGrid = heightGrid[500:1050, 300:750]
    #    heightGrid = heightGrid[640:685,455:500]
    #    heightGrid = heightGrid[860:890,600:700]
    rgb = (heightGrid - numpy.min(heightGrid)) / (numpy.max(heightGrid) - numpy.min(heightGrid))
    rgb *= 255
    heightRGBA = numpy.zeros((heightGrid.shape[0], heightGrid.shape[1], 3), dtype=numpy.uint8)
    heightRGBA[:, :, 0:3] = rgb[:, :, numpy.newaxis]
    # misc.imsave('heightMap_Paulinapolder.png',heightRGBA)
    ndviGrid = numpy.loadtxt(sourceBiomass, skiprows=6)
    ndviGrid = ndviGrid[500:1050, 300:750]
    #    ndviGrid = ndviGrid[640:685,455:500]
    #    ndviGrid = ndviGrid[860:890,600:700]
    rgb = (ndviGrid - numpy.min(ndviGrid)) / (numpy.max(ndviGrid) - numpy.min(ndviGrid))
    rgb *= 255
    ndviRGBA = numpy.zeros((ndviGrid.shape[0], ndviGrid.shape[1], 3), dtype=numpy.uint8)
    ndviRGBA[:, :, 0:3] = rgb[:, :, numpy.newaxis]
    misc.imsave('ndviMap_Paulinapolder.png', ndviRGBA)
    lccGrid = numpy.loadtxt(sourceLCC, skiprows=6)
    lccGrid = lccGrid[500:1050, 300:750]
    heightModelGrid = numpy.loadtxt(sourceHeightModel)
    coverageModelGrid = numpy.loadtxt(sourceCoverageModel)
    PaulinaPolder = False
    NDVI = True
    LCC = False
    if PaulinaPolder:
        if NDVI and LCC:
            vegetationMask = ndviGrid > 0
        elif NDVI:
            vegetationMask = ndviGrid > 0.08  # 0.02 demo
        #             figure()
        #             tempveg = numpy.zeros((ndviGrid.shape[0],ndviGrid.shape[1],3))
        #             tempveg[:,:,:] = (ndviGrid[:,:,numpy.newaxis]+1) / 2.0;
        #             imshow(tempveg)
        #             show()
        #             figure()
        #             tempveg = numpy.zeros((vegetationMask.shape[0],vegetationMask.shape[1],3))
        #             tempveg[:,:,:] = vegetationMask[:,:,numpy.newaxis];
        #             imshow(tempveg)
        #             show()
        #             figure()
        #             tempveg = numpy.zeros((vegetationMask.shape[0],vegetationMask.shape[1],3))
        #             tempveg[:,:,:] = heightGrid[:,:,numpy.newaxis] / numpy.max(heightGrid);
        #             imshow(tempveg)
        #             show()
        elif LCC:
            vegetationMask = lccGrid > 0
        heightValues = heightGrid[vegetationMask]
        baseValues = ndviGrid[vegetationMask]
        lccValues = lccGrid[vegetationMask]
        lengthX, lengthY = heightGrid.shape
        nTypes = 7
        area = "NDVI"

        lXTemp = lengthX
        lYTemp = lengthY
        if lengthX % 2 == 1:
            lXTemp += 1
        if lengthY % 2 == 1:
            lYTemp += 1
        vegetationMaskExtended = np.zeros((lXTemp, lYTemp), dtype=bool)
        vegetationMaskExtended[0:lengthX, 0:lengthY] = vegetationMask
        res = 2.0  # 2.0
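        # Downsample the vegetation mask onto the Wang grid: view the padded
        # mask as res x res blocks; a Wang cell counts as vegetated if any of
        # its pixels is vegetated.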
        wangGridLengthX = int(np.ceil(lengthX / res))
        wangGridLengthY = int(np.ceil(lengthY / res))
        xWangIndices, yWangIndices = numpy.indices((wangGridLengthX, wangGridLengthY))
        blocks = view_as_blocks(vegetationMaskExtended, block_shape=(int(res), int(res)))
        blocks = blocks.reshape(wangGridLengthX, wangGridLengthY, int(res * res))
        blocks_summed = np.sum(blocks, axis=2)
        wangVegetationMask = blocks_summed > 0
        print("wvm", wangVegetationMask.shape)
        print("vm", vegetationMask.shape)
    else:
        vegetationMask = coverageModelGrid > 0
        heightValues = heightModelGrid[vegetationMask] * 1
        baseValues = coverageModelGrid[vegetationMask] * .01
        lengthX, lengthY = heightModelGrid.shape
        nTypes = 3
        heightValues += numpy.fabs(numpy.min(heightValues))
        minHeight = numpy.min(heightValues)
        maxHeight = numpy.max(heightValues)
        heightValues = (heightValues - minHeight) / (maxHeight - minHeight)
        rgb = (heightModelGrid - numpy.min(heightModelGrid)) / (numpy.max(heightModelGrid) - numpy.min(heightModelGrid))
        rgb *= 255
        heightRGBA = numpy.zeros((heightModelGrid.shape[0], heightModelGrid.shape[1], 3), dtype=numpy.uint8)
        heightRGBA[:, :, 0:3] = rgb[:, :, numpy.newaxis]
        # misc.imsave('heightMap_Ecomodel.png',heightRGBA)

        rgb = (coverageModelGrid - numpy.min(coverageModelGrid)) / (
            numpy.max(coverageModelGrid) - numpy.min(coverageModelGrid))
        rgb *= 255
        coverageRGBA = numpy.zeros((coverageModelGrid.shape[0], coverageModelGrid.shape[1], 3), dtype=numpy.uint8)
        coverageRGBA[:, :, 0:3] = rgb[:, :, numpy.newaxis]
        # misc.imsave('coverageMap_Ecomodel.png',coverageRGBA)

        heightValues *= 100
        area = "MODEL"

        tileIDs = numpy.arange(0, heightValues.shape[0])
        tileIDsGrid = numpy.zeros((lengthX, lengthY))
        tileIDsGrid[vegetationMask] = tileIDs

        wangRes = 1
        res = 1
        wangLengthX = lengthX * res
        wangLengthY = lengthY * res

        wangVegetationMask = numpy.zeros((wangLengthX, wangLengthY), dtype=bool)
        xWangIndices, yWangIndices = numpy.indices((wangLengthX, wangLengthY))
        xw = numpy.trunc(xWangIndices / res).astype(int)
        yw = numpy.trunc(yWangIndices / res).astype(int)
        wangVegetationMask[xWangIndices, yWangIndices] = vegetationMask[xw, yw]
        print(wangVegetationMask.shape)

    nTiles = heightValues.shape[0]
    plantTypeLevels = numpy.zeros((nTypes))

    if PaulinaPolder:
        if LCC:
            area2 = "LCC"
        elif NDVI:
            area2 = "NDVI"
        heightMatrix = getHeightMatrix("NDVI")
        coverageMatrix = getCoverageMatrix(area2) * .01
        hurstMatrix = getHurstMatrix("NDVI")
    else:
        heightMatrix = getHeightMatrix("MODEL")
        coverageMatrix = getCoverageMatrix("MODEL") * .01
        hurstMatrix = getHurstMatrix("MODEL")

    unit = 1.0
    heightBins = numpy.zeros((2, heightValues.shape[0]))
    heightBins[:, :] = (heightValues / unit)[numpy.newaxis, :]
    heightBins = numpy.trunc(heightBins)
    heightBins[1, :] += 1
    heightBins *= unit
    heightBins = numpy.unique(heightBins)
    #     print heightBins
    heightCoverage = numpy.zeros((nTypes, heightBins.size))
    heightHurst = numpy.zeros((nTypes, heightBins.size))

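    # Interpolate each plant type's coverage and Hurst curves from the height
    # matrix onto the regular height bins.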
    for i in range(0, nTypes):
        fc = interpolate.interp1d(heightMatrix, coverageMatrix[i], kind='slinear', bounds_error=False, fill_value=0)
        fh = interpolate.interp1d(heightMatrix, hurstMatrix[i], kind='slinear', bounds_error=False, fill_value=0)
        heightCoverage[i] = fc(heightBins)
        heightHurst[i] = fh(heightBins)

    print "Declare data"
    coveragePerTile = numpy.zeros((nTypes, nTiles), dtype=float)
    hurstPerTile = numpy.zeros((nTypes, nTiles), dtype=float)
    constraintsPerTile = numpy.zeros((nTypes, nTiles), dtype=float)
    compositionPerTile = numpy.zeros((nTypes, nTiles), dtype=float)
    ndviPerTile = numpy.ones((nTypes, nTiles), dtype=float)

    print "Get data"
    for i in range(0, nTypes):
        fc = interpolate.interp1d(heightBins, heightCoverage[i], kind='slinear', bounds_error=False, fill_value=0)
        fh = interpolate.interp1d(heightBins, heightHurst[i], kind='slinear', bounds_error=False, fill_value=0)
        coveragePerTile[i] = fc(heightValues)
        hurstPerTile[i] = fh(heightValues)
        if NDVI and LCC:
            ndviPerTile[i] = calculateCovNDVI(area, i, baseValues)
            lccTiles = calculateCovLCC(i, lccValues)
            constraintsPerTile[i] = numpy.minimum(ndviPerTile[i], lccTiles)
            compositionPerTile[i] = numpy.minimum(coveragePerTile[i], constraintsPerTile[i])
        elif NDVI:
            ndviPerTile[i] = calculateCovNDVI(area, i, baseValues)
            constraintsPerTile[i] = ndviPerTile[i]
            compositionPerTile[i] = numpy.minimum(coveragePerTile[i], constraintsPerTile[i])
        elif LCC:
            constraintsPerTile[i] = calculateCovLCC(i, lccValues)
            compositionPerTile[i] = numpy.minimum(coveragePerTile[i], constraintsPerTile[i])

    if LCC:
        if NDVI:
            tempCov = numpy.minimum(ndviPerTile, coveragePerTile)
        else:
            tempCov = numpy.minimum(ndviPerTile, coveragePerTile)  # coveragePerTile.copy()#numpy.ones((nTypes,nTiles))
        lccGroups = 4
        noCoveragePerTile = 1 - numpy.sum(tempCov, axis=0)
        noCoveragePerTile[noCoveragePerTile < 0] = 0
        for i in range(0, lccGroups):
            lccMask = numpy.where(lccValues == i + 1)[0]
            plantsLCC = getLCCTypes(i + 1)
            tcon = constraintsPerTile[:, lccMask]
            tcon = tcon[plantsLCC, :]
            totCovlcc = numpy.sum(tcon, axis=0)
            for j in range(0, plantsLCC.shape[0]):
                compositionPerTile[plantsLCC[j], lccMask] /= (totCovlcc + noCoveragePerTile[lccMask])
    elif NDVI:
        totCov = numpy.sum(compositionPerTile, axis=0)
        noCoveragePerTile = 1 - totCov
        noCoveragePerTile[noCoveragePerTile < 0] = 0
        compositionPerTile /= totCov + noCoveragePerTile

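    # Collect the Wang-grid cells flagged as vegetated; these seed the
    # corner-based point generation below.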
    xWang = xWangIndices[wangVegetationMask]
    yWang = yWangIndices[wangVegetationMask]
    nWangTiles = xWang.shape[0]
    print "Start Point Generation"
    if PaulinaPolder:
        tileIDs = numpy.arange(0, nTiles)
        tileIDsGrid = numpy.zeros((lengthX, lengthY))
        tileIDsGrid[vegetationMask] = tileIDs
        dist = numpy.array([.4]) / res  # .4 standard # .8 medium# 1.6 low
        points = wangtiles.generatePoints_cornerbased(wangGridLengthX, wangGridLengthY, nWangTiles, xWang, yWang, dist,
                                                      1, 4)
        points[:, 0:2] *= res
        trunckedPoints = np.trunc(points[:, 0:2]).astype(int)
        cull = vegetationMaskExtended[trunckedPoints[:, 0], trunckedPoints[:, 1]]
        points = points[cull, :]
        trunckedPoints = trunckedPoints[cull, :]
        points[:, 3] = tileIDsGrid[trunckedPoints[:, 0], trunckedPoints[:, 1]]
    else:
        dist = numpy.array([.33]) / wangRes
        points = wangtiles.generatePoints_cornerbased(wangLengthX, wangLengthY, nWangTiles, xWang, yWang, dist, 1, 4)
        points[:, 0:2] *= wangRes
        trunckedPoints = np.trunc(points[:, 0:2] / (res * wangRes)).astype(int)
        points[:, 3] = tileIDsGrid[trunckedPoints[:, 0], trunckedPoints[:, 1]]

    figure(constrain_navigation=True, constrain_ratio=True, antialiasing=True)
    # imshow(numpy.rot90(tempveg,1))
    plot(points[:, 0], points[:, 1], color='w', primitive_type='POINTS', marker='.', marker_size=6)
    show()
    totalPoints = points.shape[0]
    print(totalPoints)
    print(totalPoints / (nTiles * 1.0))

    hurstPoints = hurstPerTile[:, points[:, 3].astype(int)]
    coveragePoints = compositionPerTile[:, points[:, 3].astype(int)]
    heightPoints = heightValues[points[:, 3].astype(int)]
    nocoveragePoints = noCoveragePerTile[points[:, 3].astype(int)]
    basePoints = constraintsPerTile[:, points[:, 3].astype(int)]

    print "Generate MultiFractal Noise"
    scaleFactor = math.sqrt(totalPoints / (nTiles * 1.0)) * 0.5
    cNoisePoints = fractalnoise.generateFractalNoise(points[:, 0:2] * scaleFactor, nTypes, hurstPoints, hurstPoints)

    avgFreqT = numpy.average(hurstPoints, axis=1)
    meanFreq = numpy.average(avgFreqT)
    sd = numpy.fabs(avgFreqT - meanFreq)
    order = numpy.argsort(sd)[::-1]

    cNoisePoints2 = cNoisePoints.copy()
    maxHurst = numpy.max(cNoisePoints2, axis=1)
    minHurst = numpy.min(cNoisePoints2, axis=1)
    noiseTypes = (cNoisePoints2 - minHurst[:, numpy.newaxis]) / (
        maxHurst[:, numpy.newaxis] - minHurst[:, numpy.newaxis])
    for i in range(0, nTypes):
        noiseColors = numpy.zeros((points.shape[0], 3))
        noiseColors += noiseTypes[i, :, numpy.newaxis]
        figure(constrain_navigation=True, constrain_ratio=True, antialiasing=True)
        plot(points[:, 0], points[:, 1], primitive_type='POINTS', color=noiseColors, marker='.', marker_size=6)
        show()

    print "Start classification process"
    pointType = PlantSpeciesGeneration.speciesGeneration(cNoisePoints, coveragePoints.copy(), heightPoints, basePoints, points, heightCoverage,
                               nocoveragePoints.copy(), unit, order, plantTypeLevels)

    #     pointType = classify_final(cNoisePoints,coveragePoints, heightPoints,diffHeight,points,False)

    print "visualize and writing"
    resultMask = pointType > 0;
    fractalValues = cNoisePoints.copy();
    resultTotPoints = numpy.sum(resultMask);
    maxfract = numpy.max(fractalValues, axis=1);
    fractalValues /= maxfract[:, numpy.newaxis];
    selectedFractalValues = fractalValues[:, (pointType - 1)];

    results = numpy.zeros((resultTotPoints), dtype=[('x', numpy.float64), ('y', numpy.float64), ('t', numpy.int64)])
    results['x'] = points[resultMask, 0]
    results['y'] = points[resultMask, 1]
    results['t'] = pointType[resultMask]
    #    results['f'] = selectedFractalValues[resultMask];

    pointColors = getColor(pointType[resultMask])

    # numpy.savetxt('locations_ecomodel.txt',results, delimiter=" ", fmt="%s")
    tot2 = numpy.sum(coveragePoints, axis=0) + nocoveragePoints
    tot2[tot2 == 0] = 1
    coveragePoints /= tot2
    tot = float(points.shape[0])
    print(numpy.sum(nocoveragePoints) / tot, numpy.sum(pointType == 0) / tot)
    print(numpy.sum(coveragePoints[0, :]) / tot, numpy.sum(pointType == 1) / tot)
    print(numpy.sum(coveragePoints[1, :]) / tot, numpy.sum(pointType == 2) / tot)
    print(numpy.sum(coveragePoints[2, :]) / tot, numpy.sum(pointType == 3) / tot)

    if PaulinaPolder:
        print(numpy.sum(coveragePoints[3, :]) / tot, numpy.sum(pointType == 4) / tot)
        print(numpy.sum(coveragePoints[4, :]) / tot, numpy.sum(pointType == 5) / tot)
        print(numpy.sum(coveragePoints[5, :]) / tot, numpy.sum(pointType == 6) / tot)
        print(numpy.sum(coveragePoints[6, :]) / tot, numpy.sum(pointType == 7) / tot)

    if PaulinaPolder:
        interval = int(20)
        bins = 31
    else:
        interval = int(5)
        bins = 21
    coverageStatistics = numpy.zeros((nTypes, 2, bins))
    for t in range(0, nTypes):
        for i in range(0, bins):
            b1 = i * interval
            b2 = i * interval + interval
            m1 = heightPoints >= b1
            m2 = heightPoints < b2
            m = numpy.logical_and(m1, m2)
            tot = float(numpy.sum(m))
            if tot == 0:
                tot = 1
            coverageStatistics[t, 0, i] = numpy.sum(coveragePoints[t, m]) / tot
            coverageStatistics[t, 1, i] = numpy.sum(pointType[m] == t + 1) / tot
        print(coverageStatistics[t, :])

    if PaulinaPolder:
        figure(constrain_navigation=True, constrain_ratio=True, antialiasing=True)
        plot(results['x'], results['y'], primitive_type='POINTS', color=pointColors, marker='.', marker_size=6)
        show()
    else:
        figure(constrain_navigation=True, constrain_ratio=False, antialiasing=True)
        plot(results['x'], results['y'], primitive_type='POINTS', color=pointColors, marker='.', marker_size=5)
        show()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm

from skimage import data
from skimage import color
from skimage.util.shape import view_as_blocks


# -- get `astronaut` from skimage.data in grayscale
l = color.rgb2gray(data.astronaut())

# -- size of blocks
block_shape = (4, 4)

# -- see `astronaut` as a matrix of blocks (of shape
#    `block_shape`)
view = view_as_blocks(l, block_shape)

# -- collapse the last two dimensions into one
flatten_view = view.reshape(view.shape[0], view.shape[1], -1)

# -- resampling `astronaut` by taking either the `mean`,
#    the `max` or the `median` value of each block.
mean_view = np.mean(flatten_view, axis=2)
max_view = np.max(flatten_view, axis=2)
median_view = np.median(flatten_view, axis=2)
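
# Note (not in the original snippet): skimage.measure.block_reduce performs the
# same block-wise pooling in one call, without the manual reshape.
from skimage.measure import block_reduce
mean_view_alt = block_reduce(l, block_shape, np.mean)  # matches mean_view (512 is a multiple of 4)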

# -- display resampled images
fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True)
ax0, ax1, ax2, ax3 = axes.ravel()

ax0.set_title("Original rescaled with\n spline interpolation (order=3)")