import numpy as np
from queue import SimpleQueue

# Helper routines referenced below (getChannels, iterateImage, fixedSizeQueue,
# HarrisPointArray, ChannelRange, ...) are assumed to be defined elsewhere in
# this module/package.


def upsizeImage(img: np.ndarray, target_size: tuple, interpolation_method=None):
    img, height, width, channels, im_size = getChannels(img)
    target_hei, target_wid, target_chn = target_size
    if target_chn != channels:
        print('Target channels not equal to input.')
        return
    px_transform = createPixelTransform((height, width), target_size)
    output_img = np.zeros(target_size, dtype=np.uint8)
    (i, j, k, float_input_coord, current_target_inds, latest_target_inds,
     pixel_window, area_weights, value_weights) = prepareInterpolationBookKeepingVars(img)
    for px in np.nditer(output_img, op_flags=['writeonly']):
        # Only refresh the source pixel window when the mapped input
        # coordinate has moved to a new source pixel.
        if np.any(np.int32(np.floor(float_input_coord)) != latest_target_inds):
            getTargetIndices(float_input_coord, current_target_inds)
            extractPxWindow(current_target_inds[0], img, pixel_window)
            latest_target_inds = current_target_inds[0]
        px[...] = areaInterpolation(pixel_window, float_input_coord,
                                    current_target_inds, k, area_weights,
                                    value_weights)
        i, j, k = iterateImage(i, j, k, channels, target_wid)
        # Recompute the fractional input coordinate once per output pixel.
        if k % channels == 0:
            float_input_coord = px_transform(i, j)
    return output_img

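# A minimal, self-contained sketch of the idea behind upsizeImage: map each
# output pixel back to a fractional input coordinate and blend the surrounding
# 2x2 window by area. Illustration only; it does not use the
# createPixelTransform / areaInterpolation helpers above, whose exact
# weighting may differ.
def _bilinear_upsize_sketch(img, target_hw):
    src_h, src_w = img.shape[:2]
    dst_h, dst_w = target_hw
    out = np.zeros((dst_h, dst_w) + img.shape[2:], dtype=img.dtype)
    for oi in range(dst_h):
        for oj in range(dst_w):
            # Fractional source coordinate for this output pixel.
            fi = oi * (src_h - 1) / max(dst_h - 1, 1)
            fj = oj * (src_w - 1) / max(dst_w - 1, 1)
            i0, j0 = int(np.floor(fi)), int(np.floor(fj))
            i1, j1 = min(i0 + 1, src_h - 1), min(j0 + 1, src_w - 1)
            di, dj = fi - i0, fj - j0
            # Area weights of the four neighbouring source pixels.
            out[oi, oj] = ((1 - di) * (1 - dj) * img[i0, j0]
                           + (1 - di) * dj * img[i0, j1]
                           + di * (1 - dj) * img[i1, j0]
                           + di * dj * img[i1, j1])
    return out
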
def horizontalOutputConvolution(input_img: np.ndarray, output_img: np.ndarray,
                                gauss_conv: np.ndarray, coordinateTransform):
    i, j, k = 0, 0, 0
    update_lag = len(gauss_conv) // 2
    _, height, width, channels, _ = getChannels(input_img)
    conv_queues = [fixedSizeQueue(gauss_conv, 255, dtype=np.float32)
                   for _ in range(channels)]
    valid_width_idx = getValidTransformCordinates(width, update_lag, False,
                                                  coordinateTransform)
    valid_height_idx = getValidTransformCordinates(height, 0, True,
                                                   coordinateTransform)
    for px in np.nditer(input_img, order='C', op_flags=['readonly']):
        conv_queues[k].update(px)
        # Once the kernel is primed, write the convolved value to the output
        # coordinate that the transform maps this (lagged) pixel to.
        if j >= update_lag:
            if valid_height_idx[i] and valid_width_idx[j]:
                out_i, out_j = np.uint32(
                    np.floor(coordinateTransform(i, j - update_lag)))
                output_img[out_i, out_j, k] = conv_queues[k].convolve()
        # At the end of each row, flush the pixels still held in the queues.
        if j == width - 1 and k == 0 and valid_height_idx[i]:
            outputStrideConvRemaining(output_img, width, valid_width_idx,
                                      conv_queues, channels, i, update_lag,
                                      True, coordinateTransform)
        i, j, k = iterateImage(i, j, k, channels, width)
    return output_img

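# For reference, a dense sketch of what the queue-based horizontal pass above
# computes: sliding a 1-D kernel along each row of each channel. Plain numpy
# only; the fixedSizeQueue / coordinateTransform machinery above exists so the
# same pass can also stride or remap coordinates, which this sketch omits.
def _row_convolve_sketch(img, kernel):
    half = len(kernel) // 2
    out = img.astype(np.float32)
    h, w = img.shape[:2]
    for r in range(h):
        for c in range(half, w - half):
            window = img[r, c - half:c + half + 1].astype(np.float32)
            # Weighted sum of the horizontal window, per channel.
            out[r, c] = np.tensordot(kernel, window, axes=([0], [0]))
    return out.astype(img.dtype)
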
def enhanceContrast(img: np.ndarray, channel_range: ChannelRange):
    img, height, width, channels, im_size = getChannels(img)
    scalars = getScalars(channel_range, channels)
    i, j, k = 0, 0, 0
    for px in np.nditer(img):
        # Rescale each channel value into its requested range.
        img[i, j, k] = putPixelInRange(px, channel_range[k], scalars[k])
        i, j, k = iterateImage(i, j, k, channels, width)
    return img

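# A hedged sketch of the per-channel rescaling enhanceContrast relies on. The
# ChannelRange / putPixelInRange helpers are defined elsewhere; the assumption
# here is a simple linear stretch of one channel onto [lo, hi].
def _stretch_channel_sketch(channel, lo=0, hi=255):
    c_min, c_max = channel.min(), channel.max()
    if c_max == c_min:
        # Flat channel: nothing to stretch, clamp to the lower bound.
        return np.full_like(channel, lo)
    scaled = (channel.astype(np.float32) - c_min) / (c_max - c_min)
    return (lo + scaled * (hi - lo)).astype(channel.dtype)
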
def getFeatureVectorsFromCornerPoints(harris_points: HarrisPointArray,
                                      feature_images: np.ndarray):
    _, _, width, channels, _ = getChannels(feature_images)
    n_hist_buckets = harris_points.getHistogramBuckets(channels)
    i, j, k = 0, 0, 0
    for px in np.nditer(feature_images, order='C', op_flags=['readwrite']):
        # Look up the matching corner point once per pixel (on channel 0),
        # then accumulate every channel into that point's histogram.
        if k == 0:
            matched_point = harris_points.checkAllDistances(i, j)
        if matched_point:
            matched_point.updateHistogram(n_hist_buckets, k, px)
        i, j, k = iterateImage(i, j, k, channels, width)
    harris_points.normaliseAllHistograms()
    return harris_points

def horizontalConvolution(output_img: np.ndarray, update_lag: int,
                          conv_queues: list, update_queues: list):
    i, j, k = 0, 0, 0
    _, _, width, channels, _ = getChannels(output_img)
    for px in np.nditer(output_img, order='C', op_flags=['readwrite']):
        conv_queues[k].update(px)
        update_queues[k].put(px)
        i, j, k = iterateImage(i, j, k, channels, width)
        # Once the kernel is primed, write the convolved value back into the
        # pixel sitting update_lag positions behind the cursor.
        if j > update_lag:
            prev_px = update_queues[k].get()
            prev_px[...] = conv_queues[k].convolve()
        # At the start of each new row, flush the pixels still queued.
        if (j == 0) and (k == 0):
            convolveRemainingPixels(update_queues, conv_queues, channels,
                                    update_lag)
    return output_img

def convertToHSV(img: np.ndarray):
    _, _, width, channels, im_size = getChannels(img)
    if channels != 3:
        print('Error! 3 channels are expected for RGB2HSV')
        return
    current_colour = np.ndarray(3, dtype=np.uint8)
    current_queue = [None] * 3
    i, j, k = 0, 0, 0
    for px in np.nditer(img, op_flags=['readwrite']):
        current_colour[k] = px
        current_queue[k] = px
        i, j, k = iterateImage(i, j, k, channels, width)
        # Once all three channels of a pixel have been read, convert the RGB
        # triple to HSV and write it back in place.
        if k == 0:
            current_colour[:] = convertRGBToHSVColor(current_colour)
            for chan in range(3):
                current_queue[chan][...] = current_colour[chan]
    return img

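# A self-contained sketch of the per-pixel conversion convertRGBToHSVColor is
# assumed to perform, using the common 8-bit convention (H, S and V all scaled
# to 0..255). The exact scaling in the helper above may differ.
def _rgb_to_hsv_sketch(rgb):
    r, g, b = (float(x) / 255.0 for x in rgb)
    v = max(r, g, b)
    delta = v - min(r, g, b)
    s = 0.0 if v == 0 else delta / v
    if delta == 0:
        h = 0.0
    elif v == r:
        h = ((g - b) / delta) % 6
    elif v == g:
        h = (b - r) / delta + 2
    else:
        h = (r - g) / delta + 4
    h /= 6.0  # normalise hue to [0, 1)
    return (np.uint8(round(h * 255)),
            np.uint8(round(s * 255)),
            np.uint8(round(v * 255)))
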
def convertToGrey(img: np.ndarray, convert_to_value: bool = False):
    _, height, width, channels, im_size = getChannels(img)
    if channels != 3:
        print('Error! 3 channel image expected for RGB2GRAY')
        return
    weight_vector = np.ndarray(3, dtype=np.float32)
    if convert_to_value:
        # HSV-style value: equal weighting of the three channels.
        weight_vector.fill(0.333333)
    else:
        # Standard luminance weights for RGB to greyscale conversion.
        weight_vector[:] = [0.2989, 0.5870, 0.1140]
    i, j, k = 0, 0, 0
    for px in np.nditer(img, op_flags=['readwrite']):
        if k == 0:
            # Channel 0 accumulates the weighted sum for this pixel; replace
            # its original value so only the weighted sum remains.
            update_px = px
            update_px[...] = np.uint8(px * weight_vector[k])
        else:
            update_px[...] += np.uint8(px * weight_vector[k])
        i, j, k = iterateImage(i, j, k, 3, width)
    return img[:, :, 0]

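# Equivalent vectorised form of the weighted sum convertToGrey accumulates
# pixel by pixel, shown for comparison. Uses the same luminance weights.
def _to_grey_sketch(img, convert_to_value=False):
    weights = (np.full(3, 1.0 / 3.0) if convert_to_value
               else np.array([0.2989, 0.5870, 0.1140]))
    # (h, w, 3) @ (3,) -> (h, w) weighted sum per pixel.
    return (img.astype(np.float32) @ weights).astype(np.uint8)
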
def thresholdHarrisWithinDistance(harris_img: np.ndarray, corner_thresh: float,
                                  distance_thresh: float, feature_len: int):
    _, _, width, channels, _ = getChannels(harris_img)
    if channels != 3:
        print('Incorrect number of input channels')
        return
    harris_points = HarrisPointArray(feature_len, distance_thresh)
    i, j, k = 0, 0, 0
    for px in np.nditer(harris_img, order='C', op_flags=['readwrite']):
        # Channels 0 and 1 hold the gradient values; channel 2 holds the
        # corner response, which is thresholded here.
        if k == 0:
            curr_x = px
        elif k == 1:
            curr_y = px
        else:
            if px > corner_thresh:
                # Only keep the point if no previously accepted point lies
                # within distance_thresh of it.
                if not harris_points.checkAllDistances(i, j):
                    harris_points.addPoint(i, j, curr_y, curr_x, px)
        i, j, k = iterateImage(i, j, k, 3, width)
    return harris_points

def computeGradientPolyImage(img: np.ndarray, kernel_size: int):
    _, height, width, channels, _ = getChannels(img)
    if channels != 1:
        print('Incorrect input size')
        return
    # Horizontal and vertical first derivatives of the single-channel image.
    hori_deriv = oneDimConvolution(img.copy(), derivative(kernel_size), True)
    vert_deriv = oneDimConvolution(img.copy(), derivative(kernel_size), False)
    output_img = np.dstack((hori_deriv, vert_deriv,
                            np.zeros((height, width), dtype=np.float32)))
    i, j, k = 0, 0, 0
    for px in np.nditer(output_img, order='C', op_flags=['readwrite']):
        # Build the per-pixel gradient products:
        # channel 2 = Ix*Iy, channel 0 = Ix^2, channel 1 = Iy^2.
        if k == 0:
            current_x = px
        elif k == 1:
            current_y = px
        else:
            px[...] = current_x * current_y
            current_x[...] **= 2
            current_y[...] **= 2
        i, j, k = iterateImage(i, j, k, 3, width)
    return output_img

def computeCornerMeasure(img: np.ndarray, cornerMeasure):
    _, _, width, channels, _ = getChannels(img)
    if channels != 3:
        print('Incorrect number of input channels')
        return
    i, j, k = 0, 0, 0
    max_channels = {0: 0, 1: 0, 2: 0}
    min_channels = {0: 0, 1: 0, 2: 0}
    for px in np.nditer(img, order='C', op_flags=['readwrite']):
        if k == 0:
            current_x = px
        elif k == 1:
            current_y = px
        else:
            # Apply the corner measure to the three gradient-product channels
            # and track per-channel extrema for the final normalisation.
            output_vector = cornerMeasure(current_x, current_y, px)
            current_x[...] = output_vector[0]
            current_y[...] = output_vector[1]
            px[...] = output_vector[2]
            updateMinMaxChannels(output_vector, max_channels, min_channels)
        i, j, k = iterateImage(i, j, k, 3, width)
    divideByMaxChannels(img, max_channels, min_channels)
    return img

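# A sketch of a classic choice for the cornerMeasure callable: the Harris
# response R = det(M) - kappa * trace(M)^2, where M is the structure tensor
# built from the gradient products produced by computeGradientPolyImage
# (channel 0 = Ix^2, channel 1 = Iy^2, channel 2 = Ix*Iy). Whether the project
# uses exactly this measure, and this return layout, is an assumption.
def _harris_measure_sketch(ixx, iyy, ixy, kappa=0.04):
    det_m = ixx * iyy - ixy * ixy
    trace_m = ixx + iyy
    response = det_m - kappa * trace_m * trace_m
    # Keep the first two channels and place the response in channel 2, the
    # channel thresholdHarrisWithinDistance thresholds.
    return ixx, iyy, response
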
def horizontalStrideConvolution(output_img: np.ndarray, gauss_conv: np.ndarray,
                                coordinateTransform):
    i, j, k = 0, 0, 0
    _, _, width, channels, _ = getChannels(output_img)
    update_lag = len(gauss_conv) // 2
    conv_queues = [fixedSizeQueue(gauss_conv, 255, dtype=np.float32)
                   for _ in range(channels)]
    update_queues = [SimpleQueue() for _ in range(channels)]
    valid_width_idx = getValidTransformCordinates(width, update_lag, False,
                                                  coordinateTransform)
    for px in np.nditer(output_img, order='C', op_flags=['readwrite']):
        conv_queues[k].update(px)
        update_queues[k].put(px)
        # Write back the pixel update_lag positions behind the cursor, but
        # only at coordinates that survive the stride transform.
        if j >= update_lag:
            prev_px = update_queues[k].get()
            if valid_width_idx[j]:
                prev_px[...] = conv_queues[k].convolve()
        i, j, k = iterateImage(i, j, k, channels, width)
        # At the start of each new row, flush the remaining queued pixels.
        if j == 0 and k == 0:
            strideConvolveRemaining(width, channels, update_queues,
                                    conv_queues, valid_width_idx)
    return output_img