def resize(dstfile, srcfile, ratio=None):
    """Writes srcfile scaled by ratio (float) to dstfile.
    Resizes each dimension by the ratio, so the area of each frame is actually
    resized by ratio^2. If ratio is None, use 1.0 / gcd(width, height).
    1.0 / ratio should always be very close to an integer."""
    # shape = nframes, height, width, ?colordepth
    vidarr = vidread(srcfile)
    nframes, height, width = vidarr.shape[:3]
    if ratio is None:
        ratio = 1.0 / gcd(width, height)
    rate = int(1.0 / ratio + 0.5)  # offset is so we round to nearest
    print('resizing', srcfile, '-->', dstfile, 'by', ratio)
    print("vidarr.shape=", vidarr.shape)
    print("rate", rate, "ratio", ratio)
    # target dimensions must be integers, so derive them from rate
    smallervid = np.zeros([nframes, height // rate, width // rate, 3])
    print("smallervid.shape", smallervid.shape)
    # rate -> X - rate + 1, wat, -> (1.0 - ratio) * X + 1, wat wat
    # NOTE: do not run
    # averagewindow = np.ones([(1.0 - ratio) * height + 1, (1.0 - ratio) * width + 1]) * (ratio ** 2)
    averagewindow = np.ones([rate, rate]) * (ratio ** 2)
    print("averagewindow.shape", averagewindow.shape)
    for t in tqdm(range(nframes)):
        frame = vidarr[t]
        smallerframe = smallervid[t]
        for i in range(smallerframe.shape[0]):
            for j in range(smallerframe.shape[1]):
                window = frame[i * rate:(i + 1) * rate, j * rate:(j + 1) * rate]
                # set the pixel [i, j] rgb values
                smallerframe[i, j, 0] = conv2d(window[:, :, 0], averagewindow, mode='valid')
                smallerframe[i, j, 1] = conv2d(window[:, :, 1], averagewindow, mode='valid')
                smallerframe[i, j, 2] = conv2d(window[:, :, 2], averagewindow, mode='valid')
    return vidsave(dstfile, smallervid)
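# --- Hedged usage sketch (not part of the original resize code) -------------
# The inner double loop above is plain block averaging. Assuming conv2d is
# scipy.signal.convolve2d (as elsewhere in these snippets), the same result for
# one frame can be obtained with a reshape-based mean; _block_average_frame and
# the shapes below are illustrative only.
import numpy as np

def _block_average_frame(frame, rate):
    """Average non-overlapping rate x rate blocks of one H x W x 3 frame."""
    h = frame.shape[0] // rate * rate
    w = frame.shape[1] // rate * rate
    cropped = frame[:h, :w]
    return cropped.reshape(h // rate, rate, w // rate, rate, 3).mean(axis=(1, 3))

frame = np.random.rand(8, 12, 3)
print(_block_average_frame(frame, 4).shape)  # (2, 3, 3)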
def f(img):
    Gaussian = np.asarray([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 16
    lowpass3 = np.ones([3, 3]) / 9
    lowpass5 = np.ones([5, 5]) / 25
    return (conv2d(img, Gaussian, 'same'),
            conv2d(img, lowpass3, 'same', 'wrap'),
            conv2d(img, lowpass5, 'same', 'wrap'))
def hvLines(arr):
    """Expects arr to be time x height x width. Returns an edge-magnitude
    filtered array of the input (a simple 2x2 difference approximation of the
    gradient, not a true 3x3 Sobel kernel)."""
    lines = arr.copy()
    xx, yy = np.meshgrid((1, -1), (1, -1))
    for t in tqdm(range(arr.shape[0])):
        dx = conv2d(arr[t], xx, mode='same')
        dy = conv2d(arr[t], yy, mode='same')
        for h in range(arr.shape[1]):
            for w in range(arr.shape[2]):
                lines[t, h, w] = squaredSum(dx[h, w], dy[h, w])
    return lines
def fh(img, A=2):
    h1 = np.asarray([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) / 9
    h2 = np.asarray([[0.17, 0.67, 0.17], [0.67, -3.33, 0.67], [0.17, 0.67, 0.17]])

    def highboost(A=A):
        f = np.ones([3, 3]) * -1
        f[1, 1] = 9 * A - 1
        return f

    return (conv2d(img, highboost(), 'same'),
            conv2d(img, h1, 'same', 'wrap'),
            conv2d(img, h2, 'same', 'wrap'))
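# --- Hedged check (illustration only, not from the original code) -----------
# With A = 1 the high-boost kernel in fh() reduces to 9 * h1, i.e. high-boost
# filtering generalizes the Laplacian-style sharpening kernel h1.
import numpy as np

boost = np.full((3, 3), -1.0)
boost[1, 1] = 9 * 1 - 1
h1 = np.asarray([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) / 9
print(np.allclose(boost, 9 * h1))  # True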
def numpy_update(alive_map1, alive_map2, alive_map3, conv_kernel):
    """Perform one step of a cellular automaton with 3 channels"""
    # Convolve
    conv_result1 = conv2d(alive_map1 + alive_map2, conv_kernel, mode='same')
    conv_result2 = conv2d(alive_map2 + alive_map3, conv_kernel, mode='same')
    conv_result3 = conv2d(alive_map3 + alive_map1, conv_kernel, mode='same')
    # Apply game rules
    condition1 = (conv_result1 > 0) * 2 - 1
    condition2 = (conv_result2 > 0) * 2 - 1
    condition3 = (conv_result3 > 0) * 2 - 1
    np.copyto(alive_map1, condition1)
    np.copyto(alive_map2, condition2)
    np.copyto(alive_map3, condition3)
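# --- Hedged usage sketch for numpy_update() (not from the original code) ----
# The maps hold +1/-1 cell states and are updated in place; the 3x3 neighbour
# kernel below is an assumption, not taken from the original project.
import numpy as np

rng = np.random.default_rng(0)
maps = [rng.choice([-1, 1], size=(16, 16)) for _ in range(3)]
kernel = np.ones((3, 3))
kernel[1, 1] = 0
numpy_update(maps[0], maps[1], maps[2], kernel)
print(maps[0].min(), maps[0].max())  # values stay in {-1, +1}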
def findLaserCenter(img, ks=9):
    assert ks % 3 == 0
    # integer division so the slice bounds and comparisons below stay ints
    cl = ks // 2 + 1
    p = (ks - cl) // 2
    h0 = np.ones((ks, ks))
    h0[:, 0:p] = -1
    h0[:, -p:] = -1
    h2 = h0.transpose()
    cl = ks // 2
    xx, yy = np.meshgrid(np.arange(ks), np.arange(ks))
    h1 = np.ones((ks, ks)) * -1
    h1[(xx - yy < cl) & (yy - xx < cl)] = 1
    h3 = np.fliplr(h1)
    k0 = np.sum(h0)
    k1 = np.sum(h1)
    k2 = np.sum(h2)
    k3 = np.sum(h3)
    h0 = h0 / k0
    h2 = h2 / k2
    h1 = h1 / k1
    h3 = h3 / k3
    h, s, v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    x = v
    c0 = conv2d(x, h0, 'same', boundary='symm')
    c1 = conv2d(x, h1, 'same', boundary='symm')
    c2 = conv2d(x, h2, 'same', boundary='symm')
    c3 = conv2d(x, h3, 'same', boundary='symm')
    data = np.stack([c0, c1, c2, c3], axis=-1)
    data_out = np.max(data, axis=-1)
    max_index = np.argmax(data_out, axis=0)
    x = np.arange(data_out.shape[1])
    y = max_index
    outd = data_out[y, x]
    outh = h[y, x]
    choice = (outh > 150) & (outh < 180) & (outd > 250)
    nx, ny = x[choice], y[choice]
    if nx.all():
        return np.stack([nx, ny], axis=1)
    else:
        return np.array([])
def mainPostproc(argv):
    '''
    Post-processing to make the glyphs "smoother" and do deduplication
    Default convolution kernel: Gaussian 3x3
    '''
    ag = argparse.ArgumentParser()
    ag.add_argument('--json', type=str, default='chafa8x8.raw.json')
    ag.add_argument('--save', type=str, default='chafa8x8.json')
    ag = ag.parse_args(argv)
    # load raw vectors
    centers = json.load(open(ag.json, 'r'))
    # [default kernel] Gaussian kernel (kernel size = 3)
    Kg = np.array([[1 / 16, 1 / 8, 1 / 16],
                   [1 / 8, 1 / 4, 1 / 8],
                   [1 / 16, 1 / 8, 1 / 16]])
    # mean value (glyphs tend to have round corners instead of sharp ones)
    Km = np.ones((3, 3)) / 9
    # Select Kg as the default kernel
    K = Kg
    for (i, center) in enumerate(centers):
        center = np.array(center).reshape((8, 8))
        center = conv2d(center, K, 'same').ravel()
        centers[i] = (center >= 0.5)
    centers = np.unique(np.array(centers), axis=0)
    print(' -> number of centers:', centers.shape[0])
    centers = list(
        sorted([list(map(int, center)) for center in centers],
               key=lambda x: sum(x)))
    json.dump(centers, open(ag.save, 'w'))
    print(f'=> Result saved to {ag.save}')
def fprop(self, inputs):
    """Forward propagates activations through the layer transformation.

    For inputs `x`, outputs `y`, kernels `K` and biases `b` the layer
    corresponds to `y = conv2d(x, K) + b`.

    Args:
        inputs: Array of layer inputs of shape
            (batch_size, num_input_channels, image_height, image_width).

    Returns:
        outputs: Array of layer outputs of shape
            (batch_size, num_output_channels, output_height, output_width).
    """
    batch_size = inputs.shape[0]
    num_output_channels = self.num_output_channels
    output_height = self.input_height - self.kernel_height + 1
    output_width = self.input_width - self.kernel_width + 1
    outputs = np.zeros(
        (batch_size, num_output_channels, output_height, output_width))
    num_input_channels = self.num_input_channels
    for batch in range(batch_size):
        for num_output_channel in range(num_output_channels):
            for num_input_channel in range(num_input_channels):
                kernel = self.kernels[num_output_channel, num_input_channel, :, :]
                outputs[batch, num_output_channel, :, :] += conv2d(
                    inputs[batch, num_input_channel, :, :], kernel, mode='valid')
            # add the bias once per output channel
            outputs[batch, num_output_channel, :, :] += self.biases[num_output_channel]
    return outputs
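# --- Hedged shape check (illustration only) ---------------------------------
# fprop() assumes the 'valid' convolution size rule output = input - kernel + 1;
# a quick standalone check with scipy.signal.convolve2d:
import numpy as np
from scipy.signal import convolve2d as conv2d

x = np.random.rand(28, 28)
k = np.random.rand(5, 5)
print(conv2d(x, k, mode='valid').shape == (28 - 5 + 1, 28 - 5 + 1))  # True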
def bprop(self, inputs, outputs, grads_wrt_outputs):
    """Back propagates gradients through a layer.

    Given gradients with respect to the outputs of the layer calculates the
    gradients with respect to the layer inputs.

    Args:
        inputs: Array of layer inputs of shape
            (batch_size, num_input_channels, input_height, input_width).
        outputs: Array of layer outputs calculated in forward pass of shape
            (batch_size, num_output_channels, output_height, output_width).
        grads_wrt_outputs: Array of gradients with respect to the layer
            outputs of shape
            (batch_size, num_output_channels, output_height, output_width).

    Returns:
        Array of gradients with respect to the layer inputs of shape
        (batch_size, num_input_channels, input_height, input_width).
    """
    grads_wrt_inputs = np.zeros(inputs.shape)
    batch_size = inputs.shape[0]
    for batch in range(batch_size):
        for num_input_channel in range(self.num_input_channels):
            for num_output_channel in range(self.num_output_channels):
                kernel = self.kernels[num_output_channel, num_input_channel, ::-1, ::-1]
                grads_wrt_inputs[batch, num_input_channel, :, :] += conv2d(
                    grads_wrt_outputs[batch, num_output_channel, :, :], kernel)
    return grads_wrt_inputs
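# --- Hedged shape check (illustration only) ---------------------------------
# bprop() convolves the 'valid' output gradients with the flipped kernel using
# the default 'full' mode, which recovers the input spatial size:
import numpy as np
from scipy.signal import convolve2d as conv2d

g = np.random.rand(24, 24)   # gradients for a 28x28 input and a 5x5 kernel
k = np.random.rand(5, 5)
print(conv2d(g, k[::-1, ::-1]).shape)  # (28, 28)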
def _policy(self, state):
    if len(state.history) == 0:
        return [(7, 7)]
    elif len(state.history) == 1:
        return [(6, 7), (6, 6)]
    adjacent = conv2d(np.abs(state.board), FILTER, mode="same")
    actions = []
    ops = self.ops[state.player]
    in_lose_danger = ops.get_op_live_four(state.features) > 0
    in_four_danger = ops.get_op_four(state.features) > 0
    in_three_danger = ops.get_op_three(state.features) > 0
    for x in range(15):
        for y in range(15):
            if adjacent[x, y] > 0 and state.board[x, y] == 0:
                new, old = diff(state, x, y)
                if state.player == -1 or (
                        "violate" not in new
                        and new["-o-oo-"] + new["-ooo-"] < 2
                        and new["four-o"] + new["-oooo-"] + new["-oooox"] < 2):
                    if new["win-o"] > 0 or new["win-x"] > 0:
                        return [(x, y)]
                    elif not in_lose_danger:
                        if in_four_danger:
                            if ops.get_op_four(state.features) == ops.get_op_four(old):
                                actions.append((x, y, 0, 0))
                            elif ops.get_my_live_four(new) > 0:
                                return [(x, y)]
                        elif in_three_danger:
                            if ops.get_my_four(new) > 0:
                                actions.append((x, y, 0, 0))
                            elif ops.get_op_three(state.features) == ops.get_op_three(old):
                                actions.append((x, y, 0, 0))
                        elif len(new) + len(old) > 0:
                            state.player = -state.player
                            _new, _old = diff(state, x, y)
                            _ops = self.ops[state.player]
                            state.player = -state.player
                            actions.append(
                                (x, y,
                                 self._score(deepcopy(state.features), new, old, -state.player),
                                 self._score(deepcopy(state.features), _new, _old, state.player)))
    if len(actions) == 0:
        return actions
    random.shuffle(actions)
    width = self.max_width // 2
    return list(
        set(
            list(map(lambda t: (t[0], t[1]),
                     sorted(actions, key=lambda t: t[2], reverse=True)))[:width] +
            list(map(lambda t: (t[0], t[1]),
                     sorted(actions, key=lambda t: t[3], reverse=True)))[:width]))
def grads_wrt_params(self, inputs, grads_wrt_outputs):
    """Calculates gradients with respect to layer parameters.

    Args:
        inputs: array of inputs to layer of shape (batch_size, input_dim)
        grads_wrt_outputs: array of gradients with respect to the layer
            outputs of shape
            (batch_size, num_output_channels, output_height, output_width).

    Returns:
        list of arrays of gradients with respect to the layer parameters
        `[grads_wrt_kernels, grads_wrt_biases]`.
    """
    grads_wrt_weights = np.zeros(self.kernels.shape)
    grads_wrt_biases = np.zeros(self.biases.shape)
    batch_size = inputs.shape[0]
    num_output_channels = self.num_output_channels
    num_input_channels = self.num_input_channels
    for num_output_channel in range(num_output_channels):
        for num_input_channel in range(num_input_channels):
            for batch in range(batch_size):
                kernel = grads_wrt_outputs[batch, num_output_channel, :, :]
                grads_wrt_weights[num_output_channel, num_input_channel, :, :] += conv2d(
                    inputs[batch, num_input_channel, :, :], kernel, mode='valid')
        # bias gradients are accumulated once per output channel
        for batch in range(batch_size):
            grads_wrt_biases[num_output_channel] += grads_wrt_outputs[
                batch, num_output_channel, :, :].sum()
    return [grads_wrt_weights, grads_wrt_biases]
def numpy_update_bw(alive_map, conv_kernel):
    """Perform one step of a cellular automaton with 1 channel"""
    # Convolve
    conv_result = conv2d(alive_map, conv_kernel, mode='same')
    # Apply game rules
    condition = (conv_result > 0) * 2 - 1
    np.copyto(alive_map, condition)
def is_won(self):
    board = self.board
    length_connect = self.length_connect
    won = 0
    for pattern in self.win_patterns:
        res = conv2d(board, pattern, mode='same')
        idx_max = np.unravel_index(np.abs(res).argmax(), res.shape)
        if abs(res[idx_max]) == length_connect:
            won = 2 * int(res[idx_max] > 0) - 1
    return won
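# --- Hedged illustration for is_won() (standalone, not from the original) ---
# Convolving a +1/-1 board with a line-shaped ones kernel yields
# |response| == length_connect exactly where a player has that many stones in a
# row; win_patterns and length_connect are class attributes not shown here.
import numpy as np
from scipy.signal import convolve2d as conv2d

board = np.zeros((6, 7), dtype=int)
board[2, 1:5] = 1                       # four in a row for player +1
horizontal = np.ones((1, 4), dtype=int)
res = conv2d(board, horizontal, mode='same')
print(np.abs(res).max() == 4)           # True -> player +1 connected four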
def get_convolve_kernel(imgarray, kernel):
    """
    imgarray: [nx, ny] 2d array-like image
    kernel:   [n, n, N] e.g. [21, 21, 49]
    mode:     scipy.signal.convolve2d mode; 'same' gives the same dimensions
              as imgarray
    """
    convec = np.zeros((kernel.shape[2], imgarray.size))
    for i in range(kernel.shape[2]):
        conv = conv2d(imgarray, kernel[:, :, i], mode='same').ravel()
        convec[i, :] = conv
    return convec
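# --- Hedged shape sketch for get_convolve_kernel() (illustration only) ------
# Each of the N kernel slices is convolved with the image in 'same' mode and
# flattened, giving an (N, nx * ny) matrix; the sizes below are arbitrary.
import numpy as np

img = np.random.rand(16, 16)
kern = np.random.rand(5, 5, 7)
print(get_convolve_kernel(img, kern).shape)  # (7, 256)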
def Sinc_Filter(high_res):
    SINC_SIZE = 11
    RANGE = 6
    x = np.linspace(-RANGE, RANGE, SINC_SIZE)
    xx = np.outer(x, x)
    sinc_func = np.sinc(xx)
    sinc_func = sinc_func / np.sum(sinc_func)
    # plt.figure()
    # plt.title('Sinc')
    # plt.imshow(sinc_func, cmap='gray')
    sinc_img = conv2d(high_res, sinc_func)
    return sinc_img
def numpy_update_sep(alive_map1, alive_map2, alive_map3, conv_kernel,
                     channel_mixing=None):
    """Perform one step of a cellular automaton with 3 independent channels"""
    # Convolve
    if channel_mixing is None:
        conv_result1 = conv2d(alive_map1, conv_kernel, mode='same')
        conv_result2 = conv2d(alive_map2, conv_kernel, mode='same')
        conv_result3 = conv2d(alive_map3, conv_kernel, mode='same')
    elif isinstance(channel_mixing, str) and channel_mixing == '3D':
        maps = np.stack([alive_map1, alive_map2, alive_map3], axis=-1)
        conv_results = convolve(maps, conv_kernel, mode='same')
        conv_result1, conv_result2, conv_result3 = list(
            map(np.squeeze, np.dsplit(conv_results, 3)))
    else:
        conv_kernel1, conv_kernel2, conv_kernel3 = list(
            map(np.squeeze, np.dsplit(conv_kernel, 3)))
        conv_result1 = conv2d(alive_map1, conv_kernel1, mode='same')
        conv_result2 = conv2d(alive_map2, conv_kernel2, mode='same')
        conv_result3 = conv2d(alive_map3, conv_kernel3, mode='same')
        conv_results = np.stack([conv_result1, conv_result2, conv_result3],
                                axis=-1)
        conv_results = np.matmul(conv_results, channel_mixing)
        conv_result1, conv_result2, conv_result3 = list(
            map(np.squeeze, np.dsplit(conv_results, 3)))
    # Apply game rules
    condition1 = (conv_result1 > 0) * 2 - 1
    condition2 = (conv_result2 > 0) * 2 - 1
    condition3 = (conv_result3 > 0) * 2 - 1
    np.copyto(alive_map1, condition1)
    np.copyto(alive_map2, condition2)
    np.copyto(alive_map3, condition3)
def solve(self, x, iter_max, tol_max, model_path):
    pre = np.zeros(shape=x.shape, dtype=np.float64)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    for i in range(x.shape[0]):
        x_batch = np.squeeze(x[i], -1)
        kt_y = conv2d(x_batch, self.kernel.transpose(), mode='same')
        u = 0.5
        from model.unrolled import Net
        fre_k = Net.kernel_pad(self.kernel, self.input_shape)
        fre_k = np.fft.fft2(fre_k)
        fre_k = 2 * u + np.square(np.abs(fre_k))
        result = x_batch
        for iter_ in range(iter_max):
            result_last = result
            result = kt_y + 2 * u * result
            result = np.fft.fft2(result)
            result = result / fre_k
            result = np.fft.ifft2(result)
            result = np.abs(result)
            result = np.expand_dims(result, 0)
            result = np.expand_dims(result, -1)
            result = self.predict(result, 1, model_path=model_path)
            result = np.squeeze(result, 0)
            result = np.squeeze(result, -1)
            tol_ = np.mean(np.square(result - result_last))
            print(tol_)
            if tol_ < tol_max:
                verbose_info = "[Info] Prediction Output: Break in [%d] Iter" % (iter_ + 1)
                print(verbose_info)
                break
        result = np.expand_dims(result, -1)
        pre[i] = result
        verbose_info = "[Info] Prediction Output: Batch = [%d]" % (i + 1)
        print(verbose_info)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
    return pre
def convolve_2d(array_2d, kernel, mode):
    """
    Convolves the 2-D array array_2d with the kernel either along the rows
    or along the columns.

    Keyword arguments:
    array_2d -- 2-D array.
    kernel   -- 1-D array; assumed to be the z-transform of the filter.
    mode     -- selects what to convolve along: rows or columns.
                'vertical' -- along columns, 'horizontal' -- along rows.

    The convolution is zero-padded on the right.
    """
    res = np.array(array_2d)
    kernel = np.array(kernel)
    if mode == 'horizontal':
        res = conv2d(res, kernel[None, :])
        res = res[:, len(kernel) - 1:]
    if mode == 'vertical':
        res = conv2d(res, kernel[:, None])
        res = res[len(kernel) - 1:, :]
    return res
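# --- Hedged usage example for convolve_2d() (illustration only) -------------
# Filtering the rows of a small array with a 2-tap difference kernel; the
# output keeps the input shape because the leading 'full' samples are trimmed.
import numpy as np

a = np.arange(12, dtype=float).reshape(3, 4)
print(convolve_2d(a, [1, -1], mode='horizontal').shape)  # (3, 4)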
def random_action(self, state):
    adjacent = conv2d(np.abs(state.board), FILTER, mode="same")
    prob = np.logical_and(state.board == 0, adjacent > 0).astype(float)
    for x in range(15):
        for y in range(15):
            if prob[x, y] > 0:
                new, old = diff(state, x, y)
                if ("violate" in new
                        or new["-o-oo-"] + new["-ooo-"] >= 2
                        or new["four-o"] + new["-oooo-"] + new["-oooox"] >= 2):
                    prob[x, y] = 0
    prob = (prob / prob.sum()).reshape(225)
    return np.unravel_index(np.random.choice(225, p=prob), (15, 15))
def image(self):
    B = np.zeros([self.numk, self.shape[0], self.shape[1]], dtype=np.complex128)
    for i in range(self.numk):
        B[i] = conv2d(self.template, self.flowfilt[i], mode='same')
    A = np.zeros([self.shape[0], self.shape[1]], dtype=np.complex128)
    for i in np.arange(0, self.shape[0], self.block):
        for j in np.arange(0, self.shape[1], self.block):
            rend = min(i + self.block, self.shape[0] - 1)
            cend = min(j + self.block, self.shape[1] - 1)
            cho = randint(self.numk)
            A[i:rend, j:cend] = B[cho, i:rend, j:cend]
    return A
def gaussian_kernel(kernel_size):
    """
    Create a Gaussian (binomial) kernel as a 1 x kernel_size row vector.

    Parameters
    ----------
    :param kernel_size

    Returns
    -------
    :return gaussian matrix
    """
    gaussian = binomial_ker = np.array([[1, 1]])
    while gaussian.shape[1] < kernel_size:
        gaussian = conv2d(gaussian, binomial_ker)
    return 1 / gaussian.sum() * gaussian
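# --- Hedged worked example (illustration only) ------------------------------
# Repeated convolution of [1, 1] with itself yields binomial coefficients, a
# standard discrete approximation of a Gaussian; for kernel_size = 5 the
# (1 x 5) result is [1, 4, 6, 4, 1] / 16.
print(gaussian_kernel(5))  # approx. [[0.0625 0.25 0.375 0.25 0.0625]]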
def computeSingleOpponency(img, filters):
    h, w, c = img.shape
    num_phases, rfsize, rfsize, _, num_channels, num_rotations = filters.shape
    output_size = (h, w, num_channels, num_rotations, num_phases)
    # use zeros, not empty: the responses are accumulated into s below
    s = np.zeros(output_size, dtype=np.float32)
    for p in range(num_phases):
        for j in range(num_channels):
            for i in range(num_rotations):
                for k in range(c):
                    tmp = conv2d(img[:, :, k],
                                 np.squeeze(filters[p, :, :, k, j, i]),
                                 boundary='fill', mode='same')
                    s[:, :, j, i, p] = s[:, :, j, i, p] + tmp
    s[np.where(s < 0)] = 0
    return s
def winner(self, b, state):
    # Check who wins for n-mok game
    # Inputs
    #   b: current board state, 0: no stone, 1: black, 2: white
    #   state: extra state
    # Usage) [r, end_game, s1, s2] = winner(b, state)
    #   r: 0 tie, 1: black wins, 2: white wins
    #   end_game
    #     if end_game == 1, game ends
    #     if end_game == 0, game ends if no more move is possible for the current player
    #     if end_game == -1, game ends if no moves are possible for both players
    #   s1: score for black
    #   s2: score for white

    # total number of games
    ng = b.shape[2]
    n = self.n
    r = np.zeros((ng))
    fh = np.ones((n, 1))
    fv = np.transpose(fh)
    fl = np.identity(n)
    fr = np.fliplr(fl)
    for j in range(ng):
        c = (b[:, :, j] == 1)
        if np.amax(conv2d(c, fh, mode='valid') == n) \
                or np.amax(conv2d(c, fv, mode='valid') == n) \
                or np.amax(conv2d(c, fl, mode='valid') == n) \
                or np.amax(conv2d(c, fr, mode='valid') == n):
            r[j] = 1
        c = (b[:, :, j] == 2)
        if np.amax(conv2d(c, fh, mode='valid') == n) \
                or np.amax(conv2d(c, fv, mode='valid') == n) \
                or np.amax(conv2d(c, fl, mode='valid') == n) \
                or np.amax(conv2d(c, fr, mode='valid') == n):
            r[j] = 2
    return r, r > 0, r == 1, r == 2
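# --- Hedged illustration for winner() (standalone, not from the original) ---
# The four line detectors for n = 3: as written, fh is an n x 1 column (it
# matches vertical runs of stones) and fv is its transpose; the diagonal
# detectors are the identity matrix and its left-right flip.
import numpy as np

n = 3
fh = np.ones((n, 1))
fv = fh.T
fl = np.identity(n)
fr = np.fliplr(fl)
print(fl)   # [[1. 0. 0.]
            #  [0. 1. 0.]
            #  [0. 0. 1.]]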
def __init__(self, params=params_default):
    self.pixel = params['pixel']
    self.shape = params['shape']
    self.ev = Envelope(params).GMenvelope()
    self.sdv_phi = params['sdv_phi']
    self.sdv_lp = params['sdv_lp']
    self.kerflow = params['kerflow']
    self.sdv_df = params['sdv_df']
    self.minf = params['minf']
    self.block = params['block']
    self.numk = params['numk']
    # self.maxAmpt = params['maxAmpt']
    AM = normal(0, 1, self.shape) + 1j * normal(0, 1, self.shape)
    # convolve gaussian and normal
    mlp = max(int(3 * self.sdv_lp / self.pixel[0]), int(3 * self.sdv_lp / self.pixel[1]))
    x = np.arange(-mlp, mlp + 1) * self.pixel[1]
    y = np.arange(-mlp, mlp + 1) * self.pixel[0]
    x1, y1 = np.meshgrid(x, y)
    lpfilter = np.exp(-x1 ** 2 / 2 / self.sdv_lp ** 2 - y1 ** 2 / 2 / self.sdv_lp ** 2)
    # get amplitude of the tissue
    A = np.abs(conv2d(AM * self.ev, lpfilter, mode='same'))
    # generate phase field for tissue
    phi = uniform(0, 2 * np.pi)
    # phi = 0
    dp = np.random.normal(0, self.sdv_phi, A.shape) * np.pi / 180
    self.template = A * np.exp(1j * (phi + dp))
    # self.template = self.template * self.maxAmpt / np.max(np.abs(self.template))
    # generate the flowing filter
    self.flowfilt = np.zeros([self.numk, self.kerflow, self.kerflow])
    for i in range(self.numk):
        tmp = rand(self.kerflow, self.kerflow)
        tmp = tmp / np.sum(tmp)
        self.flowfilt[i] = tmp
def computeDoubleOpponency(s, num_channels, filters):
    ds = np.empty(s.shape, dtype=np.float32)
    num_phases, rfsize, rfsize, num_rotations = filters.shape
    for p in range(num_phases):
        for j in range(num_channels):
            for i in range(num_rotations):
                ds[:, :, j, i, p] = conv2d(s[:, :, j, i, p], filters[p, :, :, i],
                                           boundary='fill', mode='same')
    ds[np.where(ds < 0)] = 0
    # accumulated below, so start from zeros rather than uninitialized memory
    tmpdc = np.zeros((s.shape[0], s.shape[1], num_channels, num_rotations))
    for i in range(num_phases):
        tmpdc = tmpdc + ds[:, :, :, :, i] / num_phases
    # integer division: these values are used as array dimensions and indices
    dc = np.empty((s.shape[0], s.shape[1], num_channels // 2, num_rotations))
    for j in range(num_channels // 2):
        dc[:, :, j, :] = np.sqrt(
            np.square(tmpdc[:, :, j, :]) +
            np.square(tmpdc[:, :, j + num_channels // 2, :]))
    return ds, dc
def get_conv(A, B):
    C = np.zeros((A.shape[1] + B.shape[1] - 1, A.shape[2] + B.shape[2] - 1))
    for l in range(A.shape[0]):
        C += conv2d(A[l], B[l], 'full')
    return C
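# --- Hedged usage sketch for get_conv() (illustration only) -----------------
# Summing per-channel 'full' convolutions of two stacks A (L x Ha x Wa) and
# B (L x Hb x Wb) gives one (Ha + Hb - 1) x (Wa + Wb - 1) map; the shapes
# below are arbitrary.
import numpy as np

A = np.random.rand(3, 5, 5)
B = np.random.rand(3, 2, 2)
print(get_conv(A, B).shape)  # (6, 6)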
def denoise(labels):
    labels.shape = (80, 60)

    def ring(size):
        """Square ring kernel: ones on the border, zeros inside.

        ring(3), ring(5), ..., ring(15) reproduce the explicit k1, k3, ..., k13
        kernels written out element by element in the original code.
        """
        k = np.ones((size, size), dtype=np.uint8)
        k[1:-1, 1:-1] = 0
        return k

    k1, k3, k5, k7, k9, k11, k13 = (ring(s) for s in (3, 5, 7, 9, 11, 13, 15))

    # a pixel whose 8-neighbourhood is entirely foreground is filled in
    c_k1 = conv2d(labels, k1, mode='same', fillvalue=1)
    highs = c_k1 == 8
    labels[highs] = 1

    # growing rings: if a full ring of foreground encloses a block, fill the block
    c_k3 = conv2d(labels, k3, mode='same', fillvalue=1)
    for i in range(1, 79):
        for j in range(1, 59):
            if c_k3[i][j] == 16:
                labels[i - 1:i + 2, j - 1:j + 2] = 1
    c_k5 = conv2d(labels, k5, mode='same', fillvalue=1)
    for i in range(2, 78):
        for j in range(2, 58):
            if c_k5[i][j] == 24:
                labels[i - 2:i + 3, j - 2:j + 3] = 1
    c_k7 = conv2d(labels, k7, mode='same', fillvalue=1)
    for i in range(3, 77):
        for j in range(3, 57):
            if c_k7[i][j] == 32:
                labels[i - 3:i + 4, j - 3:j + 4] = 1
    c_k9 = conv2d(labels, k9, mode='same', fillvalue=1)
    for i in range(4, 76):
        for j in range(4, 56):
            if c_k9[i][j] == 40:
                labels[i - 4:i + 5, j - 4:j + 5] = 1
    c_k11 = conv2d(labels, k11, mode='same', fillvalue=1)
    for i in range(5, 75):
        for j in range(5, 55):
            if c_k11[i][j] == 48:
                labels[i - 5:i + 6, j - 5:j + 6] = 1
    c_k13 = conv2d(labels, k13, mode='same', fillvalue=1)
    for i in range(6, 74):
        for j in range(6, 54):
            if c_k13[i][j] == 56:
                labels[i - 6:i + 7, j - 6:j + 7] = 1
    # upper border
    for i in range(0, 6):
        for j in range(6, 54):
            if c_k13[i][j] == 56:
                labels[:i + 7, j - 6:j + 7] = 1
    # bottom border
    for i in range(74, 80):
        for j in range(6, 54):
            if c_k13[i][j] == 56:
                labels[i - 6:, j - 6:j + 7] = 1
    # left border
    for i in range(6, 74):
        for j in range(0, 6):
            if c_k13[i][j] == 56:
                labels[i - 6:i + 7, :j + 7] = 1
    # right border
    for i in range(6, 74):
        for j in range(54, 60):
            if c_k13[i][j] == 56:
                labels[i - 6:i + 7, j - 6:] = 1
    return labels
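# --- Hedged usage sketch for denoise() (illustration only) ------------------
# labels is expected to be a binary array with 80 * 60 = 4800 entries; it is
# reshaped to (80, 60) in place and background pixels enclosed by rings of
# foreground are filled in. The random input below is only for shape checking.
import numpy as np

noisy = np.random.randint(0, 2, size=4800)
print(denoise(noisy).shape)  # (80, 60)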
def __init__(self, originalImage, psfs):
    self.psfs = psfs
    self.imgs = [conv2d(originalImage, mat, 'same') for mat in psfs]
    self.plot()
def adjacent(self):
    return conv2d(np.abs(self.board), FILTER, mode="same")
    'GROUP_WIDTH': 4
}
globalsize = (8, 8)
localsize = (4, 4)
program = cl.Program(context, kernel_code).build()
conv = program.conv_2D
i_cpu = np.random.randint(0, 10, size=(4, 4)).astype(np.int32)
o_cpu = np.empty((4, 4)).astype(np.int32)
i_gpu = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=i_cpu)
o_gpu = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, o_cpu.nbytes)
mask_gpu = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=mask_reversed)
conv(queue, globalsize, localsize, i_gpu, o_gpu, mask_gpu, np.uint32(4), np.uint32(4), np.uint32(3))
queue.finish()
cl.enqueue_copy(queue, o_cpu, o_gpu)
o_reference = conv2d(i_cpu, mask, mode='same')
print('===Part1-(i)===')
print('===Input Matrix===')
print(i_cpu)
print('===Input Mask===')
print(mask)
print('===GPU Output===')
print(o_cpu)
print('===CPU Output===')
print(o_reference)
print('Validation result:', np.allclose(o_cpu, o_reference))

# =====================================================================#
# Part1-(ii) Test with different matrix sizes, keeping the kernel constant.
# Specify the matrix sizes to be used
print('===Part1-(ii)===')
size = np.array([[20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 300, 400, 500, 600],
# If the image is not gray scale
# img = img[:, :, 0]

# parameter setting
timestep = 1
mu = 0.2 / timestep
iter_inner = 5
iter_outer = 20
lamda = 5
alfa = -3
# alfa = 1.5
epsilon = 1.5
sigma = 0.8
# sigma = 1.5
G = gauss2D((15, 15), sigma)
img_smooth = conv2d(img, G, 'same')
ix, iy = np.gradient(img_smooth)
f = np.square(ix) + np.square(iy)
g = 1.0 / (1 + f)

# initialize LSF as binary step function
c0 = 2
initialLSF = c0 * np.ones(img.shape)
# generate the initial region R0 as two rectangles
initialLSF[24:35, 19:25] = -c0
initialLSF[24:35, 39:50] = -c0
phi = initialLSF
# imshow(phi)
# ipdb.set_trace()
# print(phi[24:35, 39:50])
# phi0 = drlse_edge(phi, g, lamda, mu, alfa, lamda, epsilon, iter_inner, 'single-well')