def __setattr__(self, attr, val):
    if attr in SharedObject.RESERVED:
        super().__setattr__(attr, val)
        return
    with self.load():  # loads it if not already loaded
        if self.fs.verbose:
            util.gray(f'setattr {attr}')
        self.attr_dict[attr] = val
def try_metacommands(self, line):
    if len(line) == 0:
        return False
    if line.strip() == '%':
        if self.mode != 'speedy':
            self.mode = 'speedy'
        else:
            self.mode = 'normal'
        self.update_banner()
        u.gray("metacommand registered")
        return True
    # handle metacommands
    if line[0] == '!':
        if line.strip() == '!print':
            u.y('\n'.join(self.code))
        if line.strip() == '!debug':
            self.debug = not self.debug
        if line.strip() == '!verbose_exc':
            self.verbose_exceptions = not self.verbose_exceptions
        if line.strip() == '!reset':
            self.__init__(None)
        # if line.strip() == '!cleanup':  # clears the /repl-tmpfiles directory
        #     u.clear_repl_tmpfiles()
        #     os.makedirs(os.path.dirname(self.tmpfile))
        # if line.strip() == '!which':
        #     print(self.tmpfile)
        if line.strip() == '!help':
            u.b('Currently implemented macros listing:')
            u.p('\n'.join(codegen.macro_argc.keys()))
        u.gray("metacommand registered")
        return True
    return False
def __getattr__(self, attr):
    if attr in SharedObject.RESERVED:
        # mirror __setattr__: defer reserved names to the default lookup
        return super().__getattribute__(attr)
    with self.load():  # loads it if not already loaded
        if self.fs.verbose:
            util.gray(f'getattr {attr}')
        try:
            return self.attr_dict[attr]
        except KeyError:
            pass  # if we raised AttributeError in here the error message would look bad by blaming it on the KeyError
    raise AttributeError(str(attr))
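# The __setattr__/__getattr__ pair above implements an attribute proxy: reserved
# names live on the instance itself, everything else is routed through a backing
# dict once the object is loaded. A stripped-down, self-contained sketch of the
# same pattern (the class and field names here are illustrative, not the real
# SharedObject):
class LazyProxy:
    RESERVED = {'_store', '_loaded'}

    def __init__(self):
        super().__setattr__('_store', {})     # bypass our own __setattr__
        super().__setattr__('_loaded', False)

    def _load(self):
        self._loaded = True  # stand-in for SharedObject's self.load() context manager

    def __setattr__(self, attr, val):
        if attr in LazyProxy.RESERVED:
            super().__setattr__(attr, val)
            return
        self._load()
        self._store[attr] = val

    def __getattr__(self, attr):
        # only called when normal lookup fails, so reserved names never reach here
        try:
            return super().__getattribute__('_store')[attr]
        except KeyError:
            raise AttributeError(attr) from None

p = LazyProxy()
p.answer = 42
print(p.answer)  # 42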
def hough():
    img = util.load_img('img/sudoku.png')
    img_gray = util.gray(img)
    edges = cv.Canny(img_gray, 100, 200)

    """cv.HoughLinesP"""
    lines = cv.HoughLinesP(edges, 1, np.pi / 180, 200, minLineLength=100, maxLineGap=10)
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

    """cv.HoughLines"""
    # lines = cv.HoughLines(edges, 1, np.pi / 180, 200)
    # for line in lines:
    #     rho, theta = line[0]
    #     a = np.cos(theta)
    #     b = np.sin(theta)
    #     x0 = a * rho
    #     y0 = b * rho
    #     x1 = int(x0 + 1000 * (-b))
    #     y1 = int(y0 + 1000 * (a))
    #     x2 = int(x0 - 1000 * (-b))
    #     y2 = int(y0 - 1000 * (a))
    #     cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    """"""

    util.show(img)
def __fit_one(self, link, content_layers, style_grams):
    xp = self.xp
    link.zerograds()
    layers = self.model(link.x)
    if self.keep_color:
        trans_layers = self.model(util.gray(link.x))
    else:
        trans_layers = layers
    loss_info = []
    loss = Variable(xp.zeros((), dtype=np.float32))
    for name, content_layer in content_layers:
        layer = layers[name]
        content_loss = self.content_weight * F.mean_squared_error(layer, Variable(content_layer.data))
        loss_info.append(('content_' + name, float(content_loss.data)))
        loss += content_loss
    for name, style_gram in style_grams:
        gram = util.gram_matrix(trans_layers[name])
        style_loss = self.style_weight * F.mean_squared_error(gram, Variable(style_gram.data))
        loss_info.append(('style_' + name, float(style_loss.data)))
        loss += style_loss
    tv_loss = self.tv_weight * util.total_variation(link.x)
    loss_info.append(('tv', float(tv_loss.data)))
    loss += tv_loss
    loss.backward()
    self.optimizer.update()
    return loss_info
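# The style term in __fit_one above compares Gram matrices of feature maps.
# util.gram_matrix is project-specific; below is a minimal NumPy sketch of the
# underlying computation for an NCHW tensor (the 1/(C*H*W) normalization is an
# assumption; implementations differ in the constant they divide by).
import numpy as np

def gram_matrix_np(features):
    # channel-by-channel inner products, pooled over all spatial positions
    n, c, h, w = features.shape
    flat = features.reshape(n, c, h * w)                      # (N, C, H*W)
    return flat @ flat.transpose(0, 2, 1) / float(c * h * w)  # (N, C, C)

feats = np.random.rand(1, 64, 32, 32).astype(np.float32)
print(gram_matrix_np(feats).shape)  # (1, 64, 64)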
def __fit_one(self, link, content_layers, style_patches):
    xp = self.xp
    link.zerograds()
    layers = self.model(link.x)
    if self.keep_color:
        trans_layers = self.model(util.gray(link.x))
    else:
        trans_layers = layers
    loss_info = []
    loss = Variable(xp.zeros((), dtype=np.float32))
    for name, content_layer in content_layers:
        layer = layers[name]
        content_loss = self.content_weight * F.mean_squared_error(
            layer, Variable(content_layer.data))
        loss_info.append(('content_' + name, float(content_loss.data)))
        loss += content_loss
    for name, style_patch, style_patch_norm in style_patches:
        patch = trans_layers[name]
        near, size, size2 = util.nearest_neighbor_patch(
            patch, style_patch, style_patch_norm)
        style_loss = self.style_weight * (
            F.sum(F.square(patch)) * size2 / size - 2 * F.sum(near) / size)
        loss_info.append(('style_' + name, float(style_loss.data)))
        loss += style_loss
    tv_loss = self.tv_weight * util.total_variation(link.x)
    loss_info.append(('tv', float(tv_loss.data)))
    loss += tv_loss
    loss.backward()
    self.optimizer.update()
    return loss_info
def create_query(img):
    orb = cv2.ORB_create()
    img = cv2.resize(img, None, fx=13, fy=13, interpolation=cv2.INTER_AREA)
    img = gray(img)
    # find the keypoints and descriptors with orb
    kp1, des1 = orb.detectAndCompute(img, None)
    if len(kp1) < 2:
        des1 = None
    return des1
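# The descriptors returned by create_query() are typically scored against a
# gallery of stored descriptors. A minimal sketch using OpenCV's brute-force
# Hamming matcher; the inverse-distance scoring below is illustrative, not
# taken from the surrounding project.
import cv2

def match_score(query_des, gallery_des):
    if query_des is None or gallery_des is None:
        return 0.0
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # Hamming norm suits binary ORB descriptors
    matches = matcher.match(query_des, gallery_des)
    return sum(1.0 / (1.0 + m.distance) for m in matches)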
def water():
    """
    Use the distance transform and watershed to segment objects that touch each other.
    Regions near an object's center are foreground, regions far from any object are
    background, and the uncertain regions in between are the boundary.
    If the objects were not touching (or only the foreground were needed), erosion alone
    would remove the boundary pixels; here we take the distance transform and apply a
    suitable threshold instead. Dilation extends the object boundaries out into the
    background, ensuring that everything outside the dilated result is truly background.
    unknown (boundary) region = sure-background estimate - sure-foreground region
    """
    img = util.load_img('img/coins.png')
    gray = util.gray(img)
    ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
    util.show(thresh)

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)  # distance from each pixel to the nearest zero pixel
    ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg, sure_fg)

    # Marker labelling: the background is labelled 0, other components get integers starting at 1
    ret, markers = cv.connectedComponents(sure_fg)
    """
    If the background is labelled 0, watershed treats it as the unknown region,
    so we relabel it with a different integer and instead mark the region defined
    by `unknown` with 0.
    """
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv.watershed(img, markers)  # modifies the marker image; boundary regions are labelled -1
    img[markers == -1] = [255, 0, 0]

    util.show(img, is_seq=True)
def input_from_file(banner):
    u.gray(f"input_from_file called")
    self.line_idx += 1
    if self.line_idx >= len(self.lines):
        u.gray(f"Context switch: {self.prev_context.infile} -> {self.infile}")
        self.repl.context = self.prev_context
        self.repl.mode = self.old_mode  # reset the mode since we're exiting this context (then again, it'll always be normal mode that runs `use` statements)
        if self.repl.context is None:
            u.gray('Final context ended, exit()ing')
            exit(0)
        self.repl.update_banner()
        return self.repl.context.input(self.repl.banner)  # smoothly transition into the next input source
    line = self.lines[self.line_idx]
    print(f"compiling:{line}")
    return line
def add_context(self, infile):  # REPL is infile=None
    u.gray(f'Adding context: {infile}')
    Context(self, infile)  # this will automatically add itself as our self.context if it succeeds in reading the infile etc
def plot_results(img, results, images, web=None):
    # results is a list of (index, score) tuples sorted by descending score, e.g. [(1, 100), (2, 15.6), (0, 0.7)]
    # create plot of original image and best matches
    fig, ax = plt.subplots(nrows=2, ncols=6, figsize=(32, 32), sharex=False, sharey=False)
    ((ax1, ax2, ax3, ax4, ax5, ax6), (ax7, ax8, ax9, ax10, ax11, ax12)) = ax
    result_cells = [ax3, ax4, ax5, ax6, ax9, ax10, ax11, ax12]
    # result_cells = []
    # fig, ax = plt.subplots(ncols=6, nrows=2)

    # merge the four top-left cells into one 2x2 panel for the query image
    gs = ax[1, 2].get_gridspec()
    ax1.remove()
    ax2.remove()
    ax7.remove()
    ax8.remove()
    ax1 = fig.add_subplot(gs[0:2, 0:2])
    # fig7, f7_axs = plt.subplots(ncols=3, nrows=3)
    # gs = f7_axs[1, 2].get_gridspec()
    # # remove the underlying axes
    # for ax in f7_axs[1:, -1]:
    #     ax.remove()

    ax1.imshow(gray(img), cmap=plt.cm.gray)
    ax1.set_title('Query Image', fontsize=20, y=1.0)

    # im_inx = 0
    # for i in ax[]:
    #     for j in i:
    #         if im_inx >= len(images):
    #             break
    #         j.imshow(gray(images[results[im_inx][0]]), cmap=plt.cm.gray)
    #         j.set_xlim([0, 32])
    #         j.set_ylim([32, 0])
    #         j.set_title('match score: ' + '%.1f' % (results[im_inx][1]), fontsize=20, y=1.0)
    #         im_inx += 1
    for c_inx in range(len(result_cells)):
        if c_inx >= len(images):
            break
        result_cells[c_inx].imshow(gray(images[results[c_inx][0]]), cmap=plt.cm.gray)
        result_cells[c_inx].set_xlim([0, 32])
        result_cells[c_inx].set_ylim([32, 0])
        result_cells[c_inx].set_title('match score: ' + '%.1f' % (results[c_inx][1]), fontsize=20, y=1.0)

    # maximize the window and display plots
    fig.tight_layout()
    # mng = plt.get_current_fig_manager()
    # mng.window.state('zoomed')
    if not web:
        plt.show()
    else:
        rimg = io.BytesIO()
        plt.savefig(rimg)
        rimg.seek(0)
        return rimg
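# Usage sketch for plot_results() above, with synthetic data. The query and
# gallery images here are random 32x32 arrays and the scores are made up; the
# only assumption beyond the code above is that plot_results (and its gray()
# helper) are in scope.
import numpy as np

query = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
gallery = [np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8) for _ in range(8)]
scores = [(i, float(s)) for i, s in enumerate(np.random.rand(8) * 100)]
results = sorted(scores, key=lambda t: t[1], reverse=True)  # descending score

plot_results(query, results, gallery)                            # opens an interactive window
# png_buf = plot_results(query, results, gallery, web=True)      # returns a BytesIO PNG instead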