def drawLine(p1, p2, varargin):
    """Draws a line from point p1 to point p2 and holds the current figure.

    p1, p2 -- 2-element sequences (x, y); varargin -- matplotlib format
    string / style argument forwarded to plt.plot.
    """
    # BUG FIX: the MATLAB-ported original used call syntax and 1-based
    # indexing (p1(1), p2(2)), which raises TypeError in Python, and fed
    # np.column_stack two positional scalars instead of a tuple.
    plt.plot([p1[0], p2[0]], [p1[1], p2[1]], varargin)
    show()
def _show_folder(self, index, path, goto, single_pane, other_group):
    """Quick-panel callback: open the folder picked at ``index``.

    Forwards ``goto`` only when the picked folder is the path we were
    already targeting; otherwise the chosen folder opens at its root.
    """
    if index == -1:
        return
    choice = self.window.folders()[index]
    if path == choice:
        show(self.window, path, goto=goto,
             single_pane=single_pane, other_group=other_group)
    else:
        show(self.window, choice,
             single_pane=single_pane, other_group=other_group)
def open_item(self, fqn, window, new_view):
    """Open ``fqn``: browse it when a directory, open it as a file when it
    exists, otherwise report the problem in the status bar."""
    if isdir(fqn):
        show(window, fqn, ignore_existing=new_view)
        return
    if exists(fqn):  # ignore 'item <error>'
        self.last_created_view = window.open_file(fqn)
        return
    name = basename(fqn.rstrip(os.sep)) or fqn
    sublime.status_message(u'File does not exist (%s)' % name)
def plotBoundary(theta, X, y):
    """Plot the learned logistic-regression decision boundary.

    theta -- fitted parameter vector; X, y -- pandas objects whose raw
    ``.values`` arrays are handed to plotDecisionBoundary.
    """
    plotDecisionBoundary(theta, X.values, y.values)
    # NOTE(review): ``Lambda`` is read from an enclosing/global scope, not a
    # parameter — confirm it is defined wherever this function is called.
    plt.title(r'$\lambda$ = ' + str(Lambda))
    # Labels and Legend
    plt.xlabel('Microchip Test 1')
    plt.ylabel('Microchip Test 2')
    show()
def test_vsm(self):
    """Build a VSM over the three sample documents and run one query."""
    corpus = Document.from_texts(
        [documents.DOCUMENT_1, documents.DOCUMENT_2, documents.DOCUMENT_3])
    model = VSM(corpus)
    query = "nobel physics britian idea"
    show(query)
    results = model.query(query)
    show(results)
def run(self, edit):
    """Navigate the dired view one directory up from ``self.path``."""
    parent = dirname(self.path.rstrip(os.sep))
    if parent != os.sep:
        parent += os.sep
    if parent == self.path:
        # Already at the filesystem root — nothing above us.
        return
    view_id = self.view.id() if reuse_view() else None
    goto = basename(self.path.rstrip(os.sep))
    show(self.view.window(), parent, view_id, goto=goto)
def train(self, episode_len, episode_nbr, behavior='balance'):
    """Run ``episode_nbr`` training episodes, then one 'show' episode.

    episode_len -- steps per episode; behavior -- policy name forwarded to
    run_episode.  The final run is rendered to ``<behavior>.html``.
    """
    tau = 0.001        # simulation timestep (seconds)
    speedup = 0.1      # playback speed factor for the rendered episode
    self.pos_log = []
    for j in range(episode_nbr):
        self.run_episode(episode_len,tau,behavior,'train')
        # Progress report every 100 episodes (Python 2 print statement).
        if (j+1)%100 == 0:
            print "%i episodes run" % (j+1)
    self.run_episode(episode_len,tau,behavior,'show')
    show(behavior+'.html',self.pos_log,tau/speedup)
def goto_directory(self, filenames, window, new_view):
    '''If reuse view is turned on and the only item is a directory,
    refresh the existing view'''
    if new_view and reuse_view():
        return False
    fqn = filenames[0]
    if len(filenames) == 1 and isdir(fqn):
        show(self.view.window(), fqn, view_id=self.view.id())
        return True
    if fqn == PARENT_SYM:
        self.view.window().run_command("dired_up")
        return True
    return False
def run(tau,T,playback_speedup=1):
    """Simulate an uncontrolled robot for T seconds and render its log."""
    assert 0 < tau < 0.1
    assert T > 0
    bot = Robot()
    t = 0
    while t < T:
        bot.next_pos(tau)
        t += tau
        #print '\nt =', t, 'pos:\n', bot.q, bot.q_d
        if bot.fell():
            break
    show('no_control.html',bot.state_log,tau/playback_speedup)
def run(self, immediate=False, single_pane=False, project=False):
    """Open a dired view.  In project mode a single folder opens directly
    and several folders go through a quick panel; otherwise either open the
    determined path immediately or prompt for a directory."""
    path = self._determine_path()
    if project:
        folders = self.window.folders()
        if len(folders) == 1:
            path = folders[0]
        elif folders:
            items = [[basename(f), f] for f in folders]
            self.window.show_quick_panel(items, self._show_folder)
            return
    if immediate:
        show(self.window, path, single_pane=single_pane)
    else:
        prompt.start('Directory:', self.window, path, self._show)
def plotData(X, y):
    """plots the data points with + for the positive examples and o for
    the negative examples. X is assumed to be a Mx2 matrix.

    Note: This was slightly modified such that it expects y = 1 or y = 0
    """
    plt.figure()
    # Find Indices of Positive and Negative Examples.
    # (The original np.where(cond, True, False) is just the condition.)
    pos = (y == 1).flatten()
    neg = (y == 0).flatten()
    # Plot Examples
    plt.plot(X[pos, 0], X[pos, 1], 'k+', linewidth=1, markersize=7)
    # NOTE(review): 'ko' fixes the marker color to black while color='y'
    # asks for yellow — matplotlib honours the explicit kwarg.
    plt.plot(X[neg, 0], X[neg, 1], 'ko', color='y', markersize=7)
    show()
def visualizeFit(X, mu, sigma2):
    """ This visualization shows you the probability density function of
    the Gaussian distribution. Each example has a location (x1, x2) that
    depends on its feature values.
    """
    # Evaluate the fitted Gaussian on a 71x71 grid over [0, 35]^2.
    n = np.linspace(0,35,71)
    X1 = np.meshgrid(n,n)
    Z = multivariateGaussian(np.column_stack((X1[0].T.flatten(), X1[1].T.flatten())),mu,sigma2)
    Z = Z.reshape(X1[0].shape)
    plt.plot(X[:, 0], X[:, 1],'bx')
    # Do not plot if there are infinities
    if not isinf(np.sum(Z)):
        # Contour levels at 10**(-20), 10**(-17), ..., i.e. -20..-2 step 3.
        plt.contour(X1[0], X1[1], Z, 10.0**np.arange(-20, 0, 3).T)
    show()
def run(self, edit, new_view=0, other_group=0, preview=0, and_close=0, inline=0):
    """Open the selected (or marked) dired items.

    new_view    -- operate on marked items instead of reusing the view
    other_group -- place opened files in the other layout group
    preview     -- open a single file transiently in the other group
    and_close   -- close the dired view after opening
    inline      -- unfold a directory inline instead of navigating
    """
    path = self.path
    filenames = self.get_selected() if not new_view else self.get_marked() or self.get_selected()

    # If reuse view is turned on and the only item is a directory, refresh the existing view.
    if not new_view and reuse_view():
        fqn = join(path, filenames[0])
        # fqn[~6:] is the last 7 characters, i.e. the '<empty>' marker.
        if inline and '<empty>' == fqn[~6:]:
            return
        if len(filenames) == 1 and isdir(fqn):
            if inline:
                # if directory was unfolded, then it'll be folded and unfolded again
                self.view.run_command('dired_fold', {'update': True})
            show(self.view.window(), fqn, view_id=self.view.id(), inline=inline)
            return
        elif len(filenames) == 1 and filenames[0] == PARENT_SYM:
            self.view.window().run_command("dired_up")
            return

    if other_group or preview or and_close:
        # we need group number of FB view, hence twice check for other_group
        dired_view = self.view
        nag = self.view.window().active_group()
    w = self.view.window()
    for filename in filenames:
        fqn = join(path, filename.replace('<empty>', '').rstrip())
        if exists(fqn):  # ignore 'item <error>'
            if isdir(fqn):
                show(w, fqn, ignore_existing=new_view)
            else:
                if preview:
                    w.focus_group(self._other_group(w, nag))
                    v = w.open_file(fqn, sublime.TRANSIENT)
                    w.set_view_index(v, self._other_group(w, nag), 0)
                    w.focus_group(nag)
                    w.focus_view(dired_view)
                    break  # preview is possible for a single file only
                else:
                    v = w.open_file(fqn)
                    if other_group:
                        w.focus_view(dired_view)
                        w.set_view_index(v, self._other_group(w, nag), 0)
                        w.focus_view(v)
    if and_close:
        w.focus_view(dired_view)
        w.run_command("close")
        w.focus_view(v)
def on_pick_point(self, index):
    """Quick-panel callback: jump to the chosen jump point, or offer to
    delete it when its target directory no longer exists."""
    if index == -1:
        return
    name, target = self.points[index]
    # A valid point is an existing directory stored with a trailing separator.
    if exists(target) and isdir(target) and target[-1] == os.sep:
        show(self.view.window(), target, view_id=self.view.id())
        status_message("Jumping to point '{0}' complete".format(name))
    else:
        # workaround ST3 bug https://github.com/SublimeText/Issues/issues/39
        self.view.window().run_command('hide_overlay')
        msg = u"Can't jump to '{0} → {1}'.\n\nRemove that jump point?".format(name, target)
        if ok_cancel_dialog(msg):
            points = load_jump_points()
            del points[name]
            save_jump_points(points)
            status_message(u"Jump point '{0}' was removed".format(name))
            self.view.run_command('dired_refresh')
def run(self, edit):
    """Navigate one level up, handling Windows drive roots via the
    virtual 'ThisPC' location."""
    path = self.path
    parent = dirname(path.rstrip(os.sep))
    # need to avoid c:\\
    if parent != os.sep and parent[1:] != ':\\':
        parent += os.sep
    if parent == path and NT:
        # At a drive root on Windows — go to the drive list instead.
        parent = 'ThisPC'
    elif parent == path:
        # At the POSIX filesystem root: nowhere to go.
        return
    elif path == 'ThisPC\\':
        self.view.run_command('dired_refresh')
        return
    view_id = (self.view.id() if reuse_view() else None)
    goto = basename(path.rstrip(os.sep)) or path
    show(self.view.window(), parent, view_id, goto=goto)
def run_wss(T, speedup=0.1, tau=0.001):
    """Run a short balance-policy simulation for T seconds and render it.

    Uses 5 physics substeps per policy action and logs the pose each
    macro-step; output goes to 'short_sims.html'.
    """
    bot = Robot()
    bot.q = np.array([0.4, 0., 1.7, 1.35])   # initial joint state
    bot.psi = 0.5                            # initial control parameter
    t = 0.
    while t < T:
        sim_time, action = bot.balance_policy(tau,0.2)
        bot.psi += action/10.
        # Python 2 print statement: trace the chosen action and state.
        print "action:",action,"bot state:",bot.q, bot.q_d
        # Five physics substeps per policy decision.
        [ bot.next_pos(tau) for _ in range(5)]
        t += tau*5
        bot.pos_log.append(tuple(bot.q))
        if bot.is_down():
            break
    show('short_sims.html',bot.pos_log,tau/speedup)
def run(self, immediate=False, single_pane=False, project=False, other_group=False):
    """Open a dired view; in project mode the quick panel lists folders
    with their display paths aligned in a monospace column."""
    path, goto = self._determine_path()
    if project:
        folders = self.window.folders()
        if len(folders) == 1:
            path = folders[0]
        elif folders:
            names = [basename(f) for f in folders]
            widest = max(len(n) for n in names)
            for i, f in enumerate(folders):
                pad = ' ' * (widest - len(names[i]) + 1)
                names[i] = u'%s%s%s' % (names[i], pad, self.display_path(f))
            self.window.show_quick_panel(
                names,
                lambda i: self._show_folder(i, path, goto, single_pane, other_group),
                sublime.MONOSPACE_FONT)
            return
    if immediate:
        show(self.window, path, goto=goto,
             single_pane=single_pane, other_group=other_group)
    else:
        prompt.start('Directory:', self.window, path, self._show)
def displayData(X):
    """displays 2D data stored in X in a nice grid. It returns the figure
    handle h and the displayed array if requested."""
    # Compute rows, cols.  Cast to int: under Python 3 these divisions
    # produce floats, which break np.ones shapes and array indexing.
    m, n = X.shape
    example_width = int(round(np.sqrt(n)))
    example_height = int(n / example_width)

    # Compute number of items to display
    display_rows = int(np.floor(np.sqrt(m)))
    display_cols = int(np.ceil(m / display_rows))

    # Between images padding
    pad = 1

    # Setup blank display
    display_array = - np.ones((pad + display_rows * (example_height + pad),
                               pad + display_cols * (example_width + pad)))

    # Copy each example into a patch on the display array
    curr_ex = 0
    for j in np.arange(display_rows):
        for i in np.arange(display_cols):
            # BUG FIX: the original tested ``curr_ex > m``, letting
            # curr_ex == m through and indexing one past the end of X.
            if curr_ex >= m:
                break
            # Scale by the max value of the patch
            max_val = np.max(np.abs(X[curr_ex, :]))
            rows = [pad + j * (example_height + pad) + x for x in np.arange(example_height+1)]
            cols = [pad + i * (example_width + pad) + x for x in np.arange(example_width+1)]
            display_array[min(rows):max(rows), min(cols):max(cols)] = \
                X[curr_ex, :].reshape(example_height, example_width) / max_val
            curr_ex = curr_ex + 1
        if curr_ex >= m:
            break

    # Display Image
    display_array = display_array.astype('float32')
    plt.imshow(display_array.T)
    plt.set_cmap('gray')
    # Do not show axis
    plt.axis('off')
    show()
def plotDataPoints(X, idx):
    """plots data points in X, coloring them so that those with the same
    index assignments in idx have the same color
    """
    # Map each cluster index to a color from the 'jet' colormap after
    # normalising indices to [0, 1].  (The original started with a dead
    # ``pass`` statement and shadowed the builtin ``map``.)
    cmap = plt.get_cmap("jet")
    idxn = idx.astype("float") / max(idx.astype("float"))
    colors = cmap(idxn)
    plt.scatter(X[:, 0], X[:, 1], 15, edgecolors=colors,
                marker="o", facecolors="none", lw=0.5)
    show()
def run(self, edit):
    """Open the project folder on the caret's jump-point row — in this
    window when smart-jump applies, otherwise in a new window — then
    close this view shortly afterwards."""
    pt = self.view.sel()[0].a
    row, col = self.view.rowcol(pt)
    points = [[n, t] for n, t in jump_points()]
    # Rows 0-2 are header lines, so row - 3 indexes into the points list.
    current_project = [points[row - 3][1]]
    settings = load_settings('dired.sublime-settings')
    smart_jump = settings.get('dired_smart_jump', False)
    if smart_jump and len(self.view.window().views()) == 1:
        show(self.view.window(), current_project[0])
    else:
        self.view.run_command("dired_open_in_new_window", {"project_folder": current_project})

    def close_view(view):
        if ST3:
            view.close()
        else:
            view.window().run_command("close_file")

    # BUG FIX: the original passed ``close_view(self.view)`` — calling it
    # immediately and scheduling its None return value.  Defer the call.
    sublime.set_timeout(lambda: close_view(self.view), 100)
def run(self, edit, new_view=False, other_group='', preview='', and_close=''):
    """Open the selected (or marked) dired items.

    new_view    -- operate on marked items instead of reusing the view
    other_group -- place opened files in the other layout group
    preview     -- open a single file transiently in the other group
    and_close   -- close the dired view after opening
    """
    path = self.path
    filenames = self.get_selected() if not new_view else self.get_marked() or self.get_selected()

    # If reuse view is turned on and the only item is a directory, refresh the existing view.
    if not new_view and reuse_view():
        if len(filenames) == 1 and isdir(join(path, filenames[0])):
            fqn = join(path, filenames[0])
            show(self.view.window(), fqn, view_id=self.view.id())
            return
        elif len(filenames) == 1 and filenames[0] == PARENT_SYM:
            self.view.window().run_command("dired_up")
            return

    if other_group or preview or and_close:
        # we need group number of FB view, hence twice check for other_group
        dired_view = self.view
        nag = self.view.window().active_group()
    w = self.view.window()
    for filename in filenames:
        fqn = join(path, filename)
        if exists(fqn):  # ignore 'item <error>'
            if isdir(fqn):
                show(w, fqn, ignore_existing=new_view)
            else:
                if preview:
                    w.focus_group(self._other_group(w, nag))
                    v = w.open_file(fqn, sublime.TRANSIENT)
                    w.set_view_index(v, self._other_group(w, nag), 0)
                    w.focus_group(nag)
                    w.focus_view(dired_view)
                    break  # preview is possible for a single file only
                else:
                    v = w.open_file(fqn)
                    if other_group:
                        w.focus_view(dired_view)
                        w.set_view_index(v, self._other_group(w, nag), 0)
                        w.focus_view(v)
    if and_close:
        w.focus_view(dired_view)
        w.run_command("close")
        w.focus_view(v)
def run(self, edit, new_view=False):
    """Open each selected item: directories in a dired view, plain files
    in an editor view."""
    path = self.path
    filenames = self.get_selected()

    # Reusing the current view: a lone directory refreshes it in place.
    if not new_view and reuse_view() and len(filenames) == 1:
        fqn = join(path, filenames[0])
        if isdir(fqn):
            show(self.view.window(), fqn, view_id=self.view.id())
            return
        if filenames[0] == PARENT_SYM:
            self.view.window().run_command("dired_up")
            return

    for filename in filenames:
        fqn = join(path, filename)
        if isdir(fqn):
            show(self.view.window(), fqn, ignore_existing=new_view)
        else:
            self.view.window().open_file(fqn)
def _getKeywordNews_sub(self, keyword):
    """Scrape one result page (already parsed into self.html) for ``keyword``.

    Appends matching articles to self.outNews and returns True while older
    pages may still contain articles for self.whichday.
    """
    newsList = self.html.find_all('div', {'class':'archive clearfix'})
    nextPage = True
    for inews in newsList[:]:
        # Extract the 'YYYY...MM' portion of the timestamp text.
        Time = inews.find('span').text
        Time = re.match('.*([0-9]{4}.*[0-9]{2})', Time).group(1)
        newsTime = pd.to_datetime(Time)
        # Shift by +1 day -7 hours before comparing against the cutoff day.
        newsTime2 = newsTime + pd.Timedelta(days=1,hours=-7)
        if newsTime2.strftime('%Y-%m-%d') < self.whichday:
            show(f'''<<< {self.whichday} 沒有更多 "{keyword}" 相關的新聞 !!! >>>''', level=2, sign='')
            nextPage = False
            break
        # Strip non-word characters from the headline.
        title = re.sub('[^\w]', '', inews.find('h2').text)
        url = inews.find('a')['href']
        show(f'找到關鍵字 "{keyword}"; 文章資訊: {newsTime}, {title}, {url}', level=3)
        data = [[keyword, title, self.Name, newsTime, url, '']]
        data = pd.DataFrame(data, columns=colName())
        self.outNews = self.outNews.append(data)
    return nextPage
def do_open(self, command):
    """Open a paper from the paperdir given a docid

    iota> open docid:1
    """
    parsed = parse_command(command)
    try:
        sexps = show(self.database, **parsed)
    except TypeError:
        return
    self.print_sexp(sexps)
def arithmetic_meanProcess(image, view=False):
    """Arithmetic_meanProcess using filter only shape [3,3]

    Args:
        image: The original image
        view: The flag to decided to show the result image
            Set to False default

    Returns:
        rets: The list contains the filted image
    """
    from hw2_filter import filter2d
    # Correlate with a 3x3 box of ones, then truncate each pixel sum / 9
    # to obtain the arithmetic mean.
    retImage = filter2d(image, np.ones((3,3), dtype=int))
    height, width = retImage.shape[0], retImage.shape[1]
    for row in range(height):
        for col in range(width):
            retImage[row][col] = int(retImage[row][col]/9)
    if view:
        show(image, retImage, "arithmetic_mean")
    return retImage
def run(T, speedup=0.1, tau=0.001):
    """Simulate an uncontrolled robot for T seconds, logging the pose each
    step, and render the log to 'no_control.html'."""
    assert 0 < tau < 0.1
    assert T > 0
    bot = Robot()
    bot.q = np.array([0.6, 0., 2.8, 1.5])
    # bot.psi = 1.1
    # Alternative "salto" settings:
    #   bot.q = np.array([0.7, 0., 2.0, 1.255])
    #   bot.psi = 0.9
    t = 0.
    while t < T:
        #bot.psi = get_psi(get_policy(EPS,bot.q,bot.q_d))
        bot.next_pos(tau)
        t += tau
        bot.pos_log.append(tuple(bot.q))
        if bot.is_down():
            break
    show('no_control.html',bot.pos_log,tau/speedup)
def _getKeywordNews_sub(self, keyword):
    """Scrape one Selenium-loaded results page for ``keyword``.

    Appends matching articles to self.outNews; returns True while older
    pages may still contain articles for self.whichday.
    """
    self.driver.implicitly_wait(30)
    # The second 'stories-container' element holds the actual results.
    newsBox = self.driver.find_elements(By.CSS_SELECTOR, 'div.stories-container')[1]
    self.newsList = newsList = newsBox.find_elements(By.CSS_SELECTOR, 'div.flex-feature')
    nextPage = True
    for inews in newsList[:]:
        Time = inews.find_element(By.CSS_SELECTOR, 'div.timestamp').text.replace('出版時間: ','')
        newsTime = pd.to_datetime(Time)
        # Shift by +1 day -7 hours before comparing against the cutoff day.
        newsTime2 = newsTime + pd.Timedelta(days=1,hours=-7)
        if newsTime2.strftime('%Y-%m-%d') < self.whichday:
            show(f'''<<< {self.whichday} 沒有更多 "{keyword}" 相關的新聞 !!! >>>''', level=2, sign='')
            nextPage = False
            break
        # Strip non-word characters from the headline.
        title = re.sub('[^\w]', '', inews.find_element(By.CSS_SELECTOR, 'span.desktop-blurb').text)
        url = inews.find_element(By.CSS_SELECTOR, 'a.story-card').get_attribute('href')
        show(f'找到關鍵字 "{keyword}"; 文章資訊: {newsTime}, {title}, {url}', level=3)
        data = [[keyword, title, self.Name, newsTime, url, '']]
        data = pd.DataFrame(data, columns=colName())
        self.outNews = self.outNews.append(data)
    return nextPage
def _getKeywordNews_sub(self, keyword):
    """Scrape one Selenium-loaded list page (#jsMainList) for ``keyword``.

    Appends matching articles to self.outNews; returns True while older
    pages may still contain articles for self.whichday.
    """
    self.driver.implicitly_wait(30)
    newsBox = self.driver.find_element(By.CSS_SELECTOR, 'div.centralContent #jsMainList')
    self.newsList = newsList = newsBox.find_elements(By.CSS_SELECTOR, 'li')
    nextPage = True
    for inews in newsList[:]:
        Time = inews.find_element(By.CSS_SELECTOR, 'li a div div').text
        newsTime = pd.to_datetime(Time)
        # Shift by +1 day -7 hours before comparing against the cutoff day.
        newsTime2 = newsTime + pd.Timedelta(days=1,hours=-7)
        if newsTime2.strftime('%Y-%m-%d') < self.whichday:
            show(f'''<<< {self.whichday} 沒有更多 "{keyword}" 相關的新聞 !!! >>>''', level=2, sign='')
            nextPage = False
            break
        # Strip non-word characters from the headline.
        title = re.sub('[^\w]', '', inews.find_element(By.CSS_SELECTOR, 'li a').text)
        url = inews.find_element(By.CSS_SELECTOR, 'li a').get_attribute('href')
        show(f'找到關鍵字 "{keyword}"; 文章資訊: {newsTime}, {title}, {url}', level=3)
        data = [[keyword, title, self.Name, newsTime, url, '']]
        data = pd.DataFrame(data, columns=colName())
        self.outNews = self.outNews.append(data)
    return nextPage
def _getKeywordNews_sub(self, keyword):
    """Scrape one result page (already parsed into self.html) for ``keyword``.

    Appends matching articles to self.outNews; returns True while older
    pages may still contain articles for self.whichday.
    """
    newsBox = self.html.find_all('div', {'class':'context-box__content story-list__holder story-list__holder--full'})
    if len(newsBox) != 1:
        show('get more than 1 element. Please confirm.')
        return
    self.newsBox = newsBox
    self.newsList = newsList = newsBox[0].find_all('div', {'class' : 'story-list__news'})
    nextPage = True
    for inews in newsList[:]:
        Time = inews.find('time').text
        # Drop the timezone so later arithmetic and formatting stay naive.
        newsTime = pd.to_datetime(Time).tz_localize(None)
        self.newsTime = newsTime
        # Shift by +1 day -7 hours before comparing against the cutoff day.
        newsTime2 = newsTime + pd.Timedelta(days=1,hours=-7)
        if newsTime2.strftime('%Y-%m-%d') < self.whichday:
            show(f'''<<< {self.whichday} 沒有更多 "{keyword}" 相關的新聞 !!! >>>''', level=2, sign='')
            nextPage = False
            break
        # Strip non-word characters from the headline.
        title = re.sub('[^\w]', '', inews.find('h2').find('a').text)
        url = inews.find('h2').find('a')['href'].replace('\n', '')
        show(f'找到關鍵字 "{keyword}"; 文章資訊: {newsTime}, {title}, {url}', level=3)
        data = [[keyword, title, self.Name, newsTime, url, '']]
        data = pd.DataFrame(data, columns=colName())
        self.outNews = self.outNews.append(data)
    return nextPage
def __downloadContent__(self):
    """Download article bodies for rows whose news_content is still empty
    and write the updated table back to self.fileName."""
    show('------------------------------------------------------', level=1, sign='')
    show('下載新聞內容')
    data = self.__readNews__()
    content = data['news_content']
    html = None
    url = None
    for i,news in data.iterrows():
        #if i >1: continue
        if content[i] != '':
            continue  # body already downloaded
        url = news['web_url']
        NewsText = news['title'].replace('\r', '')
        show(f'下載 {news["web_url"]}, {news["title"]}')
        try:
            wait()  # rate-limit between requests
            html = requests.get(url, headers=headers)
            html = BeautifulSoup(html.text, features='lxml')
            # Site-specific extraction delegated to self._method.
            text = self._method(html, url)
        except:
            # NOTE(review): bare except hides all errors — the row is
            # simply left without content.
            text = None
        content[i] = text
    data['news_content'] = content
    data.to_excel(self.fileName, index=False)
def topic3(): global src1, src2, src3 # 图一 dst1 = _process_img(src1) # 图二,先对图像进行腐蚀处理,尽量分离两物体边缘交界出,便于后续边缘检测 dst2 = cv.erode(src2, np.ones((8, 8), np.uint8)) dst2 = _process_img(dst2, blurKernel=5, threshold=(110, 255), cannyThre=(150, 100), dilaKernel=5, srcCopy=src2) # 图三 dst3 = cv.erode(src3, np.ones((3, 3), np.uint8)) dst3 = _process_img(dst3, blurKernel=4, threshold=(110, 255), cannyThre=(90, 10), dilaKernel=2, srcCopy=src3) cv.imwrite('DstImgs/' + _ImgPaths['3-3-1'].split('/')[1], dst1) cv.imwrite('DstImgs/' + _ImgPaths['3-3-2'].split('/')[1], dst2) cv.imwrite('DstImgs/' + _ImgPaths['3-3-3'].split('/')[1], dst3) show(src1, dst1) show(src2, dst2) show(src3, dst3)
def _getKeywordNews_sub(self, keyword):
    """Scrape one NYT search-results page (self.html) for ``keyword``.

    Appends matching articles to self.outNews; returns True while older
    pages may still contain articles for self.whichday.
    """
    newsBox = self.html.find_all('ol', {'data-testid':'search-results'})
    if len(newsBox) != 1:
        show('get more than 1 element. Please confirm.')
        return
    self.newsBox = newsBox
    self.newsList = newsList = newsBox[0].find_all('li', {'class': 'css-1l4w6pd'})
    nextPage = True
    for inews in newsList[:]:
        # NOTE(review): no per-item date is read here — the query day is
        # used as the article time; confirm self.whichday supports the
        # Timedelta addition below.
        Time = self.whichday
        newsTime = Time
        newsTime2 = newsTime + pd.Timedelta(days=1,hours=-7)
        if newsTime2.strftime('%Y-%m-%d') < self.whichday:
            show(f'''<<< {self.whichday} 沒有更多 "{keyword}" 相關的新聞 !!! >>>''', level=2, sign='')
            nextPage = False
            break
        title = re.sub('[^\w]', '', inews.find('h4').text)
        url = inews.find('a')['href']
        url = 'https://www.nytimes.com' + url if url[0]=='/' else url
        show(f'找到關鍵字 "{keyword}"; 文章資訊: {newsTime}, {title}, {url}', level=3)
        data = [[keyword, title, self.Name, newsTime, url, '']]
        data = pd.DataFrame(data, columns=colName())
        # BUG FIX: every sibling scraper accumulates into self.outNews;
        # the original assigned to self.data, losing results each pass.
        self.outNews = self.outNews.append(data)
    return nextPage
def salt_pepperNoise(sourceImage, saltP=0, pepperP=0, view=False):
    """salt_pepperNoise add a possible Noise with respective saltP and
    pepperP to a image

    Args:
        sourceImage: original image
        saltP: possibility of salt noise
        pepperP: possibility of pepper noise
        view: The flag to decided to show the result image
            Set to False default

    Returns:
        retImage: a image polluted by a salt_pepper noise
    """
    # Work on a copy so the caller's image is untouched.
    retImage = np.zeros(sourceImage.shape, dtype=sourceImage.dtype)
    retImage[:, :] = sourceImage[:, :]
    xHigh = retImage.shape[0] - 1
    yHigh = retImage.shape[1] - 1
    # BUG FIX: np.random.random_integers was deprecated and has been
    # removed from modern NumPy; np.random.randint with an exclusive
    # upper bound (high + 1) draws from the same inclusive range.
    if saltP != 0:
        count = int(retImage.size * saltP)
        for _ in range(count):
            x = np.random.randint(0, xHigh + 1)
            y = np.random.randint(0, yHigh + 1)
            retImage[x][y] = 255
    if pepperP != 0:
        count = int(retImage.size * pepperP)
        for _ in range(count):
            x = np.random.randint(0, xHigh + 1)
            y = np.random.randint(0, yHigh + 1)
            retImage[x][y] = 0
    if view:
        show(sourceImage, retImage, "salt_pepperNoise")
    return retImage
def getContent(self, seldate):
    """Fill in news_content for NYT articles saved on ``seldate``.

    Reads DATA/outNews_aboard/NewYorkTime/<seldate>.xls, downloads each
    article body that is still missing, and writes the file back in place.
    """
    fn = os.path.join('DATA', 'outNews_aboard', 'NewYorkTime', f'{seldate}.xls')
    df = pd.read_excel(fn)
    df = df.drop_duplicates(subset=['title'])
    for ii, news in df.iterrows():
        news = news.fillna('')
        url = news['web_url']
        title = news['title'].replace('\r', '')
        show(f'下載 {url}, {title}')
        content = news['news_content']
        if content != '':
            continue  # body already downloaded
        try:
            wait()  # rate-limit between requests
            html = requests.get(url, headers=headers)
            html = BeautifulSoup(html.text, features='lxml')
            # NYT article bodies live in <section itemprop="articleBody">.
            method = { 'name': 'section', 'attrs': { 'itemprop': 'articleBody' } }
            text = html.find(**method)
            text = text.find_all('p')
            text = ' '.join([i.text for i in text])
            text = re.sub('\s+|\r|\t|\n', ' ', text)
        except:
            show('**** something wrong!! Pass it!! ****', level=1)
            text = None
        if text is None or len(text) > 30000:
            # NOTE(review): df.drop returns a new frame which is discarded
            # here — the row is NOT actually removed from df.
            df.drop(ii, axis=0)
            continue
        df.loc[ii, 'news_content'] = text
    df.to_excel(fn, index=False)
    print(f'完成,擷取資料如下\n{self.data}')
def geometric_meanProcess(image, view=False):
    """Geometic_meanProcess using filter only shape [3,3]

    Args:
        image: The original image
        view: The flag to decided to show the result image
            Set to False default

    Returns:
        rets: The list contains the filted image
    """
    from hw2_filter import extending
    extendingImage = extending(image, 3, 3)
    height = extendingImage.shape[0]
    width = extendingImage.shape[1]
    filterHeight = 3
    filterWidth = 3
    # BUG FIX: accumulate the 3x3 window product in float64 instead of the
    # source dtype — nine pixel values can reach 255**9, which overflows
    # integer arrays and corrupted the geometric mean.  (The accumulator
    # also shadowed the builtin ``sum``; renamed to ``prod``.)
    correlateImg = np.zeros((height, width), dtype=np.float64)
    for x in range(height-(filterHeight-1)):
        for y in range(width-(filterWidth-1)):
            prod = 1.0
            for j in range(filterHeight):
                for k in range(filterWidth):
                    prod = prod * extendingImage[x+j][y+k]
            correlateImg[x+int((filterHeight-1)/2)][y+int((filterWidth-1)/2)] = prod
    tarImage = np.zeros((image.shape[0], image.shape[1]), dtype=int)
    for x in range(tarImage.shape[0]):
        for y in range(tarImage.shape[1]):
            # 9th root of the window product = geometric mean of the window.
            tarImage[x][y] = int(pow(correlateImg[x+filterHeight-1][y+filterWidth-1], 1/9))
    if view:
        show(image, tarImage, "geometric_mean")
    return tarImage
def _getKeywordNews_sub(self, keyword):
    """Scrape one Selenium-loaded 'search-classic' page for ``keyword``.

    Appends matching articles to self.outNews; returns True while older
    pages may still contain articles for self.whichday.
    """
    self.driver.implicitly_wait(30)
    # Re-parse the container's inner HTML with BeautifulSoup for querying.
    newsBox = BeautifulSoup(self.driver.find_element(By.CSS_SELECTOR, 'div.search-classic').get_attribute('innerHTML'))
    self.newsBox = newsBox
    self.newsList = newsList = newsBox.find_all('li')
    nextPage = True
    for inews in newsList[:]:
        Time = inews.find('p', {'class':'date'}).text
        # Keep everything up to the last digit (drop trailing text).
        Time = re.match('(.*[0-9])\s+', Time).group(1)
        newsTime = pd.to_datetime(Time)
        # Shift by +1 day -7 hours before comparing against the cutoff day.
        newsTime2 = newsTime + pd.Timedelta(days=1,hours=-7)
        if newsTime2.strftime('%Y-%m-%d') < self.whichday:
            show(f'''<<< {self.whichday} 沒有更多 "{keyword}" 相關的新聞 !!! >>>''', level=2, sign='')
            nextPage = False
            break
        # Strip non-word characters from the headline.
        title = re.sub('[^\w]', '', inews.find('h4').text)
        url = inews.find('a')['href']
        show(f'找到關鍵字 "{keyword}"; 文章資訊: {newsTime}, {title}, {url}', level=3)
        data = [[keyword, title, self.Name, newsTime, url, '']]
        data = pd.DataFrame(data, columns=colName())
        self.outNews = self.outNews.append(data)
    return nextPage
def _topic2():
    """Denoise image 2-2 with a frequency-domain (notch-style) filter."""
    src = cv.imread(_ImgPath['2-2'], cv.IMREAD_GRAYSCALE)
    dst = fix_image(src, 54, 46)
    rows, cols = src.shape
    # Fourier transform: shift DC to the centre, take log-magnitude spectrum.
    f = np.fft.fft2(dst)
    fshift = np.fft.fftshift(f)
    fimg = np.log(np.abs(fshift))
    imgio.imwrite('DstImgs/' + 'fft' + _ImgPath['2-3'].split('/')[1], fimg)
    fftimg = cv.imread('DstImgs/' + 'fft' + _ImgPath['2-3'].split('/')[1], 0)
    # Build the filter (similar to a notch filter: suppress a few bright
    # high-frequency peaks while keeping the centred low frequencies).
    mask = cv.threshold(fftimg, 155, 255, cv.THRESH_BINARY_INV)[1]
    cv.circle(mask, (int(cols / 2), int(rows / 2)), 15, (255, 255, 255), -1)
    # Inverse Fourier transform back to the spatial domain.
    ishift = np.fft.ifftshift(fshift * mask)
    iimg = np.fft.ifft2(ishift)
    dst = np.abs(iimg)
    imgio.imwrite('DstImgs/' + _ImgPath['2-2'].split('/')[1], dst)
    show(src, dst)
def plotProgresskMeans(X, centroids, previous, idx, K, i, color): """plots the data points with colors assigned to each centroid. With the previous centroids, it also plots a line between the previous locations and current locations of the centroids. """ # Plot the examples plotDataPoints(X, idx) # Plot the centroids as black x's plt.scatter(centroids[:, 0], centroids[:, 1], marker='x', s=60, lw=3, edgecolor='k') # Plot the history of the centroids with lines for j in range(len(centroids)): plt.plot([centroids[j,0], previous[j,0]], [centroids[j,1], previous[j,1]], c=color) # Title plt.title('Iteration number %d' % i) show() raw_input("Program paused. Press Enter to continue...")
def test_7():
    """An example from the IPL dataset
    question : show sum of win_by_runs
    """
    frame = pandas.read_csv('data/matches.csv')
    result = show.show(frame,
                       metric='win_by_runs',
                       date_column_name='date',
                       date_format='%Y-%m-%d',
                       summary_operator=SummaryOperators.SUM)
    print(result)
    expected = """ Summary Operator win_by_runs 0 SUM 8702"""
    assert expected == result.to_string()
def on_pick_point(self, index):
    """Quick-panel callback: jump to the picked point, honouring the
    smart-jump setting; offer to delete points whose target is gone."""
    if index == -1:
        return
    name, target = self.jump_points[index]
    if not isdir(target):
        # workaround ST3 bug https://github.com/SublimeText/Issues/issues/39
        self.view.window().run_command('hide_overlay')
        msg = u"Can't jump to '{0} → {1}'.\n\nRemove that jump point?".format(name, target)
        if ok_cancel_dialog(msg):
            points = load_jump_points()
            del points[name]
            save_jump_points(points)
            status_message(u"Jump point '{0}' was removed".format(name))
            self.view.run_command('dired_refresh')
        return
    settings = load_settings('dired.sublime-settings')
    smart_jump = settings.get('dired_smart_jump', False)
    auto = self.new_window == 'auto'
    open_in_new_window = (self.new_window is True
                          or ((not smart_jump) and auto)
                          or (smart_jump and auto and len(self.view.window().views()) > 0))
    if open_in_new_window:
        self.view.run_command("dired_open_in_new_window", {"project_folder": [target]})
    else:
        show(self.view.window(), target, view_id=self.view.id())
    status_message(u"Jumping to point '{0}' complete".format(unicodify(name)))
def on_pick_point(self, index):
    """Quick-panel callback: jump to the picked point (optionally in a
    new window); offer to delete points whose target is gone."""
    if index == -1:
        return
    name, target = self.jump_points[index]
    valid = exists(target) and isdir(target) and target[-1] == os.sep
    if valid:
        if self.new_window:
            print(target)
            self.view.run_command("dired_open_in_new_window", {"project_folder": [target]})
        else:
            show(self.view.window(), target, view_id=self.view.id())
        shown_name = name if ST3 else name.decode('utf8')
        status_message(u"Jumping to point '{0}' complete".format(shown_name))
        return
    # workaround ST3 bug https://github.com/SublimeText/Issues/issues/39
    self.view.window().run_command('hide_overlay')
    msg = u"Can't jump to '{0} → {1}'.\n\nRemove that jump point?".format(name, target)
    if ok_cancel_dialog(msg):
        points = load_jump_points()
        del points[name]
        save_jump_points(points)
        status_message(u"Jump point '{0}' was removed".format(name))
        self.view.run_command('dired_refresh')
def plotDataPoints(X, idx):
    """plots data points in X, coloring them so that those with the same
    index assignments in idx have the same color
    """
    # Map each cluster index to a color from the 'jet' colormap after
    # normalising indices to [0, 1].  (The original started with a dead
    # ``pass`` statement and shadowed the builtin ``map``.)
    cmap = plt.get_cmap("jet")
    idxn = idx.astype('float') / max(idx.astype('float'))
    colors = cmap(idxn)
    plt.scatter(X[:, 0], X[:, 1], 15, edgecolors=colors,
                marker='o', facecolors='none', lw=0.5)
    show()
def test_8():
    """An example from the IPL dataset
    question : show mean of win_by_runs in season 2017
    in date range '2017-05-09' to '2017-05-12'
    """
    frame = pandas.read_csv('data/matches.csv')
    result = show.show(frame,
                       metric='win_by_runs',
                       slices=[('season', Filters.EQUAL_TO, 2017)],
                       date_range=('2017-05-09', '2017-05-12'),
                       date_column_name='date',
                       date_format='%Y-%m-%d',
                       summary_operator=SummaryOperators.MEAN)
    print(result)
    expected = """ Summary Operator win_by_runs 0 MEAN 7"""
    assert expected == result.to_string()
def test_4():
    """An example from the IPL dataset
    question : show all matches where Royal Challengers Bangalore
    won the match in season 2008
    """
    frame = pandas.read_csv('data/matches.csv')
    result = show.show(
        frame,
        slices=[('season', Filters.EQUAL_TO, 2008),
                ('winner', Filters.EQUAL_TO, 'Royal Challengers Bangalore')],
        dimensions=['team1', 'team2'],
    )
    print(result.to_string())
    expected = """ team1 team2 0 Mumbai Indians Royal Challengers Bangalore 1 Deccan Chargers Royal Challengers Bangalore 2 Royal Challengers Bangalore Chennai Super Kings 3 Royal Challengers Bangalore Deccan Chargers"""
    assert expected == result.to_string()
def test_9():
    """An example from "salary in various regions" to test mean vs median
    suggestion
    question : show mean of salary in each resident city
    """
    frame = pandas.read_csv('data/salary_in_various_regions.csv')
    result = show.show(frame,
                       metric='Salary(in $)',
                       dimensions=['Resident City'],
                       summary_operator=SummaryOperators.MEAN)
    print(result)
    expected = """ Resident City MEAN of Salary(in $) 0 Chicago 1.658889e+05 1 Palo Alto 3.033333e+04 2 Washington 2.002740e+07"""
    expected_suggestions = "[{'suggestion': 'Median is very different from the Mean', 'oversight': <Oversights.MEAN_VS_MEDIAN: 7>, 'is_row_level_suggestion': True, 'confidence_score': 3.1249999406334665, 'row_list': [{'row': 3, 'confidence_score': 3.1249999406334665}]}]"
    assert expected == result[0].to_string()
    assert expected_suggestions == str(result[1])
def test_7():
    """An example from the IPL dataset
    question : show sum of win_by_runs
    """
    frame = pandas.read_csv('data/matches.csv')
    result = show.show(frame,
                       metric='win_by_runs',
                       date_column_name='date',
                       day_first=False,
                       summary_operator=SummaryOperators.SUM)
    print(result)
    expected = """ SUM of win_by_runs 0 8702"""
    expected_suggestions = "[]"
    assert expected == result[0].to_string()
    assert expected_suggestions == str(result[1])
def test_5():
    """An example from the IPL dataset
    question : show all the umpire1 of season 2017
    in date range '2017-05-09' to '2017-05-12'
    """
    frame = pandas.read_csv('data/matches.csv')
    result = show.show(frame,
                       dimensions=['umpire1'],
                       slices=[('season', Filters.EQUAL_TO, 2017)],
                       date_range=('2017-05-09', '2017-05-12'),
                       date_column_name='date',
                       date_format='%Y-%m-%d',
                       summary_operator=SummaryOperators.DISTINCT)
    print(result.to_string())
    expected = """ umpire1 0 A Deshmukh 1 A Nand Kishore 2 KN Ananthapadmanabhan 3 YC Barde"""
    assert expected == result.to_string()
def test_3():
    """An example from the IPL dataset
    question : show all the distinct seasons available in the IPL dataset
    """
    frame = pandas.read_csv('data/matches.csv')
    result = show.show(frame,
                       dimensions=['season'],
                       summary_operator=SummaryOperators.DISTINCT)
    print(result.to_string())
    expected = """ season 0 2008 1 2009 2 2010 3 2011 4 2012 5 2013 6 2014 7 2015 8 2016 9 2017"""
    assert expected == result.to_string()
def test_2():
    """An example from the IPL dataset
    question : show player_of_match along with their average of
    win_by_runs in season 2017 in date range '2017-05-09' to '2017-05-12'
    """
    frame = pandas.read_csv('data/matches.csv')
    result = show.show(frame,
                       dimensions=['player_of_match'],
                       metric='win_by_runs',
                       slices=[('season', Filters.EQUAL_TO, 2017)],
                       date_range=('2017-05-09', '2017-05-12'),
                       date_column_name='date',
                       date_format='%Y-%m-%d',
                       summary_operator=SummaryOperators.MEAN)
    print(result)
    expected = """ player_of_match win_by_runs 0 KK Nair 7 1 MM Sharma 14 2 SS Iyer 0 3 WP Saha 7"""
    assert expected == result.to_string()
def test_qobject_children(self):
    """ Testing QObject.children(). """
    # Same three widgets as before, exercised via a loop: construct,
    # show, then dump the child list of each.
    for widget_cls in (ActEdit, SceneWidget, CommandSequenceWidget):
        widget = widget_cls()
        widget.show()
        show(widget.children())
def scan_for_shows():
    """Rebuild the global show collection from ./shows subdirectories,
    start a file observer per show, and publish a rebuild notification."""
    print "SCANNING FOR SHOWS"
    # We build the collection of shows from the files system
    global shows
    print "Numbers of shows is: %s" % len(shows)
    shows = []
    theframe.shows = []
    # This contains show objects
    # Cargo cult recipie that gives only directories, not subdirectories
    for show_dir in os.walk('./shows').next()[1]:
        show_dir = "./shows/" + show_dir
        print "Creating show for directory: %s" % show_dir
        s = show()
        s.load_path(show_dir)
        s.start_observer()
        shows.append(s)
        # This merely contains show names
        theframe.shows.append(os.path.basename(show_dir))
    print "Numbers of shows is NOW: %s" % len(shows)
    component_session.publish("com.hpf.status",'show-list-rebuilt')
def test_8():
    """An example from the IPL dataset
    question : show mean of win_by_runs in season 2017
    in date range '2017-05-09' to '2017-05-12'
    """
    frame = pandas.read_csv('data/matches.csv')
    result = show.show(frame,
                       metric='win_by_runs',
                       slices=[('season', Filters.EQUAL_TO, 2017)],
                       date_range=('2017-05-09', '2017-05-12'),
                       date_column_name='date',
                       day_first=False,
                       summary_operator=SummaryOperators.MEAN)
    print(result)
    expected = """ MEAN of win_by_runs 0 7"""
    expected_suggestions = "[]"
    assert expected == result[0].to_string()
    assert expected_suggestions == str(result[1])
def scan_for_shows():
    """Rebuild the global show collection from ./shows subdirectories,
    start a file observer per show, and broadcast the rebuild."""
    print "SCANNING FOR SHOWS"
    # We build the collection of shows
    global shows
    print "Numbers of shows is: %s" % len(shows)
    shows = []
    theframe.shows = []
    # This contains show objects
    # Cargo cult recipie that gives only directories, not subdirectories
    for show_dir in os.walk('./shows').next()[1]:
        show_dir = "./shows/" + show_dir
        print "Creating show for directory: %s" % show_dir
        s = show()
        s.load_path(show_dir)
        s.start_observer()
        shows.append(s)
        # This merely contains show names
        theframe.shows.append(os.path.basename(show_dir))
    print "Numbers of shows is NOW: %s" % len(shows)
    dmsg("List of shows was rebuilt")
    factory.dispatch("http://localhost/status",'show-list-rebuilt')
def test_10():
    """An example from "student score updated to have negative marks" to
    test attribution with hidden negative oversight
    question : show sum of proportion of marks for each subject
    """
    frame = pandas.read_csv(
        'data/student_score_updated_to_have_negative_marks.csv')
    result = show.show(
        frame,
        metric='marks',
        dimensions=['subject'],
        summary_operator=SummaryOperators.PROPORTION_OF_SUM)
    print(result)
    expected = """ subject PROPORTION_OF_SUM of marks 0 Social science 0.399558 1 english 0.000000 2 maths 0.200883 3 science 0.399558"""
    expected_suggestions = "[{'suggestion': 'There exists negative values among the values on which proportion is being applied', 'oversight': <Oversights.ATTRIBUTION_WITH_HIDDEN_NEGATIVES: 11>, 'is_row_level_suggestion': True, 'confidence_score': 1, 'row_list': [{'row': 2, 'confidence_score': 1}, {'row': 3, 'confidence_score': 1}]}]"
    assert expected == result[0].to_string()
    assert expected_suggestions == str(result[1])
def main():
    """Demonstrate preprocessing, the boolean model and the VSM on the
    sample documents."""
    # Tokenisation, stop word removal and stemming, printed after each step.
    doc = documents.DOCUMENT_1
    _pprint(doc)
    for step in (tokenise, stop_word_removal, stem):
        doc = step(doc)
        _pprint(doc)

    # Boolean model
    corpus = Document.from_texts(
        [documents.DOCUMENT_1, documents.DOCUMENT_2, documents.DOCUMENT_3])
    q = "nobel"
    matches = boolean_query(corpus, q)
    show(q)
    show.items(matches)

    # Vector space model
    vsm = VSM(corpus)
    queries = [
        "Physicist",
        "Nobel Prize Genius",
        "President of the United States",
        "US President",
        "Quantum Mechanics",
        "President of Germany",
        "Famous politicians",
    ]
    for query in queries:
        show(query)
        results = vsm.query(query)
        show(results)
def goto(self, path):
    """Point this dired view at ``path``, reusing the current view."""
    window = self.view.window()
    show(window, path, view_id=self.view.id())