def format_pack(self):
    """Validate the pack-metadata fields and run the formatter.

    Requires a non-empty pack name and author; the artist field falls back
    to "Various Artists" when left blank.  Prints a message instead of
    raising when required input is missing.
    """
    # NOTE(review): the original validated input with `assert`, which is
    # stripped under `python -O`; replaced with an explicit check that
    # preserves the original "Input missing." message.
    if not (len(self.packNameLineEdit.text()) > 0
            and len(self.packAuthorLineEdit.text()) > 0):
        print("Input missing.")
        return
    if len(self.packArtistLabel.text()) == 0:
        artist = "Various Artists"
    else:
        artist = self.packArtistLabel.text()
    format(self.current_song_path,
           self.current_song_list,
           self.packNameLineEdit.text(),
           artist,
           self.packAuthorLineEdit.text(),
           self.show_progress)
def process_files():
    """Process input files.

    Reads each input file as a script, applies the enabled transformations
    (warning checks, word-wrap, formatting) block by block, and writes the
    result via ``script.Writer``.  Options come from ``LiliArgParser``.
    When no input files are given, nothing is processed.
    """
    # Bug fix: was ``inputfiles = None`` — iterating None below raised
    # TypeError whenever the parser returned no input files.
    inputfiles = []
    outputfile = None
    encoding = constants.DEFAULT_ENCODING

    parser = liliargparser.LiliArgParser().parse()
    if parser.getinputfiles():
        inputfiles = parser.getinputfiles()
    if parser.getencoding():
        encoding = parser.getencoding()
    if parser.getoutputfile():
        outputfile = parser.getoutputfile()

    # Process input files
    for f in inputfiles:
        sr = script.Reader(f, encoding)
        sw = script.Writer(outputfile)
        messagecount = 0
        for block in sr.read():
            if parser.warning_enabled():
                warnings.check(block, f)
            if parser.wordwrap_enabled():
                wrapper.wrap(block)
            if parser.format_enabled():
                formatter.format(block)
            if parser.verbose_enabled():
                if len(block.gettext()) > 0:
                    messagecount += 1
            # Warning mode only reports; writing happens otherwise.
            if not parser.warning_enabled():
                if parser.text_only():
                    sw.write(block, textonly=True)
                else:
                    sw.write(block)
        # Per-file summary (prints f and the per-file messagecount);
        # NOTE(review): assumed to sit inside the file loop — confirm
        # against the un-mangled original.
        if parser.verbose_enabled():
            print(f, ': ', messagecount, " text messages",
                  sep='', file=sys.stderr)
def update_data_file(get_datadir, template_name, data_item):
    """Append *data_item* to the template's data and rewrite its file.

    Returns the result of ``write_file`` (indexable; element 0 is the
    status code — 0 on success, in which case "...ok" is printed).
    """
    existing = get_data(get_datadir, template_name)
    existing.append(data_item)
    serialized = format(existing)
    target_path = get_data_path(get_datadir, template_name)
    result = write_file(target_path, serialized)
    if result[0] == 0:
        print("...ok")
    return result
# NOTE(review): this block arrived redacted/garbled upstream — the string
# literals around "Username: " and "Password:" were replaced with ``******``
# and the try/except bodies are truncated, so the text as-is is not valid
# Python. Left byte-identical pending recovery of the original source.
# What remains visible: a loop accepting socket connections (s.accept()),
# setting a 3-second timeout, prompting for credentials, and exiting with
# "Socket closed.\nStopped." on shutdown.
def recvConnection(): while 1: try: try: conn, addr = s.accept() conn.settimeout(3) conn.sendall("Username: "******"Password:"******"Socket closed.\nStopped." sys.exit()
def main(unused_argv):
    """Read document batches from the parser-ops document source and print
    each parsed Sentence proto via ``formatter.format``.

    Loops until the source reports ``finished``.
    """
    logging.set_verbosity(logging.DEBUG)
    with tf.Session() as sess:
        src = gen_parser_ops.document_source(
            batch_size=32,
            corpus_name=FLAGS.corpus_name,
            task_context=FLAGS.task_context)
        sentence = sentence_pb2.Sentence()
        while True:
            documents, finished = sess.run(src)
            logging.info('Read %d documents', len(documents))
            for d in documents:
                sentence.ParseFromString(d)
                # Was a Python-2 print statement; the call form below is
                # valid in both Python 2 and 3 and matches the rest of
                # this file, which already uses print() as a function.
                print(formatter.format(sentence))
            if finished:
                break
"\n" "commands:\n" + "\n".join(" localhub {}{} - {}".format(command[0], ((" " + " ".join( "\033[4m" + arg + "\033[0m" for arg in command[2])) if command[2] else ""), command[1]) for command in LocalhubClient.commands())) if __name__ == "__main__": command_line = sys.argv[1:] if not command_line: print(usage(), file=sys.stderr) sys.exit(1) command, *args = sys.argv[1:] try: client = LocalhubClient() except socket.error as e: print("Couldn’t connect. Is localhubd running?", file=sys.stderr) sys.exit(2) method = getattr(client, 'cmd_' + command, None) if method is None: print(usage(), file=sys.stderr) sys.exit(1) for message in method(*args): for line in formatter.format(message): print(line)
# Fetch a news page (URL from argv[1] or a default), extract content
# blocks with Parser2, classify them, and write the formatted result
# to result.txt.
if len(sys.argv) > 1:
    url = sys.argv[1]
else:
    url = 'https://lenta.ru/news/2016/08/04/peskov_medved/'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
headers = {'User-Agent': user_agent}
req = urllib.request.Request(url, None, headers)
try:
    with urllib.request.urlopen(req) as response:
        data = response.read().decode('utf-8')
        parser = parser2.Parser2()
        parser.feed(data)
        classifier.classifier(parser.contents)
        # Build the text in one pass — the original concatenated strings
        # in a loop (quadratic). The resulting string is identical.
        parts = [content.text + '{' + str(content.is_content) + '}' + '\n'
                 for content in parser.contents
                 if content.is_content and content.text]
        text = formatter.format(''.join(parts))
        # The redundant f.close() inside the `with` block was removed —
        # the context manager already closes the file.
        with io.open('result.txt', 'w', encoding='utf8') as f:
            f.write(text)
except HTTPError as e:
    print('The server couldn\'t fulfill the request.')
    print('Error code: ', e.code)
except URLError as e:
    print('We failed to reach a server.')
    print('Reason: ', e.reason)
# Fetch a news page (URL from argv[1] or a default), extract content
# blocks with Parser2, classify them, and write the formatted result
# to result.txt.  (This snippet duplicates the one above; kept in sync.)
if len(sys.argv) > 1:
    url = sys.argv[1]
else:
    url = 'https://lenta.ru/news/2016/08/04/peskov_medved/'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
headers = {'User-Agent': user_agent}
req = urllib.request.Request(url, None, headers)
try:
    with urllib.request.urlopen(req) as response:
        data = response.read().decode('utf-8')
        parser = parser2.Parser2()
        parser.feed(data)
        classifier.classifier(parser.contents)
        # Build the text in one pass — the original concatenated strings
        # in a loop (quadratic). The resulting string is identical.
        parts = [content.text + '{' + str(content.is_content) + '}' + '\n'
                 for content in parser.contents
                 if content.is_content and content.text]
        text = formatter.format(''.join(parts))
        # The redundant f.close() inside the `with` block was removed —
        # the context manager already closes the file.
        with io.open('result.txt', 'w', encoding='utf8') as f:
            f.write(text)
except HTTPError as e:
    print('The server couldn\'t fulfill the request.')
    print('Error code: ', e.code)
except URLError as e:
    print('We failed to reach a server.')
    print('Reason: ', e.reason)
command[0], ( (" " + " ".join("\033[4m" + arg + "\033[0m" for arg in command[2])) if command[2] else "" ), command[1] ) for command in HomedClient.commands() ) ) if __name__ == "__main__": command_line = sys.argv[1:] if not command_line: print(usage(), file=sys.stderr) sys.exit(1) command, *args = sys.argv[1:] try: client = HomedClient() except socket.error as e: print("Couldn’t connect. Is homed running?", file=sys.stderr) sys.exit(2) method = getattr(client, 'cmd_' + command, None) if method is None: print(usage(), file=sys.stderr) sys.exit(1) for line in method(*args): print(formatter.format(line))
import sys

from formatter import format

# Command-line entry: format the named Python file, or show help with -h.
if len(sys.argv) < 2:
    print("Error! Expected name of file.")
else:
    if "-h" in sys.argv[1:]:
        # Typo fix: "bellow" -> "below" in the user-facing help text.
        print("""To format your Python code use commands described below:\n python runformatter.py <name of file to format> | format your file with default configurations\n python runformatter.py <name of file to format> -f <configuration file> | format with configs in chosen file\n python runformatter.py <name of file to format> -c <option of config-1><value-1>...<option of config-N><value-N> |format with default but some options is changing\n python runformatter.py <name of file to format> -c <option of config-1><value-1>...<option of config-N><value-N> -n <file to save configs> |format with default but some options is changing and save them to another file\n python runformatter.py -i | run interactive mode for creation your customized configuration template\n python runformatter.py -i -f <configuration file> | run interactive mode for creation your customized configuration template based on config file you entered\n """)
    else:
        try:
            format(sys.argv)
        except OSError:
            print("Error! File not found.")
if not isinstance(page, int): raise TypeError if not isinstance(zip_code, int): raise TypeError if not isinstance(is_debug, int): raise TypeError except TypeError: raise TypeError # crawl crawler = crawler.Crawler(page, zip_code, {}, debug.Debug(1)) uris = asyncio.get_event_loop().run_until_complete(crawler.crawl()) debug.Debug(1).console(uris) # parse parsers = list() formatter = formatter.Formatter() dataset = list() for uri in uris: parsers.append(psr.Psr(uri, parse_rule)) for parser in parsers: data = asyncio.get_event_loop().run_until_complete(parser.parse()) formatted = formatter.format(data) dataset.append(formatter.flatten(data)) time.sleep(2) # randomize time exporter = exporter.Exporter() exporter.toCSV('./test.csv', dataset)
# same. c = D[0][1] for i in range(1, len(D)): if D[i][1] != c: return False return True def main(data): # Create our decision tree on the data given to us. # data[0] is the training data. classifier = Classifier(data[0]) # Pretty print our decision tree pp.pprint(classifier.dt) print() # Get and print the results of running our decision tree # on our training data. It is returned as a list. # Format of element in results: (<data>, <predicted_class>) results = classifier.classify(data[1]) pp.pprint(results) if __name__ == "__main__": main((formatter.format("data.csv"), formatter.format("test_data.csv")))
def default_missing_handler(self, context, message):
    """Build an error reply for a message whose sentence_id has no
    registered handler, listing the ids that are valid."""
    known_ids = ", ".join(self.message_handlers.keys())
    err_msg = ("Received message '{}' but the valid messageIds are: '{}'"
               .format(message['sentence_id'], known_ids))
    logger.debug(err_msg)
    return formatter.format(self.error_sentence_id + "," + err_msg)
def default_bad_checksum(self, context, raw_message):
    """Build an error reply for a message that failed checksum
    verification, reporting the checksum it should have had."""
    expected = formatter.calc_checksum(raw_message)
    err_msg = ("Message '{}' has a bad checksum. Correct checksum is '{}'"
               .format(raw_message, expected))
    logger.debug(err_msg)
    return formatter.format(self.error_sentence_id + "," + err_msg)
def default_error_handler(self, context, err):
    """Build a generic error reply when an unexpected exception reaches
    the default nmeaserver handler; logs the traceback at debug level."""
    # Bug fix: the original passed ``kwargs={"exc_info": 1}``, which
    # logging.Logger.debug() rejects with a TypeError — exc_info is a
    # direct keyword argument, not wrapped in a ``kwargs`` mapping.
    logger.debug("Error detected in default nmeaserver handler",
                 exc_info=True)
    return formatter.format(
        self.error_sentence_id + ", The nmeaserver experienced an exception."
        " Check the debug logs")
def __main__():
    """Load the sample C++ source, parse it, and print the formatted AST."""
    source_tokens = load_cpp("test/test1.cpp")
    tree = parse(source_tokens)
    print(format(tree))
(" " + " ".join("\033[4m" + arg + "\033[0m" for arg in command[2])) if command[2] else "" ), command[1] ) for command in LocalhubClient.commands() ) ) if __name__ == "__main__": command_line = sys.argv[1:] if not command_line: print(usage(), file=sys.stderr) sys.exit(1) command, *args = sys.argv[1:] try: client = LocalhubClient() except socket.error as e: print("Couldn’t connect. Is localhubd running?", file=sys.stderr) sys.exit(2) method = getattr(client, 'cmd_' + command, None) if method is None: print(usage(), file=sys.stderr) sys.exit(1) for message in method(*args): for line in formatter.format(message): print(line)
# NOTE(review): scrapeThread() spans this whole region but arrived with its
# line structure collapsed, so the original loop/branch nesting cannot be
# recovered with confidence. Code is left byte-identical; comments only.
#
# Visible behavior: scrapes a forum game thread (regular "era" XenForo style
# or, with om=True, an older "om" style), paging through posts with a
# FuturesSession (10 workers). It resumes from CSV caches under
# gamecache_2.0/ (<thread>_players.csv / _phases.csv / _votes.csv) when
# present, then detects day-begin/day-end commands and vote / unvote /
# doublevote / triplevote / reset commands inside color-highlighted spans,
# updating the three module-level pandas DataFrames and finally returning
# formatter.format(votes_df, players_df, phases_df).
#
# Review flags to confirm against the un-mangled source:
#  - `banner_url[-1] = ' '` assigns into a str (img["src"]) and would raise
#    TypeError if that branch is ever hit.
#  - `pd.np.nan` and `DataFrame.append` are removed in pandas >= 2.0; this
#    code presumably targets an older pandas — verify pinned version.
#  - bare `except Exception as e` around the cache load silently turns any
#    read error into a fresh scrape; intentional best-effort, but worth a log.
def scrapeThread(thread_id, om=False): global players_df global votes_df global phases_df # Store page in variable thread_url = base_thread_url+thread_id if om: thread_url = om_thread_url+thread_id era_page = getSoup(thread_url, False) # Find out how many pages there are numPages = 1 if om: pages = era_page.find("span", {"class" : "pageNavHeader"}) if(pages != None): nav = pages.contents[0].split(" ") numPages = int(nav[3]) else: aList = era_page.find_all('a', {'class':'pageNavSimple-el pageNavSimple-el--current'}) for a in aList: numPages = int(a.get_text(strip=True).split(" of ")[1]) break print("lastPage is "+str(numPages)) #Banner banner_url = None #By default, the scraper should start scanning on page 1, and have no #reference to the last day end post scanned. lastPage = 1 lastPost = None #Check if there's a file corresponding to this game already #if so, we load all game info and set variables so the scraper knows #which page and post to start scraping from. try: players_df = pd.read_csv("gamecache_2.0/"+str(thread_id).replace("/","")+"_players.csv") phases_df = pd.read_csv("gamecache_2.0/"+str(thread_id).replace("/","")+"_phases.csv") votes_df = pd.read_csv("gamecache_2.0/"+str(thread_id).replace("/","")+"_votes.csv") #banner_url = data["banner_url"] #We find out the last day end page and post numbers, so we can start scraping from that point. 
lastPage = phases_df.phase_end_page.max() lastPost = phases_df.phase_end_number.max() except Exception as e: print("No file found, or error loading file: ") print (e) # Load pages asynchronically, I'm a mad scientist session = FuturesSession(max_workers=10) requests = [] for p in range(int(lastPage), numPages + 1): # Each page request gets added to the session, as well as the getSoupInBackground # function which lets us do some additional stuff on the background page_url = thread_url + "page-" + str(p) requests.append(session.get(page_url, background_callback=lambda sess, resp: getSoupInBackground(sess, resp, om))) # For each page: for p in range(0, len(requests)): print("Page "+str(p)) #Wait if needed for the request to complete. By the time it's done we should have #access to the posts, users and links as parsed by the getSoupInBackground function pageData = requests[p].result().data #These are the posts posts = pageData["posts"] #These are the users users = pageData["users"] #These are the links links = pageData["links"] #These are the links timestamps = pageData["timestamps"] if(not om): i = 0 while(i != len(links)): lstr = links[i].find("a")['href'].partition("/permalink")[0] if("#" in lstr or "threadmarks" in lstr): links.pop(i) i = i - 1 i = i+1 # If there are no active phases yet, grab banner url if (len(phases_df) == 0): img = posts[0].find("img") if(img != None and img.has_attr('src')): banner_url = img["src"] if '/' == banner_url[-1]: banner_url[-1] = ' ' print(banner_url) #For each post in this page: for i in range(0, len(posts)): nextPost = False #Get the current post's content, the user, the link, timestamp and the post number currentPost = posts[i] currentUser = users[i].find("a", {"class": "username"}).get_text(strip=True).lower() currentLink = era_url+links[i].find("a")['href'].partition("/permalink")[0] currentTimestamp = timestamps[i].find("time")['datetime'] if (om): currentLink = om_url+links[i].find("a")['data-href'].partition("/permalink")[0] 
currentTimestamp = timestamps[i].find("span")['title'] currentPostNum = links[i].find("a").string; try: current_phase_info = phases_df.loc[phases_df.phase_number.idxmax()] phaseNum = phases_df.phase_number.max() except Exception as e: current_phase_info = pd.DataFrame() phaseNum = 0 # Increment post count only if the latest phase is active if(len(current_phase_info) > 0 and pd.isnull(phases_df.loc[phases_df.phase_number.idxmax(), "phase_end_link"])): if(len(players_df) > 0 and len(players_df[players_df.name == currentUser]) > 0): players_df.loc[players_df.name == currentUser, "post_count_"+str(current_phase_info.phase_number)] += 1 else: players_df = players_df.append({"name":currentUser, "post_count_"+str(current_phase_info.phase_number):1}, ignore_index = True) currentPostInt = int(currentPostNum.replace("#", "").replace(",", "").strip()) # If we set a last day end post, meaning we loaded some previous game data, # skip all posts until the one after it, by comparing post numbers. if (lastPost != None): #Ignore the post if its number is lower than last post if(currentPostInt <= lastPost): continue #Mark last post as none so we don't have to make this comparison for future posts else: lastPost = None #Extract quotes so we don't accidentally count stuff in quotes hasQuote = currentPost.findAll("div", {"class": " bbCodeBlock bbCodeBlock--expandable bbCodeBlock--quote"}) if(om): hasQuote = currentPost.findAll("div", {"class": "bbCodeBlock bbCodeQuote"}) for quote in hasQuote: # Skips quoted posts quote.extract() #Find all potential "actions" action_list = currentPost.find_all("span") if(om): action_list = currentPost.find_all("strong") if len(action_list) > 0: for action in action_list: if nextPost: break #Check for color tags if (action.has_attr('style') and 'color' in action['style']) or (action.has_attr('class') and 'bbHighlight' in action['class']): #I'm removing bold tags here to simplify the command matching procedure for match in action.findAll('b'): 
match.replaceWithChildren() #Check for valid commands for line in str(action).lower().splitlines(): if nextPost: break #If the day is starting, set the current day variable to a new day if(bool(re.search(command_day_begins, line, re.IGNORECASE))): print("New day begins on post "+currentPostNum+"("+currentLink+")") #This is to use the day identifier as part of the title #of this day phase in the view of the data. m = re.search(command_day_begins, line, re.IGNORECASE) current_day_name = m.group(2) phaseNum = 0 if(len(current_phase_info) > 0): phaseNum = current_phase_info.phase_number+1 new_day_info = {"phase_name":current_day_name, "phase_start_link":currentLink, "phase_start_number":currentPostInt, "phase_start_page":p+lastPage, "phase_start_timestamp":currentTimestamp, "phase_number":phaseNum, "phase_end_link":pd.np.nan, "phase_end_number":pd.np.nan, "phase_end_page":pd.np.nan, "phase_end_timestamp":pd.np.nan} phases_df = phases_df.append(new_day_info, ignore_index = True) current_phase_info = phases_df.loc[phases_df.phase_number.idxmax()] players_df["post_count_"+str(current_phase_info.phase_number)] = 0 nextPost = True break #If the day has ended, append the current day to the days variable and then clear it if(bool(re.search(command_day_ends, line, re.IGNORECASE))): if len(current_phase_info) == 0: continue print("Day ends on "+currentPostNum) phases_df.loc[phases_df.phase_number.idxmax(), "phase_end_link"] = currentLink phases_df.loc[phases_df.phase_number.idxmax(), "phase_end_number"] = currentPostInt phases_df.loc[phases_df.phase_number.idxmax(), "phase_end_page"] = p+lastPage phases_df.loc[phases_df.phase_number.idxmax(), "phase_end_timestamp"] = currentTimestamp #Update this game's cache files with day info phases_df.to_csv("gamecache_2.0/"+str(thread_id).replace("/","")+"_phases.csv",index=False) players_df.to_csv("gamecache_2.0/"+str(thread_id).replace("/","")+"_players.csv",index=False) 
votes_df.to_csv("gamecache_2.0/"+str(thread_id).replace("/","")+"_votes.csv",index=False) break #Handle vote reset command elif(command_reset in line): if len(current_phase_info) == 0: continue votes_df = votes_df.drop(votes_df[votes_df.day == current_phase_info.phase_number].index) print("Votes have been reset!") nextPost = True break #Handle unvote command elif(command_unvote in line): if len(current_phase_info) == 0: continue print(currentUser+" UNVOTED") removeActiveVote(currentUser, phaseNum, currentLink, currentPostInt, currentTimestamp) #Handle vote command elif(command_vote in line): if len(current_phase_info) == 0: continue target = str(line).lower().partition(command_vote)[2].partition('<')[0].strip() print(currentUser+" -> "+ target) removeActiveVote(currentUser, phaseNum, currentLink, currentPostInt, currentTimestamp) addActiveVote(currentUser, target, phaseNum, currentLink, currentPostInt, 1, currentTimestamp) #Handle doublevote command elif(command_doublevote in line): if len(current_phase_info) == 0: continue target = str(line).lower().partition(command_doublevote)[2].partition('<')[0].strip() print(currentUser+" ->> "+ target) removeActiveVote(currentUser, phaseNum, currentLink, currentPostInt, currentTimestamp) addActiveVote(currentUser, target, phaseNum, currentLink, currentPostInt, 2, currentTimestamp) #Handle triple vote command elif(command_triplevote in line): if len(current_phase_info) == 0: continue target = str(line).lower().partition(command_triplevote)[2].partition('<')[0].strip() print(currentUser+" ->>> "+ target) removeActiveVote(currentUser, phaseNum, currentLink, currentPostInt, currentTimestamp) addActiveVote(currentUser, target, phaseNum, currentLink, currentPostInt, 3, currentTimestamp) return formatter.format(votes_df, players_df, phases_df)