def __init__(self, path=None, arabic=True):
    self.arabic = arabic
    # Browsing and writing managers
    self.br = Browser()
    self.wr = Writer(path) if path else Writer()
    # A list of ids for scraped books
    self._books_ids = []
def main(argv: List[str]) -> None:
    if len(argv) != 2:
        print('Wrong number of arguments.')
        return
    writer = None
    path = argv[1]
    files = []
    if os.path.isfile(path):
        files.append(path)
        name = os.path.basename(path).split('.')[0] + '.asm'
        writer = Writer(os.path.join(os.path.dirname(path), name))
    else:
        for f in os.listdir(path):
            abspath = os.path.join(path, f)
            if str(abspath).endswith(".vm"):
                files.append(abspath)
        name = os.path.join(path, os.path.basename(path) + '.asm')
        writer = Writer(name)
    writer.write_init()
    for f in files:
        handle = open(f, "r")
        name = os.path.basename(f).split('.')[0]
        writer.set_file_name(name)
        for line in handle:
            line = str(line).strip()
            if line == '' or line.startswith('//'):
                continue
            t, tokens = parse_line(line)
            # Write the VM command as a comment for debugging
            writer.write_comment(tokens)
            # Write the actual translated command
            if t == C_ARITHMETIC:
                writer.write_arithmetic(tokens[0])
            elif t == C_PUSH:
                writer.write_push(tokens[1], int(tokens[2]))
            elif t == C_POP:
                # Cast the index to int, as write_push does
                writer.write_pop(tokens[1], int(tokens[2]))
            elif t == C_LABEL:
                writer.write_label(tokens[1])
            elif t == C_GOTO:
                writer.write_goto(tokens[1])
            elif t == C_IF:
                writer.write_if(tokens[1])
            # elif t == C_FUNCTION:
            #     writer.write_function(tokens[1], int(tokens[2]))
            # elif t == C_RETURN:
            #     writer.write_return()
            # elif t == C_CALL:
            #     writer.write_call(tokens[1], int(tokens[2]))
            else:
                print('Invalid command')
        handle.close()
    writer.write_end()
    writer.close()
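# The parse_line helper and the C_* command-type constants are assumed by
# main() above but not shown in this snippet. A minimal sketch of what they
# might look like (hypothetical, not the original implementation):
C_ARITHMETIC, C_PUSH, C_POP, C_LABEL, C_GOTO, C_IF, C_FUNCTION, C_RETURN, C_CALL = range(9)

def parse_line(line):
    """Split a cleaned VM source line into (command_type, tokens)."""
    tokens = line.split()
    arithmetic = {'add', 'sub', 'neg', 'eq', 'gt', 'lt', 'and', 'or', 'not'}
    keyword_types = {'push': C_PUSH, 'pop': C_POP, 'label': C_LABEL,
                     'goto': C_GOTO, 'if-goto': C_IF, 'function': C_FUNCTION,
                     'return': C_RETURN, 'call': C_CALL}
    if tokens[0] in arithmetic:
        return C_ARITHMETIC, tokens
    return keyword_types[tokens[0]], tokens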
def __init__(self, path=None, lang="ar", edition_reviews=False):
    # Language of reviews to be scraped
    self._lang = lang
    # Instantiate browsing and writing managers
    self.wr = Writer(path) if path else Writer()
    self.br = Browser(edition_reviews)
    # Initialize an empty threads list
    self._threads = []
    # Counter for reviews from different languages
    self._invalid = None
def __init__(self, config):
    self.config = config
    self.merge_mode = self.config['merge_mode']
    self.flag = 0
    self.slice_num = self.config['slice_num']
    try:
        if isinstance(config['thread_id_list'], str):  # e.g. "thread_id.txt"
            thread_id_list = config['thread_id_list']
            if not os.path.isabs(thread_id_list):
                thread_id_list = (os.path.split(os.path.realpath(__file__))[0]
                                  + os.sep + thread_id_list)
            # Open the resolved absolute path, not the raw config value
            with open(thread_id_list, 'rb') as f:
                lines = f.read().splitlines()
            lines = [line.decode('utf-8') for line in lines]
            config['thread_id_list'] = [
                line.split(' ')[0] for line in lines
                if len(line.split(' ')) > 0 and line.split(' ')[0].isdigit()
            ]
        elif config['thread_id_list']:
            self.config['thread_id_list'] = range(774061, 1792000)
        else:
            raise Exception
    except Exception:
        print('To supply thread ids, enter them in thread_id.txt. '
              'To crawl all of S1, set the value of thread_id_list '
              'in config.json to true.')
        sys.exit()
    self.parser = Stage1stParser(self.config)
    self.session = self.parser.loginSession()
    self.writer = Writer(self.config)
def create_netcdf(self):
    self.globalAttributes.write_attributes(self.ncFile)
    self.dimensions.write_dimensions(self.ncFile)
    if self.naming_authority == 'EGO':
        self.dimensionsEgo.write_dimensions(self.ncFile)
        self.variables = self.ego_standard_metadata.get_glider_characteristics_variables()
        writer = writer_ego_standard(self.dimensions.get_metadata_dimension())
        writer.write(self.variables,
                     self.metadataData['glider_characteristics'],
                     self.ncFile)
        # self.writer.write_variables_data(self.metadataData['glider_characteristics'], self.variables, self.version)
        self.variables = self.ego_standard_metadata.get_glider_deployment_variables()
        writer = writer_ego_standard(self.dimensions.get_metadata_dimension())
        writer.write(self.variables,
                     self.metadataData['glider_deployment'],
                     self.ncFile)
        self.variables = self.metadata.get_variables()
        self.writer_ego = writer_ego(self.data_ego, self.dimensions, self.ncFile)
        self.writer_ego.write_variables_data(self.metadataData['variables'],
                                             self.variables, self.version)
    else:
        self.variables = self.metadata.get_variables()
        self.writer = Writer(self.data, self.dimensions, self.ncFile)
        self.writer.write_variables_data(self.metadataData['variables'],
                                         self.variables, self.version)
def start():
    data = OrderedDict()
    data.update({'Sheet -': [[1, 5, 9], [2, 'f**k', 0]]})
    writer = Writer('inout/Testy.ods')
    writer.setData(data)
    matrix = [[1, 2, '3'], [4, 5, 6], ['7', '8', '9']]
    writer.addSheet('Hi there', matrix)
    writer.write('ods')
def process(self):
    wb = openpyxl.load_workbook(self.file)
    sheet = wb['Sheet1']  # get_sheet_by_name() is deprecated in openpyxl
    data_new_file = {}
    data_old_file = {}
    data_final = {}
    for row in range(1, sheet.max_row + 1):
        current_row = str(row)
        new_var = sheet['A' + current_row].value
        old_var = sheet['C' + current_row].value
        new_label = sheet['B' + current_row].value
        old_label = sheet['D' + current_row].value
        if new_var and new_label:
            data_new_file[row] = {'var': new_var, 'label': new_label}
        if old_var and old_label:
            data_old_file[row] = {'var': old_var, 'label': old_label}
    for item in data_new_file:
        label = data_new_file[item]['label']
        new_variable = data_new_file[item]['var'].split('_')[0]
        new_variable_s = new_variable.replace('xxx', '')
        tmp_old_item = []
        for row in data_old_file:
            old_variable = data_old_file[row]['var'].split('_')[0]
            old_variable_s = old_variable.replace('xxx', '')
            if (label == data_old_file[row]['label']
                    and old_variable_s == new_variable_s):
                if item in data_old_file:
                    tmp_old_item.append({
                        'updated': 1,
                        'var': data_old_file[row]['var'],
                        'label': data_old_file[row]['label'],
                    })
        if len(tmp_old_item) == 1:
            data_final[item] = tmp_old_item[0]
        else:
            data_final[item] = {
                'updated': 0,
                'var': data_new_file[item]['var'],
                'label': data_new_file[item]['label'],
            }
    '''
    for item in data_final:
        print(str(item) + ' :: ' + str(data_final[item]['updated']) + ' : ' +
              str(data_final[item]['var']) + ' : ' +
              str(data_final[item]['label']))
    '''
    w = Writer(data_final, data_old_file, self.file)
    w.create_file()
    return data_final
def main():
    b = Book()
    g = Gui()
    # GuiRunner(g, "animation").start()
    # Running the GUI on its own thread worked on Linux Mint but raised
    # errors for other members on Windows, so instead of assigning a
    # thread to the GUI we run it in the main thread.
    g.change_state("W", 7, g.nowhere, g.scheduling)
    Writer(b, 7, g).start()
    for i in range(0, 3):
        g.change_state("R", i, g.nowhere, g.scheduling)
        Reader(b, i, g).start()
    for i in range(0, 2):
        g.change_state("W", i, g.nowhere, g.scheduling)
        Writer(b, i, g).start()
    g.animation(50, 50, 5)
def start_writer(self):
    '''Create a new thread responsible for opening a downstream
    connection to the ground station.'''
    writerThread = Writer('127.0.0.1', 9000, self.broadcastQueue)
    writerThread.setName('ISRU Writer')
    writerThread.start()
    return writerThread
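# The Writer used above appears to be a threading.Thread subclass that pushes
# queued packets down a TCP connection. A minimal sketch consistent with how
# it is constructed in start_writer() -- named DownstreamWriter here to make
# clear it is hypothetical, not the original class:
import socket
import threading

class DownstreamWriter(threading.Thread):
    def __init__(self, host, port, broadcast_queue):
        super().__init__(daemon=True)
        self.host = host
        self.port = port
        self.queue = broadcast_queue

    def run(self):
        # Open the downstream connection and drain the queue forever;
        # queued items are assumed to be bytes.
        with socket.create_connection((self.host, self.port)) as sock:
            while True:
                payload = self.queue.get()
                sock.sendall(payload)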
def main():
    inputFile = sys.argv[1]
    outputFile = sys.argv[2]
    text, ids = preProcess(inputFile)
    vector = lda(text)
    w = Writer(outputFile, vector, ids)
    w.getTopics()
    w.writeJson()
    print('Done!')
def run(self): with open(Util.Config.astFile, "rb") as ff: ast = pickle.load(ff) if not (Util.Config.disableAllOpti): if not (Util.Config.disableRMO): print("Performing Relu-maxpool optimization...") ReluMaxpoolOpti.ReluMaxpoolOpti().visit(ast) print("Relu-maxpool optimization done.") if not (Util.Config.disableLivenessOpti): print("Performing Garbage collection...") mtdAST = MtdAST() GC = GarbageCollector.GarbageCollector(ast) GC.run([mtdAST]) print("Garbage collection done.") # Perform type inference and annotate nodes with type information InferType().visit(ast) # if Util.Config.printASTBool : if False: PrintAST().visit(ast) print("\n") sys.stdout.flush() IRUtil.init() compiler = IRBuilderCSF() res = compiler.visit(ast) res = self.fixOuputScale(res, compiler) res = self.fixNames(res, compiler) Util.write_debug_info(compiler.name_mapping) # Insert a generic start_computation and end_computation function call after all input IR statements. res = self.insertStartEndFunctionCalls(res) writer = Writer(Util.Config.outputFileName) debugVarEzPCName = (compiler.name_mapping[Util.Config.debugVar] if (Util.Config.debugVar in compiler.name_mapping) else None) if Util.forEzPC(): codegen = EzPCCodegen(writer, compiler.globalDecls, debugVarEzPCName) else: assert False codegen.printAll(*res) writer.close()
def daily_news(self):
    # Get all articles
    paper_bundle = Paper_Boy().get_the_paper()
    # Add the statistics
    Analyzer(paper_bundle).fill_stats()
    # Write tweets
    tweets = Writer(paper_bundle).write_all_tweets()
    # Publish tweets
    publisher = Publisher()
    for tweet in tweets:
        publisher.post_tweet(tweet)
    return tweets
def run(self):
    with open(Util.Config.astFile, 'rb') as ff:
        ast = pickle.load(ff)
    if not Util.Config.disableAllOpti:
        if not Util.Config.disableRMO:
            print("Performing Relu-maxpool optimization...")
            # Perform optimizations on the AST
            ReluMaxpoolOpti.ReluMaxpoolOpti().visit(ast)
        if not Util.Config.disableLivenessOpti:
            print("Performing Liveness Optimization...")
            # Perform liveness analysis optimization on the AST
            mtdAST = MtdAST()
            LivenessOpti.LivenessAnalysis().visit(ast)
            LivenessOpti.LivenessOpti().visit(ast, [mtdAST, 0, {}])
    if Util.Config.printASTBool:
        PrintAST().visit(ast)
        sys.stdout.flush()
    # Perform type inference
    InferType().visit(ast)
    IRUtil.init()
    compiler = IRBuilderCSF()
    res = compiler.visit(ast)
    Util.write_debug_info(compiler.name_mapping)
    # Insert a generic start_computation and end_computation function
    # call after all input IR statements.
    res = self.insertStartEndFunctionCalls(res)
    writer = Writer(Util.Config.outputFileName)
    debugVarEzPCName = (compiler.name_mapping[Util.Config.debugVar]
                        if Util.Config.debugVar in compiler.name_mapping
                        else None)
    if Util.forEzPC():
        codegen = EzPCCodegen(writer, compiler.decls, debugVarEzPCName)
    else:
        assert False
    codegen.printAll(*res)
    writer.close()
def start():
    writer = Writer(wordFilename)
    while True:
        i = writer.getLen() + 1
        print(str(i) + ": ")
        # The original bare filter(...) calls discarded their results and
        # had no effect; strip surrounding whitespace explicitly instead.
        word = input().strip()
        # ====================================
        if word[0] == "$":
            word = word.split(" ")[1:]
            writer.deleteContent(word)
        # ====================================
        elif word == "q":
            break
        # ====================================
        elif word == "p":
            listOfWords = writer.readFile()
            print("\n=====================================================\n")
            print(str(listOfWords) + "\n")
            print("\n=====================================================\n")
        # ====================================
        elif word == "b":
            enteredPass = input("Password To get Backup : ")
            if enteredPass == str(password):
                writer.createBackup()
        # ====================================
        elif word == "c":
            enteredPass = input("Password To Delete Everything : ")
            if enteredPass == str(password):
                writer.clearFile()
        # ====================================
        else:
            if not writer.appendNoneRepeated(word):
                print("Word Already Added")
    print("Done")
    input()
def distribute_test_suite(node_dict, test_suite, data):
    """SSH into the remote instances and transfer data with SCP."""
    # Compress the project dir
    project = shutil.make_archive(expanduser('~/tmp/project'), 'gztar',
                                  root_dir=data['project'])
    writer = Writer(data, test_suite.content)
    # Distribute the test suite among instances
    for node, bundle in zip(node_dict.iteritems(), test_suite):
        config, cl = writer.generate_input(bundle)
        ip = node[1]
        key_file = data['ssh-key']
        user = data['username']
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname=ip, username=user, key_filename=key_file)
        with SCPClient(client.get_transport()) as scp:
            scp.put(config, "/home/" + user + "/tmp/config.tar.gz")
            scp.put(cl, "/home/" + user + "/tmp/params.tar.gz")
            scp.put(project, "/home/" + user + "/tmp/project.tar.gz")
        client.close()
def append_netcdf(self):
    self.dimensions.set_dimensions_by_netcdf(self.ncFile.dimensions)
    self.writer = Writer(self.data, self.dimensions, self.ncFile)
    self.writer.write_append_variables_data(self.metadata.get_variables())
            name = msg[:i]
            msg = msg[i + 1:]
            break
    value = ""
    for i in range(len(msg)):
        if msg[i] == 0:
            value = msg[:i]
            msg = msg[i + 1:]
            break
    value = int(value)
    options.append((name, value))
    for name, value in options:
        if name == b'windowsize':
            # Clamp the negotiated window size to the range [1, 4]
            window_size = min(4, value)
            if window_size < 1:
                window_size = 1
            break
    if mode != b'octet':
        send_error_message(
            0,
            "Unsupported mode: " + str(mode) +
            ". This server supports only octet mode currently.",
            addr, server)
        continue
    if op == 1:
        Reader(addr, file_name, mode, HOST, window_size).start()
    elif op == 2:
        Writer(addr, file_name, mode, HOST, window_size).start()
buf_red = bytearray(w * h // 8)           # used by frame buffer (landscape)
buf_epaper_black = bytearray(w * h // 8)  # used to display on e-paper after bytes have been
buf_epaper_red = bytearray(w * h // 8)    # moved from frame buffer to match e-paper (portrait)

import framebuf
fb_black = framebuf.FrameBuffer(buf_black, w, h, framebuf.MONO_VLSB)
fb_red = framebuf.FrameBuffer(buf_red, w, h, framebuf.MONO_VLSB)
black_red = 0  # will be black on buf_black, red on buf_red
white = 1
d_b = DummyDisplay(buf_black, w, h, framebuf.MONO_VLSB)
d_r = DummyDisplay(buf_red, w, h, framebuf.MONO_VLSB)
d_b.fill(white)
d_r.fill(white)
wri_b = Writer(d_b, font10, False)  # verbose=False to suppress console output
Writer.set_textpos(d_b, 6, 0)
wri_b.printstring(quote_list[0], True)
wri_r = Writer(d_r, font10, False)
Writer.set_textpos(d_r, 110, 0)  # y position 110 is the max for font10
wri_r.printstring(' -' + quote_list[1], True)

# Move frame buffer bytes to the e-paper buffer to match the e-paper's
# byte organisation.
x = 0
y = 0
n = 1
R = 0
for i in range(1, 17):
    for j in range(1, 297):
        R = (n - x) + ((n - y) * 15)
        buf_epaper_black[R - 1] = buf_black[n - 1]
        buf_epaper_red[R - 1] = buf_red[n - 1]
        n += 1
    x = n + i - 1
def main():
    if len(sys.argv) > 1:
        config_path = sys.argv[1]
    else:
        config_path = './configs/config_default.txt'
    if not Path(config_path).is_file():
        logging.error("Could not find config file!")
        sys.exit(1)  # exiting with error code

    # Load config
    config = configparser.ConfigParser()
    config.read(config_path)
    log_dir = config['PATHS']['log_dir']
    log_file_name = config['PATHS']['log_file_name']

    # Check that the logging directory is present
    if not Path(log_dir).is_dir():
        logging.error("Logging directory is not present!")
        sys.exit(1)  # exiting with error code

    file_handler = TimedRotatingFileHandler(
        os.path.join(os.path.dirname(__file__), log_dir, log_file_name),
        when='midnight', interval=1)
    console_handler = logging.StreamHandler()
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[file_handler, console_handler])
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    logging.getLogger("apscheduler.scheduler").setLevel(logging.WARNING)
    logging.getLogger("apscheduler.executors.default").setLevel(logging.WARNING)
    logging.getLogger("chardet.charsetprober").setLevel(logging.WARNING)
    logger.info("=======Starting=Crawler=========")

    # Store config preferences in variables
    # [(application number, period in seconds), ...]
    article_download_pattern = [
        (int(config['ARTICLE_DOWNLOAD_PATTERN']['number']),
         int(config['ARTICLE_DOWNLOAD_PATTERN']['delay'])),
    ]
    number_download_worker = int(config['CRAWLING']['number_download_worker'])
    website_request_timeout = int(config['REQUESTS']['website_request_timeout'])
    rss_feed_crawl_period = int(config['CRAWLING']['rss_feed_crawl_period'])
    rss_feed_request_timeout = int(config['REQUESTS']['rss_feed_request_timeout'])
    warmup_iterations = int(config['CRAWLING']['warmup_iterations'])
    throttle_velocity = float(config['CRAWLING']['throttle_velocity'])
    max_offset = int(config['CRAWLING']['max_offset'])
    downloads_path = config['PATHS']['downloads']
    crawled_rss_articles_path = config['PATHS']['rss_articles']
    feed_path = config['PATHS']['feeds_list']
    requests_path = config['PATHS']['requests']

    # Partly validate the config
    if not Path(feed_path).is_file():
        logging.error("Could not find RSS feeds list file!")
        sys.exit(1)  # exiting with error code
    parent_dir = os.path.dirname(requests_path)
    if not Path(parent_dir).is_dir():
        logging.error("Could not find requests directory!")
        sys.exit(1)  # exiting with error code

    writer = Writer()
    writer.start()
    throttle = Throttle(request_velocity=throttle_velocity)
    rss_requester = Requester(tag="RSS Requester", path=requests_path,
                              throttle=throttle)
    website_requester = Requester(tag="Website Requester", path=requests_path,
                                  throttle=throttle)
    scheduler = Scheduler(patterns=article_download_pattern)
    crawler = Crawler(requester=rss_requester,
                      scheduler=scheduler,
                      feed_path=feed_path,
                      crawled_rss_articles_path=crawled_rss_articles_path,
                      rss_feed_crawl_period=rss_feed_crawl_period,
                      rss_feed_request_timeout=rss_feed_request_timeout,
                      warmup_iterations=warmup_iterations,
                      max_offset=max_offset)
    crawler.start()
    for i in range(number_download_worker):
        logger.info("Starting download worker #%d", i)
        DownloadWorker(requester=website_requester,
                       timeout=website_request_timeout,
                       path=downloads_path).start()
    while True:
        time.sleep(60)
        logger.debug("Number of threads running: %d", threading.active_count())
        process = psutil.Process(os.getpid())
        ram_usage = process.memory_full_info()
        # The original computed an unused `percent` from undefined
        # variables (`absolute`, `mem`); that dead line is dropped.
        logger.info("RAM usage: %s%%, %s", process.memory_percent(), ram_usage)
def __init__(self):
    self.writer = Writer('hashing_output.csv',
                         ['VECTORS', 'SHA-1', 'SHA-2', 'SHA-3'])
def __init__(self):
    self.writer = Writer('verify_output.csv',
                         ['VECTORS', 'RSA_PSS', 'ECDSA', 'DSA'])
def process(ROOT_PATH, CORE_FILE, fid, rev):
    WXRX_LOG_FILE = _get_log_file_(ROOT_PATH, fid)
    # Set BASE_TIME from the 2nd line (logging start) in the WXRX_LOG_FILE
    BASE_TIME = get_base_time(WXRX_LOG_FILE)
    WXRX_NETCDF_FILENAME = 'weather-radar_faam_%s_r%s_%s.nc' % (
        datetime.datetime.strftime(BASE_TIME, '%Y%m%d'), str(rev),
        str.lower(fid))
    if os.path.exists(os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME)):
        sys.stdout.write('weather radar netCDF\n')
        sys.stdout.write(' ... %s\n' %
                         os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME))
        sys.stdout.write('already exists! Exiting ...\n')
        sys.exit(2)
    # Get unique valid wxrx-tmp-filelist from the log file
    wxrx_file_list = get_wxrx_tmp_filelist(WXRX_LOG_FILE)
    # Calculate the maximum size of the tmp wxrx data files
    MAX_SIZE = np.max([
        os.stat(os.path.join(ROOT_PATH, wxrx_file)).st_size
        for wxrx_file in wxrx_file_list
    ])
    # Integer division: np.zeros needs an int shape
    MAXIMUM_NUMBER_OF_RECORDS = (MAX_SIZE * 8 // 1744) + 1
    wxrx_data_list = []
    # np.float was removed from modern NumPy; np.float64 is the equivalent
    _RECS = np.zeros(MAXIMUM_NUMBER_OF_RECORDS,
                     dtype=[('label', np.str_, 4),
                            ('control_accept', np.byte),
                            ('slave', np.byte),
                            ('mode_annunciation', np.byte),
                            ('faults', np.byte),
                            ('stabilization', np.byte),
                            ('operating_mode', np.byte),
                            ('tilt', np.float64),
                            ('gain', np.float64),
                            ('range', np.int16),
                            ('data_accept', np.byte),
                            ('scan_angle', np.float64),
                            ('reflectivity', np.byte, (512,))])
    A708 = Arinc708()
    for wxrx_file in wxrx_file_list:
        sys.stdout.write('Reading ... %s\n' % wxrx_file)
        # TODO: add a progress bar to show where we are, including ETA
        wxrx_data = Reader(os.path.join(ROOT_PATH, wxrx_file))
        wxrx_data.parse()
        sys.stdout.write(str(wxrx_data))
        ix = []
        for i in range(len(wxrx_data.Buswords)):
            try:
                _RECS[i] = A708.parse(wxrx_data.Buswords[i])
                ix.append(i)
            except Exception:
                pass
        wxrx_data.sIndexList = list(np.array(wxrx_data.sIndexList)[ix])
        add_timestamp(wxrx_data, WXRX_LOG_FILE)
        wxrx_data.Records = _RECS[ix]
        wxrx_data_list.append(wxrx_data)
        # Delete to save memory
        del wxrx_data
    # TODO
    _s = Setup(os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME))
    sys.stdout.write('Creating empty netCDF ...\n')
    sys.stdout.write('Writing data to ... %s\n' %
                     os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME))
    wxrx_nc_writer = Writer(os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME),
                            wxrx_data_list)
    wxrx_nc_writer.write()
    sys.stdout.write('Merging faam_core data ... %s\n' % CORE_FILE)
    # TODO
    wxrx_nc_writer.merge_core_file(CORE_FILE)
    wxrx_nc_writer.close()
    # Create an overview figure
    Overview(
        os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME),
        os.path.join(ROOT_PATH, '%s_%s_wxrx_overview.png' %
                     (fid, datetime.datetime.strftime(BASE_TIME, '%Y%m%d'))))
                     help='Port to listen on')
    grp.add_argument('--maxConnections', type=int, default=10,
                     metavar='count',
                     help='Maximum number of simultaneous connections')
    args = parser.parse_args()

    logger = MyLogger.mkLogger(args)
    logger.info('args=%s', args)

    try:
        fwd = Forwarder(args, logger)  # Create a packet forwarder
        fwd.start()                    # Start the forwarder
        writer = Writer(args, logger)  # Create the db writer thread
        writer.start()                 # Start the writer thread
        queues = [fwd.q, writer.q]
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            logger.debug('Opened socket')
            s.bind(('', args.port))
            logger.debug('Bound to port %s', args.port)
            s.listen()
            logger.debug('Listening to socket')
            while writer.is_alive():
                (conn, addr) = s.accept()  # Wait for a connection
                logger.info('Connection from %s', addr)
                thrd = Reader(conn, addr, logger, queues)  # Create a new reader thread
def update_display(display, payload):
    def calculate_width(font, text):
        w = 0
        for c in text:
            glyph, char_height, char_width = font.get_ch(c)
            w += char_width
        return w

    try:
        command = {}
        if payload is None:
            # An example command for testing
            command = {
                "heating_state": True,
                "msg": "auto",
                "inside_temp": 23.456,
                "outside_temp": -9.876
            }
        else:
            try:
                import ujson as json
                command = json.loads(payload)
            except (OSError, ValueError):
                import kiota.Util as Util
                Util.log(update_display,
                         "Can't parse payload: '{}'".format(payload))
        display.fill(0)
        ink = 1
        heating_state = command["heating_state"]
        if heating_state:
            ink = 0
            display.fill_rect(0, 0, 128, 14, 1)
        if command["msg"] is not None:
            display.text(str(command["msg"]), 0, 1, ink)
        if command["heating_state"]:
            display.text("ON", 104, 1, ink)
        else:
            display.text("OFF", 104, 1, ink)
        import KameronRegularNumbers25 as font
        from Writer import Writer
        writer = Writer(display, font)
        writer.set_clip(True, True)
        inside_temp = "--"
        try:
            inside_temp = str(int(round(float(command["inside_temp"]))))
        except:
            pass
        writer.set_textpos(23, int((64 - calculate_width(font, inside_temp)) / 2))
        display.fill_rect(0, 15, 64, 41, 0)
        writer.printstring(inside_temp)
        outside_temp = "--"
        try:
            outside_temp = str(int(round(float(command["outside_temp"]))))
        except:
            pass
        writer.set_textpos(23, 64 + int((64 - calculate_width(font, outside_temp)) / 2))
        display.fill_rect(64, 15, 64, 41, 0)
        writer.printstring(outside_temp)
        display.text("inside", 0, 56)
        display.text("outside", 72, 56)
        display.show()
    except Exception as e:
        display.text("ERROR", 0, 0)
        display.show()
        import sys
        sys.print_exception(e)
    return True
destfile = 'stonk_data.csv'
threads = 10

### Debug Levels:
# 0 = Critical
# 1 = Error
# 2 = Warn
# 3 = Info
debug_level = 2

header = [
    'date', 'ticker', 'return_on_invested_capital', 'enterprise_yield',
    'price_12m_ago', 'price_6m_ago', 'last_price', '12m_percent_change',
    '6m_percent_change', 'marketCap', 'marketCap_category', 'ebit',
    'working_capital', 'netPPE', 'enterprise_value'
]

last_open = get_last_trading_day()

# Will take a csv of tickers, reading the first column as the ticker names.
with open(sourcefile, 'r') as f:
    tickers = [row.split(",")[0] for row in f]

writer = Writer(destfile, header)

# Now with threads! Thanks wedgie.
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
    for i in tickers:
        executor.submit(ticker_wrapper, i.strip(), last_open, writer,
                        debug_level)
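# Because a single Writer instance is shared by all executor threads above,
# its write path has to be serialized. The real Writer class is not shown in
# these snippets; a minimal sketch of a csv-backed, lock-guarded writer
# consistent with the Writer(destfile, header) calls (hypothetical):
import csv
import threading

class CsvWriter:
    def __init__(self, path, header):
        self._lock = threading.Lock()
        self._file = open(path, 'w', newline='')
        self._csv = csv.writer(self._file)
        self._csv.writerow(header)

    def writerow(self, row):
        # Serialize concurrent writes from the worker threads
        with self._lock:
            self._csv.writerow(row)
            self._file.flush()

    def close(self):
        self._file.close()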
def salvar(self):
    # print "salvou?"  (debug leftover; Portuguese for "saved?")
    Writer(self)
def __init__(self):
    self.writer = Writer('decryption_output.csv',
                         ['VECTORS', 'AES-EBC', 'AES-CBC', 'RSA_OAEP'])
def testRedundancyRemover():
    writer = Writer("test2.txt")
    writer.removeRedundantData()
W.writeTableRow("March 30, 2017:", "**129**") W.writeTableRow("February 28, 2017:", "**102**") W.writeTableRow("January 30, 2017:", "**62**") W.writeTableRow("December 30, 2016:", "**40**") W.writeTableRow("November 30, 2016:", "**20**") W.writeTableRow("October 30, 2016:", "**5** ") def pushToGit(): os.system("cd ~/personal/LEETCodePractice/") os.system("git add .") os.system("git commit -m \"added files\" ") os.system("git push") S = None W = None if __name__ == "__main__": S = Scraper() W = Writer() writeHeader() writeQsSolved() writeLog() W.cleanup() S.cleanup() pushToGit()
from Mapping import Mapping
from Compare import Compare
from Writer import Writer

# Gold standard
filename_wiktionary = "files/wiki_output_with_pgmc_forms.csv"
# Test languages
filename_results = "files/results.csv"
# Validation languages
filename_extend = "files/results_kroonen_northeuralex.csv"

if __name__ == '__main__':
    # Use lowercase instance names so the class names are not shadowed
    reader = Reader()
    mapping = Mapping()
    compare = Compare()
    writer = Writer()

    # Read files
    results = reader.read_csv(filename_results)
    wiki = reader.read_csv(filename_wiktionary)
    validation_set = reader.read_csv(filename_extend)

    # Map wiki to the test set
    all_words, wiki_words, res_words, id_lst = mapping.maps(wiki, results)
    print("all words common between both lists:", len(all_words))
    print("wiktionary cognate sets", len(wiki_words))
    print("results cognate sets", len(res_words))

    # Compare data and output results
    aux_test = compare.compare(wiki_words, res_words, all_words)