def test_date(self):
    """Verify that "main" yields equal results for two different
    non-existent dates (month 13 and day 32).
    """
    first = reader.main(['news', '--date', '20201301'])
    second = reader.main(['news', '--date', '20201132'])
    self.assertEqual(first, second)
def test_version(self):
    """Verify that "main" does not return the literal string
    'Latest_version' when asked for the version.
    """
    outcome = reader.main(['address', '--version'])
    self.assertNotEqual(outcome, 'Latest_version')
def test_verbose(self):
    """Verify that "main" returns None when given the "verbose" flag."""
    outcome = reader.main(['http', '--verbose'])
    self.assertIsNone(outcome)
def get_amr_table_path(input_path): amr = '' if input_path[-1] != '/': input_path += '/' for i in os.listdir(input_path): tmp = open(input_path + i, 'r').read() amr += raw.wrap(tmp) print 'END OF FILE: %s' % i amr_table = reader.main(amr) ne.add_named_entity(amr_table) nequery.main(amr_table) return amr_table
action="store_true") parser.add_argument("-f", "--file", help="The path of the .pcap file", default="/dev/null") parser.add_argument("-v", "--version", action="version", help="Show program's version number and exit", version="%(prog)s 1.0") args = parser.parse_args() if args.sniffer: if settings.os.getenv("USER") == "root": print(OSniffy) import sniffer print("Running in sniffer mode...") sniffer.main() else: print("You should run the sniffer using sudo") elif args.reader: if args.file is "/dev/null": print(OSniffy) parser.print_help() else: print(OSniffy) print("Reading {FILE}...".format(FILE=args.file)) reader.main(args.file) else: print(OSniffy) parser.print_help()
def get_amr_table_str(input_str):
    """Build an AMR table from a single string.

    The string is wrapped via raw.wrap, parsed by reader.main, enriched
    with named entities and run through nequery before being returned.
    """
    wrapped = raw.wrap(input_str)
    table = reader.main(wrapped)
    ne.add_named_entity(table)
    nequery.main(table)
    return table
import finder
import scraper
import reader

# Master driver: find player IDs, scrape them, then run the reader,
# logging progress to log.txt along the way.
with open('log.txt', 'w', newline='', encoding='UTF-8') as writer:
    writer.write("Master: START--START--START--START--START\n")
    print("Master: START--START--START--START--START")

    # Seed the crawl with a starting player ID.  "zsda123" is only a
    # placeholder -- if it happens to be a real ID or your ID, apologies!
    finder.main(writer, "zsda123")

    with open('userIdList.txt', 'r', newline='', encoding='UTF-8') as f:
        userIdList = f.read().split()

    scraper.main(writer, userIdList)
    reader.main(writer)

    writer.write("Master: DONE--DONE--DONE--DONE--DONE\n")
    print("Master: DONE--DONE--DONE--DONE--DONE")
width = height*aspect images.append({ 'width': int(width), 'height': int(height), 'src': url }) return render_template('index.html', query=query, urls=urls, images=images) def get_urls(query): urls = [] path = "train" cat_index = {"sky": 0, "skin": 1, "plant": 2, "snow": 3, "water": 4, "ground": 5, "buildings": 6} index = cat_index.get(query,-1) if index == -1: return urls else: path = os.path.join(path, str(index)) for f in os.listdir(path): f_path = os.path.join(path, f) urls.append(f_path) return urls if __name__ == '__main__': print "initializing app..." print "training classifier..." reader.main() app.run(debug=True)
# Build the transaction file for ChainSpace.  Each line is "in:out"; with
# probability (1 - percentageCross) the output shard equals the input shard,
# otherwise the transaction crosses into the next shard (wrap-around).
# FIX: collect the pieces in a list and join once -- repeated `text += ...`
# inside the loop is quadratic in the worst case.
chunks = []
for trans in range(nbrTrans):
    randomizer = random.random()
    if randomizer > percentageCross:
        # no cross transaction
        inp = random.randint(0, nbrShards - 1)
        out = inp
    else:
        # cross transaction
        inp = random.randint(0, nbrShards - 1)
        out = (inp + 1) % nbrShards
    chunks.append(str(inp) + ":" + str(out) + "\n")
text = "".join(chunks)
fileToWrite.write(text)
#############
# Read the created text file for ChainSpace with a given percentage of cross
#############
nedges, ncrosedges, ntrans, ncrostrans = reader.main(fileToWrite, False, path2, "NoFolder")
print("Created for ChainSpace : " + str(ncrosedges) + "/" + str(nedges) + " cross edges and " + str(ncrostrans) + "/" + str(ntrans) + " cross transactions")
print(" Asked : " + str(percentageCross) + " obtained -> " + str(100.0 * float(ncrosedges) / float(nedges)) + "% of cross edges")
print(" Asked : " + str(percentageCross) + " obtained -> " + str(100.0 * float(ncrostrans) / float(ntrans)) + "% of cross transactions")
__author__ = 'Olya'

import reader

# Run the reader immediately when this module is imported/executed.
reader.main()
__author__ = 'Olya'

import reader

# Run the reader immediately when this module is imported/executed.
reader.main()
def main():
    """Run the reader, load the pickled conversation, print its sentiment
    and create a Canvas for it.
    """
    reader.main()
    # FIX: context manager ensures the pickle file handle is closed;
    # the original pickle.load(open(...)) leaked it.
    # NOTE: pickle.load is only safe on trusted local files.
    with open('convo.pkl', 'rb') as fh:
        convo = pickle.load(fh)
    print(convo['sentiment'])
    c = Canvas('cairo_test_integrate_1', convo, 400, 300)
def main(args):
    """Full MBS model run (Python 2 source): validate the params file,
    check and cluster the input LLD, simulate macro/cashflow scenarios,
    price the MBS and dump the resulting dataframes to the work directory.

    args -- argparse-style namespace; this function reads .params, .mods,
    .encoding and .dumpall.
    """
    # bail out early when the params file is missing
    if args.params is None or not os.path.isfile(args.params):
        print "File '%s' do not exist!" % args.params
        exit(0)
    # ====== MODEL INPUTS ======
    try:
        inputs = model.CParams(args.params)
    except Exception as e:
        raise e
    # optional "name=value" overrides of individual parameters from the CLI
    if args.mods is not None:
        for mod in args.mods:
            inputs.Parameters.set_par(*mod.split('='))
    # --- fix random number sequences ---
    # derive reproducible seeds from the configured master seed: a fixed one
    # for LLD clusterization plus fresh ones for CIR macro and cashflow model
    np.random.seed(inputs.Parameters.RandomNumberSeed)
    seeds_lld = 123456
    seeds_cir = np.random.randint(1, 1000000)
    seeds_cfm = np.random.randint(1, 1000000)
    # ====== DATA VALIDATION ======
    try:
        print time.strftime('%Y-%m-%d %H:%M:%S'), 'Checking input LLD ...'
        if (inputs.InputFileName is not None) and (os.path.isfile(inputs.InputFileName)):
            # get paths: temp_* is the validated copy, data_* the clusterized
            # LLD, stat_* the clusterization statistics
            workdir, name = os.path.split(inputs.InputFileName)
            body, ext = name.split('.')
            new_fname0 = os.path.join(workdir, 'temp_' + body + '.' + ext)
            new_fname1 = os.path.join(workdir, 'data_' + body + '.' + ext)
            new_fname2 = os.path.join(workdir, 'stat_' + body + '.' + ext)
            # check input lld
            reader.main(reader.parser.parse_args(['-i', inputs.InputFileName, '-o', new_fname0]))
            # make clusterization
            lld_df, stats = model.lld.run(df=pd.read_csv(new_fname0, sep='\t', encoding=args.encoding, parse_dates=True), inputs=inputs, newseed=seeds_lld)
            # save clusterization results
            lld_df.to_csv(new_fname1, sep='\t', encoding=args.encoding)
            stats.to_csv(new_fname2, sep='\t', encoding=args.encoding)
        else:
            raise Exception('file "%s" not found' % inputs.InputFileName)
    except Exception as e:
        print "Error while reading LLD:", str(e), '(please check file for ".csv" structure or missed rows/columns)'
        exit(0)
    # clear HPIndex & Regions and trim History to the 12 months before the
    # evaluation date, to prevent heavy network traffic when shipping inputs
    inputs.Datasets.HPIndex = None
    inputs.Datasets.Regions = None
    inputs.Datasets.History = inputs.Datasets.History.loc[np.datetime64(inputs.Parameters.EvaluationDate, 'M') - np.timedelta64(12, 'M'):np.datetime64(inputs.Parameters.EvaluationDate, 'M'), :].copy(deep=True)
    # clear Adjusts - they can be read at each node without network transmission
    inputs.Adjusts = None
    # ====== MACRO SIMULATIONS & CASHFLOW MODEL ======
    # generate macro seeds sequence: column 0 is the scenario index, column 1
    # a cumulative sum of random increments, so per-scenario seeds are
    # strictly increasing and unique
    np.random.seed(seeds_cir)
    xmap = np.zeros((inputs.Parameters.NumberOfMonteCarloScenarios, 2), dtype=np.uint64)
    xmap[:, 0] = np.arange(inputs.Parameters.NumberOfMonteCarloScenarios)
    xmap[:, 1] = np.random.randint(1, high = 1001, size = inputs.Parameters.NumberOfMonteCarloScenarios)
    for i in range(1, inputs.Parameters.NumberOfMonteCarloScenarios):
        xmap[i, 1] += xmap[i - 1][1]
    # status updater shows progress (three passes when the +/-100bp effective
    # indicator runs are requested, otherwise one)
    sUpdt = model.StatusUpdater(len(xmap) * (3 if inputs.Parameters.CalculationOfEffectiveIndicators else 1), loud=True)
    sUpdt.start()
    mbs_m = None
    mbs_p = None
    if inputs.Parameters.CalculationOfEffectiveIndicators:
        # --- compute +/-100bp scenarios if needed ---
        mbs_m, res_m, scr_m = model.runpar(modelrun, xmap, inputs, lld_df, seed=seeds_cfm, delta=-100, max_years=inputs.Parameters.ModelingHorizon, upd_queue=sUpdt.upd_que)
        mbs_p, res_p, scr_p = model.runpar(modelrun, xmap, inputs, lld_df, seed=seeds_cfm, delta=+100, max_years=inputs.Parameters.ModelingHorizon, upd_queue=sUpdt.upd_que)
    # --- compute base scenario and remember central scenario ---
    mbs_c, res_c, scr_c = model.runpar(modelrun, xmap, inputs, lld_df, seed=seeds_cfm, max_years=inputs.Parameters.ModelingHorizon, upd_queue=sUpdt.upd_que)
    sUpdt.stop()
    # ====== MBS MODEL ======
    try:
        print time.strftime('%Y-%m-%d %H:%M:%S'), 'Calculating price ...',
        mbs_res, price_hist = model.mbs.stats(mbs_c, mbs_m, mbs_p, inputs.Parameters)
        print '\r', time.strftime('%Y-%m-%d %H:%M:%S'), 'Calculating price - OK'
    except Exception as e:
        # NOTE(review): the error is swallowed here, leaving mbs_res
        # undefined; the save step below will then raise -- confirm intended
        print "Error while pricing cashflows:", str(e), '(please check for correct mbs inputs parameters)'
    # --- dump main dataframes ---
    try:
        print time.strftime('%Y-%m-%d %H:%M:%S'), 'Saving results ...',
        if not os.path.isdir(workdir):
            os.makedirs(workdir)
        if args.dumpall is None:
            # no full dump requested: average each frame across scenarios
            # (index level 1) before saving
            if inputs.Parameters.CalculationOfEffectiveIndicators:
                # save -100 bp
                scr_m = scr_m.mean(axis=0, level=1)
                res_m = res_m.mean(axis=0, level=1)
                mbs_m = mbs_m.mean(axis=0, level=1)
                # save +100 bp
                scr_p = scr_p.mean(axis=0, level=1)
                res_p = res_p.mean(axis=0, level=1)
                mbs_p = mbs_p.mean(axis=0, level=1)
            # save base
            scr_c = scr_c.mean(axis=0, level=1)
            res_c = res_c.mean(axis=0, level=1)
            mbs_c = mbs_c.mean(axis=0, level=1)
        if inputs.Parameters.CalculationOfEffectiveIndicators:
            # save -100 bp
            scr_m.to_csv(os.path.join(workdir, 'macroscr_m100bp.csv'), sep='\t', encoding=args.encoding)
            res_m.to_csv(os.path.join(workdir, 'cashflow_m100bp.csv'), sep='\t', encoding=args.encoding)
            mbs_m.to_csv(os.path.join(workdir, 'mbsflows_m100bp.csv'), sep='\t', encoding=args.encoding)
            # save +100 bp
            scr_p.to_csv(os.path.join(workdir, 'macroscr_p100bp.csv'), sep='\t', encoding=args.encoding)
            res_p.to_csv(os.path.join(workdir, 'cashflow_p100bp.csv'), sep='\t', encoding=args.encoding)
            mbs_p.to_csv(os.path.join(workdir, 'mbsflows_p100bp.csv'), sep='\t', encoding=args.encoding)
        # save base
        scr_c.to_csv(os.path.join(workdir, 'macroscr_central.csv'), sep='\t', encoding=args.encoding)
        res_c.to_csv(os.path.join(workdir, 'cashflow_central.csv'), sep='\t', encoding=args.encoding)
        mbs_c.to_csv(os.path.join(workdir, 'mbsflows_central.csv'), sep='\t', encoding=args.encoding)
        # save other
        mbs_res.to_csv(os.path.join(workdir, 'pricing_result.csv'), sep='\t', encoding=args.encoding)
        print '\r', time.strftime('%Y-%m-%d %H:%M:%S'), 'Results saved to "%s"' % workdir
    except Exception as e:
        print "Error while saving results:", str(e)
return result if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=__doc__) parser.add_argument( 'url', help='URL to parse', ) parser.add_argument('-f', '--format', choices=list(Format.formatter), default='json', help='output format') parser.add_argument( '-w', '--body-width', type=int, default=None, help='character offset at which to wrap lines for plain-text') parser.add_argument('-p', '--mercury-path', default='/usr/local/bin/mercury-parser', help='path to mercury-parser command line driver') args = parser.parse_args() obj = main(mercury(args.url, args.mercury_path), args.body_width) print(Format.formatter[args.format](obj))