def main(starting_cash, start_date, end_date, order_file, value_file):
    """Bollinger-band event study driver.

    Scans S&P-500 (2012) symbols for days where a symbol's Bollinger value
    drops below -2.0 (having been >= -2.0 the previous trading day) while
    SPY's Bollinger value is >= 1.3.  Each event emits a buy order plus a
    sell order 5 trading days later (clamped to the last day), then the
    market simulator and fund analyzer are run over the generated orders.

    starting_cash -- initial portfolio cash for the simulation
    start_date / end_date -- datetime bounds of the study window
    order_file -- CSV path the generated orders are written to
    value_file -- path the simulated daily fund values are written to
    """
    print("Starting Cash: {}".format(starting_cash))
    print("Start date: {} End Date: {}".format(start_date, end_date))
    print("Order File: {} Value File: {}".format(order_file, value_file))

    dataobj = da.DataAccess('Yahoo')
    ls_symbols = dataobj.get_symbols_from_list('sp5002012')
    ls_symbols.append('SPY')
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    # NYSE trading days, time-stamped at the 16:00 close.
    ldt_timestamps = du.getNYSEdays(start_date, end_date,
                                    dt.timedelta(hours=16))
    ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    # Forward-fill, then back-fill, then default any remaining gaps to 1.0
    # so no NaNs survive into the Bollinger computation.
    for s_key in ls_keys:
        d_data[s_key] = d_data[s_key].fillna(method='ffill')
        d_data[s_key] = d_data[s_key].fillna(method='bfill')
        d_data[s_key] = d_data[s_key].fillna(1.0)

    na_ts_bollinger = bollinger_event_study.get_bollinger_band(
        d_data['close'], ldt_timestamps)

    count = 0
    # 'wb' is the correct csv mode under Python 2 (avoids blank rows on
    # Windows); this module is Python 2 throughout.
    with open(order_file, 'wb') as outfile:
        writer = csv.writer(outfile, delimiter=',')
        for sym in ls_symbols:
            # Start at 1: every test compares today against yesterday.
            for i in range(1, len(ldt_timestamps)):
                bol_val_today = na_ts_bollinger[sym].ix[ldt_timestamps[i]]
                bol_val_yest = na_ts_bollinger[sym].ix[ldt_timestamps[i - 1]]
                bol_val_spx = na_ts_bollinger['SPY'].ix[ldt_timestamps[i]]
                if (bol_val_today < -2.0 and bol_val_yest >= -2.0
                        and bol_val_spx >= 1.3):
                    out_data = generate_output_data(ldt_timestamps[i], sym)
                    writer.writerow(out_data)
                    # Sell 5 trading days later, clamped to the final day.
                    # (Fix: removed a dead `end_ts = ldt_timestamps[i]`
                    # store that was unconditionally overwritten below.)
                    if i < len(ldt_timestamps) - 5:
                        end_ts = ldt_timestamps[i + 5]
                    else:
                        end_ts = ldt_timestamps[-1]
                    out_data = generate_output_data(end_ts, sym, buy=False)
                    writer.writerow(out_data)
                    count += 1
    print("Number of events: {}".format(count))

    print("Performing fund simulation...")
    marketsim.main(starting_cash, order_file, value_file)
    print("Analyzing fund...")
    analyze.main(value_file, '$SPX')
def assertResultForModule(self, module, expected, variant=None):
    """Run analyze.main over *module* and assert its captured output
    equals *expected* (optionally for a specific *variant*)."""
    captured = StringIO.StringIO()
    analyze.main(module, self.o_dir, self.t_dir, captured, variant,
                 timer_stream=None)
    actual = captured.getvalue()
    self.assertMultiLineEqual(expected, actual)
def compute_sell_timestamp(ldt_timestamps, i, holding_days=5):
    """Return the sell date for an event on day *i*: *holding_days* trading
    days later, clamped to the last available timestamp.

    Bug fix: the original compared `i > ldt_timestamps.shape` -- an int
    against a tuple, which is always False in Python 2 -- so the clamp
    branch never ran and `ldt_timestamps[i+5]` raised IndexError for
    events near the end of the series.
    """
    if i + holding_days < len(ldt_timestamps):
        return ldt_timestamps[i + holding_days]
    return ldt_timestamps[-1]


def main(starting_cash, start_date, end_date, order_file, value_file):
    """Price-drop event study driver.

    Finds days where a symbol's actual close crosses from >= $7.00 to
    below $7.00, writes a buy order for each event plus a sell order 5
    trading days later (clamped to the last day), then runs the market
    simulator and fund analyzer over the generated orders.

    starting_cash -- initial portfolio cash for the simulation
    start_date / end_date -- datetime bounds of the study window
    order_file -- CSV path the generated orders are written to
    value_file -- path the simulated daily fund values are written to
    """
    print("Starting Cash: {}".format(starting_cash))
    print("Start date: {} End Date: {}".format(start_date, end_date))
    print("Order File: {} Value File: {}".format(order_file, value_file))

    ldt_timestamps = du.getNYSEdays(start_date, end_date,
                                    dt.timedelta(hours=16))
    dataobj = da.DataAccess('Yahoo')
    ls_symbols = dataobj.get_symbols_from_list('sp5002012')
    ls_symbols.append('$SPX')
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    # Forward-fill, back-fill, then default remaining gaps to 1.0 so no
    # NaNs survive into the event scan.
    for s_key in ls_keys:
        d_data[s_key] = d_data[s_key].fillna(method='ffill')
        d_data[s_key] = d_data[s_key].fillna(method='bfill')
        d_data[s_key] = d_data[s_key].fillna(1.0)

    close_price_data = d_data['actual_close']
    # Result currently unused here (the eventprofiler step was removed),
    # but the call is kept in case find_events has side effects.
    df_events = event_study.find_events(ls_symbols, close_price_data)

    # Time stamps for the event range
    ldt_timestamps = close_price_data.index

    # 'wb' is the correct csv mode under Python 2 (this module is py2).
    with open(order_file, 'wb') as outfile:
        writer = csv.writer(outfile, delimiter=',')
        for sym in ls_symbols:
            # Start at 1: every test compares today against yesterday.
            for i in range(1, len(ldt_timestamps)):
                price_today = close_price_data[sym].ix[ldt_timestamps[i]]
                price_yest = close_price_data[sym].ix[ldt_timestamps[i - 1]]
                if price_yest >= 7.0 and price_today < 7.0:
                    out_data = generate_output_data(ldt_timestamps[i], sym)
                    writer.writerow(out_data)
                    end_ts = compute_sell_timestamp(ldt_timestamps, i)
                    out_data = generate_output_data(end_ts, sym, buy=False)
                    writer.writerow(out_data)

    print("Performing fund simulation...")
    marketsim.main(starting_cash, order_file, value_file)
    print("Analyzing fund...")
    analyze.main(value_file, '$SPX')
def kill_handler(signum, frame):
    """Kill tcpdump and then analyze pcap."""
    # Signal handler: stop the packet capture, post-process the trace with
    # bro, then run the leak detector over the analysis output.
    # NOTE(review): relies on names bound outside this function
    # (tcpdump_proc, tracefile, logdir, outfile) -- presumably globals or
    # closure variables; confirm at the registration site.
    if tcpdump_proc:
        tcpdump_proc.terminate()
    # Analyze pcap with bro
    run_bro('-r %s' % (tracefile), logdir)
    analyze_logs(logdir, outfile=outfile)
    # Now run leak detector
    print "Analyzing captured network traffic"
    utils.remove_temp_dir('bro_logs')
    analyze.main(outfile, "%s.analyzed" % outfile)
def kill_handler(signum, frame):
    """Kill bro."""
    # Signal handler: terminate the bro capture process, analyze its logs,
    # then run the traffic analyzer over the result.
    logging.info('Exiting...')
    global bro_proc
    global log_proc
    if bro_proc:
        bro_proc.terminate()
    # Only remove the temporary log dir when no explicit logdir was given.
    # NOTE(review): analyze_logs is then invoked with that same falsy
    # logdir after its directory was removed -- verify analyze_logs
    # handles None/'' (or that this ordering is intentional).
    if not logdir:
        utils.remove_temp_dir('bro_logs')
    analyze_logs(logdir, outfile=outfile)
    print "Analyzing captured network traffic (this may take a second...)"
    analyze.main(outfile, "%s.analyzed"%outfile)
def analyze_chars(char_names):
    """Analyze the given character names and push the outcome to the UI.

    Clears the result list first; on success the list is repopulated and
    timing stats are reported, otherwise a status message is shown.  Any
    failure is logged together with the clipboard content that caused it.
    """
    began = time.time()
    wx.CallAfter(app.PySpy.list.DeleteAllItems)
    try:
        results = analyze.main(char_names)
        elapsed = round(time.time() - began, 1)
        reportstats.ReportStats(results, elapsed).start()
        if results is None:
            statusmsg.push_status(
                "No valid character names found. Please try again...")
        else:
            wx.CallAfter(app.PySpy.updateList, results, elapsed)
    except Exception:
        Logger.error(
            "Failed to collect character information. Clipboard "
            "content was: " + str(char_names), exc_info=True)
def listen_print_loop(responses, stream):
    """Echo interim transcripts and return the first final one.

    Iterates over streaming recognition *responses*.  Interim hypotheses
    are written in place (carriage-return overwrite, padded with spaces
    when the new hypothesis is shorter than the previous one).  On the
    first final result the transcript is printed and a tuple of
    (transcript, analyze.main('sentiment', transcript)) is returned.
    Saying "exit" or "quit" instead sets ``stream.closed`` and breaks out,
    returning None.
    """
    # Drop responses with no usable alternatives up front.
    responses = (r for r in responses
                 if (r.results and r.results[0].alternatives))

    num_chars_printed = 0
    for response in responses:
        if not response.results:
            continue

        # The `results` list is consecutive. For streaming, we only care
        # about the first result being considered, since once it's
        # `is_final`, it moves on to considering the next utterance.
        result = response.results[0]
        if not result.alternatives:
            continue

        # Display the transcription of the top alternative.
        top_alternative = result.alternatives[0]
        transcript = top_alternative.transcript

        # If the previous result was longer than this one, print extra
        # spaces so the leftover characters get overwritten.
        overwrite_chars = ' ' * (num_chars_printed - len(transcript))

        if not result.is_final:
            sys.stdout.write(transcript + overwrite_chars + '\r')
            sys.stdout.flush()
            num_chars_printed = len(transcript)
        else:
            # Exit recognition if any of the transcribed phrases could be
            # one of our keywords.
            if re.search(r'\b(exit|quit)\b', transcript, re.I):
                print('Exiting..')
                stream.closed = True
                break

            print(transcript + overwrite_chars)
            # Bug fix: removed an unreachable `num_chars_printed = 0`
            # statement that followed this unconditional return.
            return transcript + overwrite_chars, analyze.main(
                'sentiment', transcript + overwrite_chars)
def main(options):
    """Run the analysis (skipped or profiled per *options*), then emit the
    channel summary and plots as requested.

    Returns the (retCode, feds1, feds2) triple produced by the analysis
    (or a synthesized one when the loop is skipped).
    """
    if options.noLoop:
        # Skip the event loop entirely; just resolve the two FED lists.
        retCode = 0
        feds1, feds2 = fedList(options.feds1), fedList(options.feds2)
    elif options.profile:
        profiler = cProfile.Profile()
        retCode, feds1, feds2 = profiler.runcall(analyze.main, options)
        profiler.print_stats("time")
    else:
        retCode, feds1, feds2 = analyze.main(options)

    if feds2 and 0 <= options.dump:
        analyze.printChannelSummary(options.outputFile)
    if not options.noPlot:
        graphs.main(options.outputFile, feds1, feds2)
    return retCode, feds1, feds2
def analyze_chars(char_names):
    """Analyze the given character names and publish the outcome to the
    UI grid.

    Opens the in-memory and persistent DB connections, clears the grid,
    runs the analysis, and either sorts the results into the grid or shows
    a status message.  Failures are logged with the offending clipboard
    content.
    """
    conn_mem, cur_mem = db.connect_memory_db()
    conn_dsk, cur_dsk = db.connect_persistent_db()
    began = time.time()
    wx.CallAfter(app.PySpy.grid.ClearGrid)
    try:
        results = analyze.main(char_names, conn_mem, cur_mem, conn_dsk,
                               cur_dsk)
        elapsed = round(time.time() - began, 1)
        reportstats.ReportStats(results, elapsed).start()
        if results is None:
            statusmsg.push_status(
                "No valid character names found. Please try again...")
        else:
            # Keyword args are required: sortOutlist also serves as an
            # event handler, where the first positional argument would be
            # the event object.
            wx.CallAfter(app.PySpy.sortOutlist, outlist=results,
                         duration=elapsed)
    except Exception:
        Logger.error(
            "Failed to collect character information. Clipboard "
            "content was: " + str(char_names), exc_info=True)
import analyze
import differ
import send_email

if __name__ == '__main__':
    # Pipeline entry point: analyze the data, diff it against the previous
    # run, then e-mail the result.
    analyze.main()
    differ.main()
    send_email.main()
def tryToQuantifyWithVariant():
    # Run the lexical_variety analysis with an explicit variant (5).
    # NOTE(review): references `self` without taking it as a parameter --
    # presumably this is a nested function closing over a test case's
    # `self`; confirm at the definition site.
    module = translationese.lexical_variety
    analyze.main(module, self.o_dir, self.t_dir, variant=5)
def tryToQuantifyWithVariant():
    # Run the contractions analysis with an explicit variant (0).
    # NOTE(review): references `self` without taking it as a parameter --
    # presumably this is a nested function closing over a test case's
    # `self`; confirm at the definition site.
    module = translationese.contractions
    analyze.main(module, self.o_dir, self.t_dir, variant=0)
def main(parsed_args):
    """Primary update cycle"""
    # Scrape and load the data from goko, proceeding from the
    # previous day backwards, until no games are inserted
    today = datetime.date.today()
    log.info("Starting scrape for raw games")
    dates = utils.daterange(datetime.date(2010, 10, 14), today, reverse=True)
    for date in dates:
        log.info("Invoking scrape_raw_games async task for %s", date)
        async_result = watch_and_log(background.tasks.scrape_raw_games.s(date))
        inserted = async_result.get()
        if inserted is None:
            log.info("Nothing processed for %s", date)
        elif inserted == 0:
            # First day with zero insertions ends the backwards walk.
            log.info("No games inserted for %s", date)
            break

    # Invoke the analyze script
    log.info("Starting analyze")
    analyze.main(parsed_args)

    # Check for goals
    log.info("Starting search for goals acheived")
    # Check for game_stats
    log.info("Starting game_stats summarization")
    dates = utils.daterange(datetime.date(2010, 10, 14), today, reverse=True)
    for date in dates:
        log.info("Invoking calc_goals_for_days async task for %s", date)
        async_result = watch_and_log(background.tasks.calc_goals_for_days.s([date]))
        inserted = async_result.get()
        log.info("Invoking summarize_game_stats_for_days async task for %s", date)
        # NOTE(review): this async result is created but never .get()-ed,
        # so summarize failures go unnoticed and the break below is driven
        # only by the calc_goals result -- confirm this is intended.
        async_result = watch_and_log(background.tasks.summarize_game_stats_for_days.s([date]))
        if inserted == 0:
            log.info("No games parsed for goals on %s", date)
            break

    # Invoke the count_buys script
    log.info("Counting buys")
    count_buys.main(parsed_args)

    # Invoke the run_trueskill script
    log.info("Calculating trueskill")
    run_trueskill.main(parsed_args)

    # Invoke the optimal_card_ratios script
    log.info("Calculating optimal card ratios")
    optimal_card_ratios.main(parsed_args)

    # Invoke the goal_stats script
    log.info("Calculating goal stats")
    goal_stats.main(parsed_args)

    # Invoke the scrape_leaderboard script
    log.info("Scraping the leaderboard")
    scrape_leaderboard.main()

    # Invoke the load_leaderboard script
    log.info("Loading the leaderboard")
    load_leaderboard.main()

    log.info("Starting analyze2")
    # This is slow.  Is it fast enough on cr?
    analyze2.main(parsed_args)
    log.info("Done with the update.py process")
def main(parsed_args): """Primary update cycle""" # Scrape and load the data from isotropic, proceeding from the # current day backwards, until no games are inserted log.info("Starting scrape for raw games") for date in utils.daterange(datetime.date(2010, 10, 15), datetime.date.today(), reverse=True): log.info("Invoking scrape_raw_games async task for %s", date) async_result = watch_and_log(background.tasks.scrape_raw_games.s(date)) inserted = async_result.get() if inserted is None: log.info("Nothing processed for %s", date) elif inserted == 0: log.info("No games inserted for %s", date) break # Invoke the analyze script log.info("Starting analyze") analyze.main(parsed_args) # Check for goals log.info("Starting search for goals acheived") for date in utils.daterange(datetime.date(2010, 10, 15), datetime.date.today(), reverse=True): log.info("Invoking calc_goals_for_days async task for %s", date) async_result = watch_and_log(background.tasks.calc_goals_for_days.s([date])) inserted = async_result.get() if inserted == 0: log.info("No games parsed for goals on %s", date) break # Check for game_stats log.info("Starting game_stats summarization") for date in utils.daterange(datetime.date(2010, 10, 15), datetime.date.today(), reverse=True): log.info("Invoking summarize_game_stats_for_days async task for %s", date) async_result = watch_and_log(background.tasks.summarize_game_stats_for_days.s([date])) inserted = async_result.get() if inserted == 0: log.info("No new games summarized on %s", date) break # Invoke the count_buys script log.info("Counting buys") count_buys.main(parsed_args) # Invoke the run_trueskill script log.info("Calculating trueskill") run_trueskill.main(parsed_args) # Invoke the optimal_card_ratios script log.info("Calculating optimal card ratios") optimal_card_ratios.main(parsed_args) # Invoke the goal_stats script log.info("Calculating goal stats") goal_stats.main(parsed_args) # Invoke the scrape_leaderboard script log.info("Scraping the leaderboard") 
scrape_leaderboard.main() # Invoke the load_leaderboard script log.info("Loading the leaderboard") load_leaderboard.main() log.info("Done with the update.py process")
def run(self):
    """Periodically re-analyze the logs until the running flag clears.

    Each cycle sleeps for ``analyze_interval`` seconds, regenerates the
    log analysis into ``self.outfile``, then runs the analyzer over it.
    """
    while self.runningFlag.isSet():
        time.sleep(self.analyze_interval)
        target = self.outfile
        analyze_logs(self.log_dir, outfile=target)
        analyze.main(target, "%s.analyzed" % target)
try: print "**Calling marketsim..." print "****Creating ", out_results args = ["-fv", cash_arg, out_trades, out_results ] print "marketsim " + " ".join(args) ret = marketsim.main([argv[0]] + args) except marketsim.Usage, err: raise Usage(err) if ret == 0: try: print "**Calling analyze..." print "****Creating ", out_analysis args = ["-fv", "--portfolio=" + out_results, "--out=" + out_analysis] + compare_args print "analyze " + " ".join(args) ret = analyze.main([argv[0]] + args) except analyze.Usage, err: raise Usage(err) return ret if __name__ == "__main__": ret = 1 try: ret = main() except Usage, err: print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg) print >> sys.stderr, "\t for help use --help" ret = 2 sys.exit(ret)