def compute_default_pumps():
    """Write data/securities/cache_pumps_default.json.

    The file holds a JSON object mapping each security symbol to the
    dict produced by ``pumps.find_pumps_easy`` (via ``pumps.to_dicts``)
    called with ``min_quiet_days=3`` and ``growth_tol=1``.
    """
    import pumps
    folder = 'data/securities/'
    r = {}
    for s in seek_securities(folder):
        # Skip derived/metadata files that are not real securities.
        if 'quotient_metrics' in s or 'pump_ranks' in s:
            continue
        try:
            intervals = pumps.find_pumps_easy(
                str(s),
                orig_dir="data/securities",
                cache_dir="data/securities/cached",
                min_quiet_days=3,
                growth_tol=1,
                silent=True,
            )
            intervals = pumps.to_dicts(intervals)
            # Chart baseline: one zero per detected pump interval.
            intervals['bottom'] = [0] * len(intervals['start'])
            r[s] = intervals
        except Exception:
            # Best-effort batch job: a security that fails to process is
            # skipped rather than aborting the whole run.  (Was a bare
            # `except:`, which also swallowed KeyboardInterrupt.)
            pass
    # `open` replaces the Python-2-only `file` builtin.
    with open('data/securities/cache_pumps_default.json', 'w') as ff:
        json.dump(r, ff)
def compute_pump_rank():
    """Write data/securities/pump_ranks.csv: one "symbol,count" line per
    security, where count is the number of pump intervals found.

    Results already cached as pumps_<symbol>_<count>.csv are reused;
    fresh results are also cached to that naming scheme.
    """
    import pumps
    folder = 'data/securities/'
    r = []
    # Map symbol -> previously computed pump count, parsed from cached
    # filenames shaped like "pumps_<symbol>_<count>.csv".
    pfounds = {
        _file.split("_")[1]: _file.split("_")[2].replace(".csv", '')
        for _file in os.listdir(folder + "cached")
        if "pumps" in _file
    }
    for s in seek_securities(folder):
        if s in pfounds:
            r.append("%s,%s" % (s, pfounds[s]))
        else:
            try:
                (start_dates, last_quiet_dates, end_dates,
                 start_prices, last_quiet_prices, end_prices) = pumps.find_pumps_easy(
                    s,
                    orig_dir="data/securities",
                    cache_dir="data/securities/cached",
                    min_quiet_days=2,
                )
                # BUG FIX: the original dict used the list objects themselves
                # as keys ({start_dates: start_dates, ...}) — lists are
                # unhashable, so DataFrame construction raised TypeError and
                # the bare except silently dropped every symbol.  It also
                # bound the last_quiet/end price columns to start_prices.
                founds = pd.DataFrame({
                    'start_dates': start_dates,
                    'last_quiet_dates': last_quiet_dates,
                    'end_dates': end_dates,
                    'start_prices': start_prices,
                    'last_quiet_prices': last_quiet_prices,
                    'end_prices': end_prices,
                })
                r.append("%s,%i" % (s, len(founds)))
                founds.to_csv(
                    "data/securities/cached/pumps_%s_%i.csv" % (s, len(founds)),
                    date_format='%Y-%m-%d',
                )
            except Exception:
                # Best-effort: skip securities that fail to process.
                pass
    # `open` replaces the Python-2-only `file` builtin.
    with open('data/securities/pump_ranks.csv', 'w') as ff:
        ff.write("\n".join(r))
def serve_peaks():
    """HTTP endpoint: detect pump intervals for the requested security.

    Query params: s (symbol), min_quiet_days, quiet_tol, min_growth_days,
    max_growth_days, growth_tol.  Returns JSON
    ``{'results': [{'start': sec, 'end': sec}, ...]}`` restricted to
    ``config.date_range``, timestamps in seconds.
    """
    args = request.args
    (start_dates, last_quiet_dates, end_dates,
     start_prices, last_quiet_prices, end_prices) = pumps.find_pumps_easy(
        args['s'],
        orig_dir="data/securities",
        cache_dir="data/securities/cached",
        min_quiet_days=int(args['min_quiet_days']),
        # float() accepts integer strings ("3") directly — the old
        # append-a-trailing-dot workaround was unnecessary.
        quiet_tol=float(args['quiet_tol']),
        min_growth_days=int(args['min_growth_days']),
        max_growth_days=int(args['max_growth_days']),
        growth_tol=float(args['growth_tol']),
        silent=True,
    )
    conv = lambda x: utils.to_seconds(pd.to_datetime(x))
    lo, hi = conv(config.date_range[0]), conv(config.date_range[1])
    # NOTE(review): starts and ends are sorted independently before being
    # zipped, so a start may be paired with another interval's end —
    # confirm this pairing is intended.
    pairs = zip(sorted(map(utils.to_seconds, start_dates)),
                sorted(map(utils.to_seconds, end_dates)))
    results = [{'start': s, 'end': e} for s, e in pairs if s and lo < s < hi]
    # Explicit sort key: sorting raw dicts raises TypeError on Python 3.
    results.sort(key=lambda d: (d['start'], d['end']))
    return jsonify({'results': results})
def compute_pump_rank():
    """Write data/securities/pump_ranks.csv: one "symbol,count" line per
    security, where count is the number of pump intervals found.

    Results already cached as pumps_<symbol>_<count>.csv are reused;
    fresh results are also cached to that naming scheme.

    NOTE(review): this is a duplicate definition of compute_pump_rank
    (also defined earlier in this file); the later one wins at import
    time — consider deleting one copy.
    """
    import pumps
    folder = 'data/securities/'
    r = []
    # Map symbol -> previously computed pump count, parsed from cached
    # filenames shaped like "pumps_<symbol>_<count>.csv".
    pfounds = {
        _file.split("_")[1]: _file.split("_")[2].replace(".csv", '')
        for _file in os.listdir(folder + "cached")
        if "pumps" in _file
    }
    for s in seek_securities(folder):
        if s in pfounds:
            r.append("%s,%s" % (s, pfounds[s]))
        else:
            try:
                (start_dates, last_quiet_dates, end_dates,
                 start_prices, last_quiet_prices, end_prices) = pumps.find_pumps_easy(
                    s,
                    orig_dir="data/securities",
                    cache_dir="data/securities/cached",
                    min_quiet_days=2,
                )
                # BUG FIX: the original dict used the list objects themselves
                # as keys ({start_dates: start_dates, ...}) — lists are
                # unhashable, so DataFrame construction raised TypeError and
                # the bare except silently dropped every symbol.  It also
                # bound the last_quiet/end price columns to start_prices.
                founds = pd.DataFrame({
                    'start_dates': start_dates,
                    'last_quiet_dates': last_quiet_dates,
                    'end_dates': end_dates,
                    'start_prices': start_prices,
                    'last_quiet_prices': last_quiet_prices,
                    'end_prices': end_prices,
                })
                r.append("%s,%i" % (s, len(founds)))
                founds.to_csv(
                    "data/securities/cached/pumps_%s_%i.csv" % (s, len(founds)),
                    date_format='%Y-%m-%d',
                )
            except Exception:
                # Best-effort: skip securities that fail to process.
                pass
    # `open` replaces the Python-2-only `file` builtin.
    with open('data/securities/pump_ranks.csv', 'w') as ff:
        ff.write("\n".join(r))
def serve_peaks():
    """HTTP endpoint: detect pump intervals for the requested security.

    Query params: s (symbol), min_quiet_days, quiet_tol, min_growth_days,
    max_growth_days, growth_tol.  Returns JSON
    ``{'results': [{'start': sec, 'end': sec}, ...]}`` restricted to
    ``config.date_range``, timestamps in seconds.

    NOTE(review): this is a duplicate definition of serve_peaks (also
    defined earlier in this file); the later one wins at import time —
    consider deleting one copy.
    """
    args = request.args
    (start_dates, last_quiet_dates, end_dates,
     start_prices, last_quiet_prices, end_prices) = pumps.find_pumps_easy(
        args['s'],
        orig_dir="data/securities",
        cache_dir="data/securities/cached",
        min_quiet_days=int(args['min_quiet_days']),
        # float() accepts integer strings ("3") directly — the old
        # append-a-trailing-dot workaround was unnecessary.
        quiet_tol=float(args['quiet_tol']),
        min_growth_days=int(args['min_growth_days']),
        max_growth_days=int(args['max_growth_days']),
        growth_tol=float(args['growth_tol']),
        silent=True,
    )
    conv = lambda x: utils.to_seconds(pd.to_datetime(x))
    lo, hi = conv(config.date_range[0]), conv(config.date_range[1])
    # NOTE(review): starts and ends are sorted independently before being
    # zipped, so a start may be paired with another interval's end —
    # confirm this pairing is intended.
    pairs = zip(sorted(map(utils.to_seconds, start_dates)),
                sorted(map(utils.to_seconds, end_dates)))
    results = [{'start': s, 'end': e} for s, e in pairs if s and lo < s < hi]
    # Explicit sort key: sorting raw dicts raises TypeError on Python 3.
    results.sort(key=lambda d: (d['start'], d['end']))
    return jsonify({'results': results})