def run_predictor(self, nn_index):
    """Run predictor #nn_index on the latest prices and persist a TradeRecommendation.

    Returns the recommendation string produced by predictor.predict()
    (compared elsewhere against 'BUY'/'SELL', so presumably upper-case --
    exact value set is not visible here).
    """
    predictor = self.predictors[nn_index]
    config = self.predictor_configs[nn_index]
    # only 'nn'-type predictors want normalized price input
    normalize = config['type'] == 'nn'
    prices = predictor.get_latest_prices(normalize=normalize)
    # keep only the trailing `datasetinputs` samples the predictor consumes
    prices = prices[(len(prices) - predictor.datasetinputs):(len(prices) + 1)]
    recommend, nn_price, last_sample, projected_change_pct = predictor.predict(
        prices)
    confidence = predictor.confidence()
    # TradeRecommendation stores an NN in `made_by` and a classifier in `clf`
    if config['type'] == 'nn':
        clf = None
        made_by = predictor
    else:
        clf = predictor
        made_by = None
    print_and_log(
        "(t)({})---- ({} w. {}% conf) ---- price from {} => {}({}% change); "
        .format(nn_index, recommend, round(confidence, 0),
                round(last_sample, 4), round(nn_price, 4),
                int(projected_change_pct * 100.0)))
    tr = TradeRecommendation(symbol=config['symbol'],
                             made_on=str(prices),
                             made_by=made_by,
                             clf=clf,
                             confidence=confidence,
                             recommendation=recommend,
                             # +1 buy / -1 sell / 0 hold
                             net_amount=-1 if recommend == 'SELL' else
                             (1 if recommend == 'BUY' else 0),
                             created_on_str=str(
                                 get_time().strftime('%Y-%m-%d %H:%M')))
    tr.save()
    # remembered so act_upon_recommendation can attach the resulting Trade
    self.trs[nn_index] = tr
    return recommend
def handle_open_orders(self):
    """For each configured ticker: cancel outstanding open orders and mark
    trades found in the exchange trade history as filled.

    NOTE(review): despite the comment below, this cancels EVERY open order
    returned by the exchange, not just filled ones -- confirm intended.
    Errors are logged per-ticker and do not abort the other tickers.
    """
    tickers = list(set([o['symbol'] for o in self.predictor_configs]))
    for ticker in tickers:
        try:
            # cancel any filled open orders
            open_orders = [] if not settings.MAKE_TRADES else self.poo.returnOpenOrders(
                ticker)
            for order in open_orders:
                orderNumber = order['orderNumber']
                rate = order['rate']
                self.poo.cancel(ticker, orderNumber)
                print_and_log(
                    '(t) -- handle_open_orders -- canceled stale order {} at rate {}'
                    .format(orderNumber, rate))
                # keep the local DB in sync with the cancellation
                for t in Trade.objects.filter(symbol=ticker,
                                              orderNumber=orderNumber):
                    t.status = 'canceled'
                    t.save()
            # update trade history
            trade_history = [] if not settings.MAKE_TRADES else self.poo.returnTradeHistory(
                ticker)
            orderNumbers = [th['orderNumber'] for th in trade_history]
            for t in Trade.objects.filter(symbol=ticker,
                                          orderNumber__in=orderNumbers):
                t.status = 'fill'
                t.save()
        except Exception as e:
            print_and_log('(t)handle_open_orders: ' + str(e))
def run_predictor(self, nn_index):
    """Fetch fresh prices, ask predictor #nn_index for a recommendation,
    log it, and save it as a TradeRecommendation row.

    Returns the raw recommendation string from predictor.predict().
    """
    predictor = self.predictors[nn_index]
    config = self.predictor_configs[nn_index]
    # normalization is only applied for neural-net predictors
    normalize = config['type'] == 'nn'
    prices = predictor.get_latest_prices(normalize=normalize)
    # slice down to the last `datasetinputs` observations
    prices = prices[(len(prices) - predictor.datasetinputs):(len(prices) + 1)]
    recommend, nn_price, last_sample, projected_change_pct = predictor.predict(prices)
    confidence = predictor.confidence()
    # record which kind of model produced the recommendation
    if config['type'] == 'nn':
        clf = None
        made_by = predictor
    else:
        clf = predictor
        made_by = None
    print_and_log("(t)({})---- ({} w. {}% conf) ---- price from {} => {}({}% change); ".format(
        nn_index, recommend, round(confidence, 0), round(last_sample, 4),
        round(nn_price, 4), int(projected_change_pct * 100.0)))
    tr = TradeRecommendation(symbol=config['symbol'],
                             made_on=str(prices),
                             made_by=made_by,
                             clf=clf,
                             confidence=confidence,
                             recommendation=recommend,
                             # encode direction as a signed unit amount
                             net_amount=-1 if recommend == 'SELL' else (1 if recommend == 'BUY' else 0),
                             created_on_str=str(get_time().strftime('%Y-%m-%d %H:%M')))
    tr.save()
    # stash for later linkage to an executed Trade
    self.trs[nn_index] = tr
    return recommend
def do_classifier_test(name, ticker, data_set_inputs, granularity, min_back, timedelta_back):
    """Train and score one mock ClassifierTest; return a one-line summary.

    On any failure, returns (never raises) a string describing the exception,
    so callers in sweep loops keep going.
    """
    try:
        ct = ClassifierTest(name=name,
                            type='mock',
                            symbol=ticker,
                            datasetinputs=data_set_inputs,
                            granularity=granularity,
                            minutes_back=min_back,
                            timedelta_back_in_granularity_increments=timedelta_back)
        ct.get_classifier()
        ct.save()
        return_data = "(ct) {} {} {} {} {} {} returned {}% correct ".format(
            name, ticker, data_set_inputs, granularity, min_back,
            timedelta_back, ct.percent_correct)
        print_and_log(return_data)
        # Hack to only graph successful charts, until we figure out this warning
        # http://bits.owocki.com/010Z1M3d170p/Image%202016-03-02%20at%208.30.17%20AM.png
        if ct.percent_correct > 60 or not settings.MAKE_TRADES:
            ct.graph(ct.graph_url())
        return return_data
    except Exception as e:
        return "Exception in {} {} {} {} {} {}: {}".format(
            name, ticker, data_set_inputs, granularity, min_back,
            timedelta_back, str(e))
def do_classifier_test(name, ticker, data_set_inputs, granularity, min_back, timedelta_back):
    """Run a single mock classifier experiment and report its accuracy.

    Always returns a string -- either the success summary or an exception
    description -- so sweep drivers never crash.
    """
    try:
        ct = ClassifierTest(
            name=name,
            type='mock',
            symbol=ticker,
            datasetinputs=data_set_inputs,
            granularity=granularity,
            minutes_back=min_back,
            timedelta_back_in_granularity_increments=timedelta_back)
        ct.get_classifier()
        ct.save()
        return_data = "(ct) {} {} {} {} {} {} returned {}% correct ".format(
            name, ticker, data_set_inputs, granularity, min_back,
            timedelta_back, ct.percent_correct)
        print_and_log(return_data)
        # Hack to only graph successful charts, until we figure out this warning
        # http://bits.owocki.com/010Z1M3d170p/Image%202016-03-02%20at%208.30.17%20AM.png
        if ct.percent_correct > 60 or not settings.MAKE_TRADES:
            ct.graph(ct.graph_url())
        return return_data
    except Exception as e:
        return "Exception in {} {} {} {} {} {}: {}".format(
            name, ticker, data_set_inputs, granularity, min_back,
            timedelta_back, str(e))
def handle_open_orders(self):
    """Sync local Trade rows with the exchange: cancel lingering open orders
    and flag historically-executed orders as filled.

    NOTE(review): the cancel loop hits every open order the exchange reports,
    not only "filled" ones as the inline comment claims -- verify intent.
    """
    tickers = list(set([o['symbol'] for o in self.predictor_configs]))
    for ticker in tickers:
        try:
            # cancel any filled open orders
            open_orders = [] if not settings.MAKE_TRADES else self.poo.returnOpenOrders(ticker)
            for order in open_orders:
                orderNumber = order['orderNumber']
                rate = order['rate']
                self.poo.cancel(ticker, orderNumber)
                print_and_log('(t) -- handle_open_orders -- canceled stale order {} at rate {}'.format(orderNumber, rate))
                # mirror the cancellation in our DB
                for t in Trade.objects.filter(symbol=ticker, orderNumber=orderNumber):
                    t.status = 'canceled'
                    t.save()
            # update trade history
            trade_history = [] if not settings.MAKE_TRADES else self.poo.returnTradeHistory(ticker)
            orderNumbers = [th['orderNumber'] for th in trade_history]
            for t in Trade.objects.filter(symbol=ticker, orderNumber__in=orderNumbers):
                t.status = 'fill'
                t.save()
        except Exception as e:
            print_and_log('(t)handle_open_orders: ' + str(e))
def handle(self, *args, **options):
    """Execute all due 'scheduled' Trades and book realized profit back onto
    the originating (opposite) trade.

    Bids slightly below the lowest ask / above the highest bid so orders fill
    quickly.
    """
    from history.poloniex import poloniex
    from history.models import Price
    import time
    poo = poloniex(settings.API_KEY, settings.API_SECRET)
    if settings.MAKE_TRADES:
        # settle-down delay before touching the live exchange -- TODO confirm why 40s
        time.sleep(40)
    for t in Trade.objects.filter(created_on__lt=datetime.datetime.now(),
                                  status='scheduled'):
        # bid right below the lowest ask, or right above the highest bid so
        # that our orders get filled
        action = t.type
        price = Price.objects.filter(
            symbol=t.symbol).order_by('-created_on').first()
        if action == 'sell':
            rate = price.lowestask * 0.999
        else:
            rate = price.highestbid * 1.001
        t.price = rate
        # BUGFIX: `response` must exist even if the API call below raises,
        # otherwise the bookkeeping code dies with a NameError.
        response = {}
        if action == 'buy':
            try:
                response = {} if not settings.MAKE_TRADES else poo.buy(
                    t.symbol, rate, t.amount)
            except Exception as e:
                print_and_log('(st)act_upon_recommendation:buy: ' + str(e))
        elif action == 'sell':
            try:
                response = {} if not settings.MAKE_TRADES else poo.sell(
                    t.symbol, rate, t.amount)
            except Exception as e:
                print_and_log('(st)act_upon_recommendation:sell: ' + str(e))
        # BUGFIX: was `t.response = response,` -- the stray trailing comma
        # stored a 1-tuple instead of the API response itself.
        t.response = response
        t.orderNumber = response.get('orderNumber', '')
        t.status = 'error' if response.get('error', False) else 'open'
        t.calculatefees()
        t.calculate_exchange_rates()
        t.save()
        # back-fill realized profit onto the trade that scheduled this one
        ot = t.opposite_trade
        ot.opposite_price = rate
        ot.net_profit = ((rate * t.amount) - (ot.price * ot.amount)
                         if action == 'sell' else
                         (ot.price * ot.amount) -
                         (rate * t.amount)) - ot.fee_amount - t.fee_amount
        ot.calculate_profitability_exchange_rates()
        ot.save()
def handle(self, *args, **options):
    """Fire all due 'scheduled' Trades at a fill-friendly rate, then record
    net profit on each trade's opposite (originating) leg.
    """
    from history.poloniex import poloniex
    from history.models import Price
    import time
    poo = poloniex(settings.API_KEY, settings.API_SECRET)
    if settings.MAKE_TRADES:
        # settle-down delay before live trading -- TODO confirm why 40s
        time.sleep(40)
    for t in Trade.objects.filter(created_on__lt=datetime.datetime.now(),
                                  status='scheduled'):
        # bid right below the lowest ask, or right above the highest bid so that our orders get filled
        action = t.type
        price = Price.objects.filter(symbol=t.symbol).order_by('-created_on').first()
        if action == 'sell':
            rate = price.lowestask * 0.999
        else:
            rate = price.highestbid * 1.001
        t.price = rate
        # BUGFIX: initialize `response` so that an exception inside the API
        # call does not leave it undefined (NameError in the code below).
        response = {}
        if action == 'buy':
            try:
                response = {} if not settings.MAKE_TRADES else poo.buy(t.symbol, rate, t.amount)
            except Exception as e:
                print_and_log('(st)act_upon_recommendation:buy: ' + str(e))
        elif action == 'sell':
            try:
                response = {} if not settings.MAKE_TRADES else poo.sell(t.symbol, rate, t.amount)
            except Exception as e:
                print_and_log('(st)act_upon_recommendation:sell: ' + str(e))
        t.response = response
        t.orderNumber = response.get('orderNumber', '')
        t.status = 'error' if response.get('error', False) else 'open'
        t.calculatefees()
        t.calculate_exchange_rates()
        t.save()
        # book profit/loss on the originating trade now the position closed
        ot = t.opposite_trade
        ot.opposite_price = rate
        ot.net_profit = ((rate * t.amount) - (ot.price * ot.amount) if action == 'sell'
                         else (ot.price * ot.amount) - (rate * t.amount)) - ot.fee_amount - t.fee_amount
        ot.calculate_profitability_exchange_rates()
        ot.save()
def get_traders(self):
    """Build and train one predictor per entry of self.predictor_configs.

    Populates self.predictors (index -> trained PredictionTest or
    ClassifierTest) and self.confidence (index -> predicted confidence %),
    and returns the predictors dict.

    NOTE(review): reverses self.predictor_configs IN PLACE, so every later
    use of the config list sees the reversed order -- confirm intentional.
    """
    predictors = {}
    self.confidence = {}
    self.trs = {}
    self.predictor_configs.reverse()
    for i in range(0, len(self.predictor_configs)):
        config = self.predictor_configs[i]
        if config['type'] == 'nn':
            # neural-net predictor: all hyperparameters fixed here except
            # symbol/inputs/granularity, which come from the config
            pt = PredictionTest()
            pt.type = 'real'
            pt.symbol = config['symbol']
            pt.datasetinputs = config['datasetinputs']
            pt.hiddenneurons = 5
            pt.minutes_back = 100
            pt.epochs = 1000
            pt.momentum = 0.1
            pt.granularity = config['granularity']
            pt.bias = True
            pt.learningrate = 0.05
            pt.weightdecay = 0.0
            pt.recurrent = True
            pt.timedelta_back_in_granularity_increments = 0
            pt.save()
            predict_runtime = pt.predict_runtime()
            predict_confidence = pt.confidence()
            print_and_log(
                "(t)predicted trainingtime for nn #{} {}: {}s, predicted confidence: {}%"
                .format(i, config['name'], round(predict_runtime, 1),
                        int(predict_confidence)))
            # only actually train when live trading is enabled
            pt.get_nn(train=settings.MAKE_TRADES)
            print_and_log("(t)done training")
            predictors[i] = pt
            self.confidence[i] = predict_confidence
        else:
            # classical classifier predictor
            ct = ClassifierTest(name=config['name'],
                                type='real',
                                symbol=config['symbol'],
                                datasetinputs=config['datasetinputs'],
                                granularity=config['granularity'],
                                minutes_back=config['minutes_back'],
                                timedelta_back_in_granularity_increments=0)
            predict_runtime = ct.predict_runtime()
            predict_confidence = ct.confidence()
            print_and_log(
                "(t)predicted trainingtime for nn #{} {}: {}s, predicted confidence: {}%"
                .format(i, config['name'], round(predict_runtime, 1),
                        int(predict_confidence)))
            ct.get_classifier(test=False)
            print_and_log("(t)done training")
            predictors[i] = ct
            self.confidence[i] = predict_confidence
            ct.save()
    self.predictors = predictors
    return self.predictors
def do_prediction_test(ticker, hidden_layers, min_back, epochs, granularity,
                       datasetinputs, learningrate, bias, momentum, recurrent,
                       weightdecay, timedelta_back_in_granularity_increments):
    """Run a single mock prediction test via predict_v2, logging any failure
    instead of raising so sweep loops keep going."""
    kwargs = {
        'hidden_layers': hidden_layers,
        'NUM_MINUTES_BACK': min_back,
        'NUM_EPOCHS': epochs,
        'granularity_minutes': granularity,
        'datasetinputs': datasetinputs,
        'learningrate': learningrate,
        'bias': bias,
        'momentum': momentum,
        'recurrent': recurrent,
        'weightdecay': weightdecay,
        'timedelta_back_in_granularity_increments':
            timedelta_back_in_granularity_increments,
    }
    try:
        predict_v2(ticker, **kwargs)
    except Exception as e:
        print_and_log("(p)" + str(e))
def do_prediction_test(ticker, hidden_layers, min_back, epochs, granularity,
                       datasetinputs, learningrate, bias, momentum, recurrent,
                       weightdecay, timedelta_back_in_granularity_increments):
    """Thin wrapper around predict_v2 that logs exceptions instead of raising."""
    try:
        predict_v2(ticker,
                   hidden_layers=hidden_layers,
                   NUM_MINUTES_BACK=min_back,
                   NUM_EPOCHS=epochs,
                   granularity_minutes=granularity,
                   datasetinputs=datasetinputs,
                   learningrate=learningrate,
                   bias=bias,
                   momentum=momentum,
                   recurrent=recurrent,
                   weightdecay=weightdecay,
                   timedelta_back_in_granularity_increments=timedelta_back_in_granularity_increments)
    except Exception as e:
        print_and_log("(p)" + str(e))
def get_traders(self):
    """Instantiate + train every configured predictor and cache it on self.

    Side effects: reverses self.predictor_configs in place (NOTE(review):
    confirm this ordering change is deliberate), and resets self.confidence
    and self.trs. Returns self.predictors.
    """
    predictors = {}
    self.confidence = {}
    self.trs = {}
    self.predictor_configs.reverse()
    for i in range(0, len(self.predictor_configs)):
        config = self.predictor_configs[i]
        if config['type'] == 'nn':
            # NN branch: hard-coded training hyperparameters
            pt = PredictionTest()
            pt.type = 'real'
            pt.symbol = config['symbol']
            pt.datasetinputs = config['datasetinputs']
            pt.hiddenneurons = 5
            pt.minutes_back = 100
            pt.epochs = 1000
            pt.momentum = 0.1
            pt.granularity = config['granularity']
            pt.bias = True
            pt.learningrate = 0.05
            pt.weightdecay = 0.0
            pt.recurrent = True
            pt.timedelta_back_in_granularity_increments = 0
            pt.save()
            predict_runtime = pt.predict_runtime()
            predict_confidence = pt.confidence()
            print_and_log("(t)predicted trainingtime for nn #{} {}: {}s, predicted confidence: {}%".
                          format(i, config['name'], round(predict_runtime, 1), int(predict_confidence)))
            # training only happens when live trading is on
            pt.get_nn(train=settings.MAKE_TRADES)
            print_and_log("(t)done training")
            predictors[i] = pt
            self.confidence[i] = predict_confidence
        else:
            # classifier branch: parameters come from the config entry
            ct = ClassifierTest(name=config['name'],
                                type='real',
                                symbol=config['symbol'],
                                datasetinputs=config['datasetinputs'],
                                granularity=config['granularity'],
                                minutes_back=config['minutes_back'],
                                timedelta_back_in_granularity_increments=0)
            predict_runtime = ct.predict_runtime()
            predict_confidence = ct.confidence()
            print_and_log("(t)predicted trainingtime for nn #{} {}: {}s, predicted confidence: {}%".
                          format(i, config['name'], round(predict_runtime, 1), int(predict_confidence)))
            ct.get_classifier(test=False)
            print_and_log("(t)done training")
            predictors[i] = ct
            self.confidence[i] = predict_confidence
            ct.save()
    self.predictors = predictors
    return self.predictors
def handle(self, *args, **options):
    """Grid-search mock ClassifierTest runs over all option combinations,
    logging each result and graphing the apparently-successful ones.
    """
    ticker_options = ['BTC_ETH', 'USDT_BTC']
    min_back_options = [100, 1000, 24 * 60, 24 * 60 * 2]
    granularity_options = [10, 15, 20, 30, 40, 50, 60, 120, 240]
    if not settings.MAKE_TRADES:
        # keep non-trading (dev) runs cheap
        granularity_options = [1]
    datasetinput_options = [2]  # TODO: enable more than just 1 type
    # sets how far apart (in granularity increments) the datasets are
    timedelta_back_in_granularity_increments_options = [10, 30, 60, 100, 1000]
    name_options = ["Nearest Neighbors", "Linear SVM", "RBF SVM",
                    "Decision Tree", "Random Forest", "AdaBoost",
                    "Naive Bayes", "Linear Discriminant Analysis",
                    "Quadratic Discriminant Analysis"]
    for ticker in ticker_options:
        for min_back in min_back_options:
            for granularity in granularity_options:
                for datasetinputs in datasetinput_options:
                    for timedelta_back_in_granularity_increments in timedelta_back_in_granularity_increments_options:
                        for name in name_options:
                            try:
                                ct = ClassifierTest(name=name,
                                                    type='mock',
                                                    symbol=ticker,
                                                    datasetinputs=datasetinputs,
                                                    granularity=granularity,
                                                    minutes_back=min_back,
                                                    timedelta_back_in_granularity_increments=timedelta_back_in_granularity_increments)
                                ct.get_classifier()
                                ct.save()
                                print_and_log("(ct) {} {} {} {} {} {} returned {}% corrrect ".format(name, ticker, datasetinputs, granularity, min_back, timedelta_back_in_granularity_increments, ct.percent_correct))
                                if ct.percent_correct > 60 or not settings.MAKE_TRADES:
                                    # hack to only graph successful charts, until we figure out this warning http://bits.owocki.com/010Z1M3d170p/Image%202016-03-02%20at%208.30.17%20AM.png
                                    ct.graph(ct.graph_url())
                            except Exception as e:
                                print("exception:" + str(e))
def handle(self, *args, **options):
    """Main trading loop.

    Trains every configured predictor once, then loops forever: each time a
    predictor's granularity window opens (minute boundary, second < 1), sync
    open orders, run the due predictors, act on their recommendations, and
    log summary stats.
    """
    # setup
    self.poo = poloniex(settings.API_KEY, settings.API_SECRET)
    self.setup()
    print_and_log("(t){} ---- ****** STARTING TRAINERS ******* ".format(
        str(datetime.datetime.now())))
    self.get_traders()
    print_and_log(
        "(t){} ---- ****** DONE TRAINING ALL TRAINERS ******* ".format(
            str(datetime.datetime.now())))
    while True:
        # TLDR -- which NNs should run at this granularity?
        should_run = []
        recommendations = dict.fromkeys(range(0, len(self.predictors)))
        for i in range(0, len(self.predictor_configs)):
            config = self.predictor_configs[i]
            # fire only at the top of the predictor's granularity window
            if (int(get_utc_unixtime() / 60) % config['granularity'] == 0
                    and datetime.datetime.now().second < 1):
                should_run.append(i)
        # TLDR -- update open orders bfore placing new ones
        if len(should_run) > 0:
            self.handle_open_orders()
        # TLDR -- run the NNs specified at this granularity
        for i in should_run:
            config = self.predictor_configs[i]
            recommend = self.run_predictor(i)
            recommendations[i] = recommend
            time.sleep(1)
        # TLDR - act upon recommendations
        for i in range(0, len(recommendations)):
            recommendation = recommendations[i]
            config = self.predictor_configs[i]
            if recommendation is not None:
                print_and_log("(t)recommendation {} - {} : {}".format(
                    i, str(config['name']), recommendation))
                self.act_upon_recommendation(i, recommendation)
        # TLDR - cleanup and stats
        if len(should_run) > 0:
            pct_buy = round(100.0 * sum(recommendations[i] == 'BUY'
                                        for i in recommendations) /
                            len(recommendations))
            pct_sell = round(100.0 * sum(recommendations[i] == 'SELL'
                                         for i in recommendations) /
                             len(recommendations))
            print_and_log("(t)TLDR - {}% buy & {}% sell: {}".format(
                pct_buy, pct_sell, recommendations))
            print_and_log(
                "(t) ******************************************************************************* "
            )
            print_and_log("(t) portfolio is {}".format(
                self.get_portfolio_breakdown_pct()))
            print_and_log(
                "(t) ******************************************************************************* "
            )
            print_and_log("(t) {} ..... \n waiting again ..... ".format(
                str(datetime.datetime.now())))
            print_and_log(
                "(t) ******************************************************************************* "
            )
        time.sleep(1)
def predict_v2(ticker,
               hidden_layers=15,
               NUM_MINUTES_BACK=1000,
               NUM_EPOCHS=1000,
               granularity_minutes=15,
               datasetinputs=5,
               learningrate=0.005,
               bias=False,
               momentum=0.1,
               weightdecay=0.0,
               recurrent=False,
               timedelta_back_in_granularity_increments=0):
    """Train a mock PredictionTest NN with the given hyperparameters, replay
    it over the held-out test data, and persist accuracy / modeled P&L stats.

    Returns the saved PredictionTest primary key.
    """
    # setup
    print_and_log(
        "(p)starting ticker:{} hidden:{} min:{} epoch:{} gran:{} dsinputs:{} learningrate:{} bias:{} momentum:{} weightdecay:{}\
recurrent:{}, timedelta_back_in_granularity_increments:{} ".
        format(ticker, hidden_layers, NUM_MINUTES_BACK, NUM_EPOCHS,
               granularity_minutes, datasetinputs, learningrate, bias,
               momentum, weightdecay, recurrent,
               timedelta_back_in_granularity_increments))
    pt = PredictionTest()
    pt.type = 'mock'
    pt.symbol = ticker
    pt.datasetinputs = datasetinputs
    pt.hiddenneurons = hidden_layers
    pt.minutes_back = NUM_MINUTES_BACK
    pt.epochs = NUM_EPOCHS
    pt.momentum = momentum
    pt.granularity = granularity_minutes
    pt.bias = bias
    # chart fields encode None/False/True as -1/0/1
    pt.bias_chart = -1 if pt.bias is None else (1 if pt.bias else 0)
    pt.learningrate = learningrate
    pt.weightdecay = weightdecay
    pt.recurrent = recurrent
    pt.recurrent_chart = -1 if pt.recurrent is None else (
        1 if pt.recurrent else 0)
    pt.timedelta_back_in_granularity_increments = timedelta_back_in_granularity_increments
    all_output = ""
    start_time = int(time.time())
    # get neural network & data
    pt.get_nn()
    sample_data, test_data = pt.get_train_and_test_data()
    # output / testing
    round_to = 2
    num_times_directionally_correct = 0
    num_times = 0
    diffs = []
    profitloss_pct = []
    for i, val in enumerate(test_data):
        try:
            # get NN projection
            sample = create_sample_row(test_data, i, datasetinputs)
            recommend, nn_price, last_sample, projected_change_pct = pt.predict(
                sample)
            # calculate profitability
            actual_price = test_data[i + datasetinputs]
            diff = nn_price - actual_price
            diff_pct = 100 * diff / actual_price
            # correct if prediction and reality moved the same direction
            directionally_correct = ((actual_price - last_sample) > 0 and (nn_price - last_sample) > 0) \
                or ((actual_price - last_sample) < 0 and (nn_price - last_sample) < 0)
            if recommend != 'HOLD':
                # hypothetical gain (or loss) had we traded on this signal
                profitloss_pct = profitloss_pct + [
                    abs((actual_price - last_sample) / last_sample) *
                    (1 if directionally_correct else -1)
                ]
            if directionally_correct:
                num_times_directionally_correct = num_times_directionally_correct + 1
            num_times = num_times + 1
            diffs.append(diff)
            output = "{}) seq ending in {} => {} (act {}, {}/{} pct off); Recommend: {}; Was Directionally Correct:{}\
".format(i, round(actual_price, round_to), round(nn_price, round_to),
         round(actual_price, round_to), round(diff, round_to),
         round(diff_pct, 1), recommend, directionally_correct)
            all_output = all_output + "\n" + output
        except Exception as e:
            # windows that run past the end of test_data raise IndexError;
            # those are expected and silently skipped
            if "list index out of range" not in str(e):
                print_and_log("(p)" + str(e))
            pass
    # NOTE(review): diff is indexed (diff[0]) -- presumably pt.predict returns
    # an array-like price; confirm. Also assumes num_times > 0.
    avg_diff = sum([abs(diff[0]) for diff in diffs]) / num_times  # noqa
    pct_correct = 100 * num_times_directionally_correct / num_times
    modeled_profit_loss = sum(profitloss_pct) / len(profitloss_pct)
    output = 'directionally correct {} of {} times. {}%. avg diff={}, profit={}'.format(
        num_times_directionally_correct, num_times, round(pct_correct, 0),
        round(avg_diff, 4), round(modeled_profit_loss, 3))
    print_and_log("(p)" + output)
    all_output = all_output + "\n" + output
    end_time = int(time.time())
    pt.time = end_time - start_time
    pt.prediction_size = len(diffs)
    pt.output = all_output
    pt.percent_correct = pct_correct
    pt.avg_diff = avg_diff
    pt.profitloss = modeled_profit_loss
    pt.profitloss_int = int(pt.profitloss * 100)
    pt.save()
    return pt.pk
def handle(self, *args, **options):
    """Long-running trade loop: train all predictors, then poll once a second,
    running whichever predictors are due at the current minute boundary and
    acting on their recommendations.
    """
    # setup
    self.poo = poloniex(settings.API_KEY, settings.API_SECRET)
    self.setup()
    print_and_log("(t){} ---- ****** STARTING TRAINERS ******* ".format(str(datetime.datetime.now())))
    self.get_traders()
    print_and_log("(t){} ---- ****** DONE TRAINING ALL TRAINERS ******* ".format(str(datetime.datetime.now())))
    while True:
        # TLDR -- which NNs should run at this granularity?
        should_run = []
        recommendations = dict.fromkeys(range(0, len(self.predictors)))
        for i in range(0, len(self.predictor_configs)):
            config = self.predictor_configs[i]
            # due when the minute divides the granularity and we are at second 0
            if (int(get_utc_unixtime() / 60) % config['granularity'] == 0 and datetime.datetime.now().second < 1):
                should_run.append(i)
        # TLDR -- update open orders bfore placing new ones
        if len(should_run) > 0:
            self.handle_open_orders()
        # TLDR -- run the NNs specified at this granularity
        for i in should_run:
            pt = self.predictors[i]  # NOTE(review): pt is unused here
            config = self.predictor_configs[i]
            recommend = self.run_predictor(i)
            recommendations[i] = recommend
            time.sleep(1)
        # TLDR - act upon recommendations
        for i in range(0, len(recommendations)):
            recommendation = recommendations[i]
            config = self.predictor_configs[i]
            if recommendation is not None:
                print_and_log("(t)recommendation {} - {} : {}".format(i, str(config['name']), recommendation))
                self.act_upon_recommendation(i, recommendation)
        # TLDR - cleanup and stats
        if len(should_run) > 0:
            pct_buy = round(100.0 * sum(recommendations[i] == 'BUY' for i in recommendations) / len(recommendations))
            pct_sell = round(100.0 * sum(recommendations[i] == 'SELL' for i in recommendations) / len(recommendations))
            print_and_log("(t)TLDR - {}% buy & {}% sell: {}".format(pct_buy, pct_sell, recommendations))
            print_and_log("(t) ******************************************************************************* ")
            print_and_log("(t) portfolio is {}".format(self.get_portfolio_breakdown_pct()))
            print_and_log("(t) ******************************************************************************* ")
            print_and_log("(t) {} ..... \n waiting again ..... ".format(str(datetime.datetime.now())))
            print_and_log("(t) ******************************************************************************* ")
        time.sleep(1)
def act_upon_recommendation(self, i, recommendation):
    """Turn predictor #i's recommendation into a live trade, plus a scheduled
    opposite trade that closes the position after one granularity period.
    Non-decisive recommendations are logged and skipped.
    """
    # setup
    config = self.predictor_configs[i]
    currencyPair = config['symbol']
    # bid right below the lowest ask, or right above the highest bid so that our orders get filled
    price = Price.objects.filter(symbol=currencyPair).order_by('-created_on').first()
    # NOTE(review): run_predictor compares recommendations against upper-case
    # 'SELL'/'BUY', so this lower-case check may never match and sells would
    # get the buy-side rate -- confirm the intended casing.
    if recommendation == 'sell':
        rate = price.lowestask * 0.999
    else:
        rate = price.highestbid * 1.001
    # decide action
    action = recommendation.lower()
    # BUGFIX: `amount` must be defined even when the recommendation is not
    # decisive, otherwise the logging / Trade construction below raises a
    # NameError when MAKE_TRADES is off (the `if response or not
    # settings.MAKE_TRADES` branch still runs).
    amount = 0.00
    if action in ['buy', 'sell']:
        amount = self.decide_trade_amount(action, i)
    # do items
    response = {}
    if action == 'buy':
        try:
            response = {} if not settings.MAKE_TRADES else self.poo.buy(currencyPair, rate, amount)
        except Exception as e:
            print_and_log('(t)act_upon_recommendation:buy: ' + str(e))
    elif action == 'sell':
        try:
            response = {} if not settings.MAKE_TRADES else self.poo.sell(currencyPair, rate, amount)
        except Exception as e:
            print_and_log('(t)act_upon_recommendation:sell: ' + str(e))
    else:
        print_and_log('(t)---- act_upon_recommendation declining to act. NNs not decisive')
    if response or not settings.MAKE_TRADES:
        print_and_log('(t)---- act_upon_recommendation performing {} for {} units. \nresponse from api: {}'.format(action, amount, response))
        # make this trade now
        t = Trade(type=action,
                  symbol=currencyPair,
                  price=rate,
                  amount=amount,
                  response=response,
                  orderNumber=response.get('orderNumber', ''),
                  status='error' if response.get('error', False) else 'open',
                  net_amount=((1 if action == 'buy' else -1) * amount))
        t.calculatefees()
        t.calculate_exchange_rates()
        t.save()
        # attach the executed trade to the recommendation that caused it
        self.trs[i].trade = t
        self.trs[i].save()
        if not response.get('error', False):
            # make opposite trade in {granularity} minutes
            ot = Trade(type='buy' if action == 'sell' else 'sell',
                       symbol=currencyPair,
                       price=0,
                       amount=amount,
                       response='',
                       orderNumber='',
                       status='scheduled',
                       net_amount=((1 if action == 'sell' else -1) * amount),
                       created_on=(datetime.datetime.now() + datetime.timedelta(minutes=config['granularity'])))
            ot.save()
            # link the two legs both ways
            t.opposite_trade = ot
            ot.opposite_trade = t
            t.save()
            ot.save()
def act_upon_recommendation(self, i, recommendation):
    """Execute predictor #i's recommendation: place the trade at a
    fill-friendly rate, record it against the TradeRecommendation, and
    schedule the opposite trade one granularity period later.
    """
    # setup
    config = self.predictor_configs[i]
    currencyPair = config['symbol']
    # bid right below the lowest ask, or right above the highest bid so that our orders get filled
    price = Price.objects.filter(
        symbol=currencyPair).order_by('-created_on').first()
    # NOTE(review): recommendations appear to be upper-case elsewhere
    # ('SELL'/'BUY'), so this lower-case comparison may never match -- confirm.
    if recommendation == 'sell':
        rate = price.lowestask * 0.999
    else:
        rate = price.highestbid * 1.001
    # decide action
    action = recommendation.lower()
    # default so the logging / Trade code below is safe on a HOLD
    amount = 0.00
    if action in ['buy', 'sell']:
        amount = self.decide_trade_amount(action, i)
    # do items
    response = {}
    if action == 'buy':
        try:
            response = {} if not settings.MAKE_TRADES else self.poo.buy(
                currencyPair, rate, amount)
        except Exception as e:
            print_and_log('(t)act_upon_recommendation:buy: ' + str(e))
    elif action == 'sell':
        try:
            response = {} if not settings.MAKE_TRADES else self.poo.sell(
                currencyPair, rate, amount)
        except Exception as e:
            print_and_log('(t)act_upon_recommendation:sell: ' + str(e))
    else:
        print_and_log(
            '(t)---- act_upon_recommendation declining to act. NNs not decisive'
        )
    if response or not settings.MAKE_TRADES:
        print_and_log(
            '(t)---- act_upon_recommendation performing {} for {} units. \nresponse from api: {}'
            .format(action, amount, response))
        # make this trade now
        t = Trade(
            type=action,
            symbol=currencyPair,
            price=rate,
            amount=amount,
            response=response,
            orderNumber=response.get('orderNumber', ''),
            status='error' if response.get('error', False) else 'open',
            net_amount=((1 if action == 'buy' else -1) * amount))
        t.calculatefees()
        t.calculate_exchange_rates()
        t.save()
        # link the executed trade back to its recommendation
        self.trs[i].trade = t
        self.trs[i].save()
        if not response.get('error', False):
            # make opposite trade in {granularity} minutes
            ot = Trade(
                type='buy' if action == 'sell' else 'sell',
                symbol=currencyPair,
                price=0,
                amount=amount,
                response='',
                orderNumber='',
                status='scheduled',
                net_amount=((1 if action == 'sell' else -1) * amount),
                created_on=(
                    datetime.datetime.now() +
                    datetime.timedelta(minutes=config['granularity'])))
            ot.save()
            # cross-link the two legs
            t.opposite_trade = ot
            ot.opposite_trade = t
            t.save()
            ot.save()
def handle(self, *args, **options):
    """Exhaustive mock NN sweep: call predict_v2 for every combination of the
    hyperparameter option lists below. The dated comments are the author's
    log of how each list was pruned from earlier sweep results.
    """
    ticker_options = ['BTC_ETH', 'USDT_BTC']
    hidden_layer_options = [1, 5, 15, 40]
    # 2/23 -- removed 15, it was barely edged out by 1,5.
    # 2/25 -- added 15, 40 in because of recent bugs
    min_back_options = [100, 1000, 24 * 60, 24 * 60 * 2]
    # 2/22 - eliminated 10000
    # 2/25 -- added 24*50, 24*60*2 because "maybe because NN only contains last 1000 data points (1/3 day). if only selling happened during taht time, nn will bias towards selling. duh!"
    granularity_options = [10, 15, 20, 30, 40, 50, 60, 120, 240]
    # 2/23 notes - results so far: 59 (54% correct) 15 (56% correct) 1 (50% correct) 5 (52% correct). removing 1,5, adding 30 . 2/23 (pt 2) -- added 119, 239
    # 2/24 notes -- removed 120,240, added 20, 40, 45
    # 2/25 notes -- added 10, 50, removed 45
    # 2/25 -- added 120,240 back in to retest in light of recent bugs
    datasetinput_options = [1, 2, 3, 4, 5, 6, 15, 10, 20, 40, 100, 200]
    # 2/23 -- removed 3,5,15 -- added 20,40,100
    # 2/24 -- removed 7,10,20,40,100, added 3,4,5
    # 2/25 -- added 3,5,15,10,20,40,100 back in to retest in light of recent bugs
    epoch_options = [1000]  # 2/22 -- eliminated 4000, 100
    bias_options = [True]  # 2/22 -- Eliminated 'False'
    momentum_options = [0.1]
    learningrate_options = [0.05]
    # 2/22 -- elimated 0.005, 0.01, adding 0.03 and 0.1 today.
    # 2/23 - 0.1 (54% correct) 0.05 (55% correct) 0.03 (54% correct) .
    # eliminating everything but 0.05 so i can test more #datasetinput_options
    weightdecay_options = [0.0]  # 2/22 -- eliminated 0.1,0.2
    recurrent_options = [True]  # 2/23 notes - 0 (52% correct) 1 (55% correct), removed false
    # sets how far apart (in granularity increments) the datasets are
    timedelta_back_in_granularity_increments_options = [10, 30, 60, 100, 1000]
    for ticker in ticker_options:
        for hidden_layers in hidden_layer_options:
            for min_back in min_back_options:
                for epochs in epoch_options:
                    for granularity in granularity_options:
                        for datasetinputs in datasetinput_options:
                            for bias in bias_options:
                                for momentum in momentum_options:
                                    for learningrate in learningrate_options:
                                        for weightdecay in weightdecay_options:
                                            for recurrent in recurrent_options:
                                                for timedelta_back_in_granularity_increments in timedelta_back_in_granularity_increments_options:
                                                    try:
                                                        predict_v2(ticker,
                                                                   hidden_layers=hidden_layers,
                                                                   NUM_MINUTES_BACK=min_back,
                                                                   NUM_EPOCHS=epochs,
                                                                   granularity_minutes=granularity,
                                                                   datasetinputs=datasetinputs,
                                                                   learningrate=learningrate,
                                                                   bias=bias,
                                                                   momentum=momentum,
                                                                   recurrent=recurrent,
                                                                   weightdecay=weightdecay,
                                                                   timedelta_back_in_granularity_increments=timedelta_back_in_granularity_increments)
                                                    except Exception as e:
                                                        print_and_log("(p)" + str(e))
def handle(self, *args, **options):
    """Hyperparameter sweep: invoke predict_v2 for the cartesian product of
    all option lists, logging (not raising) any per-run failure. Dated
    comments record how earlier sweep results pruned each list.
    """
    ticker_options = ['BTC_ETH', 'USDT_BTC']
    hidden_layer_options = [1, 5, 15, 40]
    # 2/23 -- removed 15, it was barely edged out by 1,5.
    # 2/25 -- added 15, 40 in because of recent bugs
    min_back_options = [100, 1000, 24 * 60, 24 * 60 * 2]
    # 2/22 - eliminated 10000
    # 2/25 -- added 24*50, 24*60*2 because "maybe because NN only contains last 1000 data points (1/3 day). if only selling happened during taht time, nn will bias towards selling. duh!"
    granularity_options = [10, 15, 20, 30, 40, 50, 60, 120, 240]
    # 2/23 notes - results so far: 59 (54% correct) 15 (56% correct) 1 (50% correct) 5 (52% correct). removing 1,5, adding 30 . 2/23 (pt 2) -- added 119, 239
    # 2/24 notes -- removed 120,240, added 20, 40, 45
    # 2/25 notes -- added 10, 50, removed 45
    # 2/25 -- added 120,240 back in to retest in light of recent bugs
    datasetinput_options = [1, 2, 3, 4, 5, 6, 15, 10, 20, 40, 100, 200]
    # 2/23 -- removed 3,5,15 -- added 20,40,100
    # 2/24 -- removed 7,10,20,40,100, added 3,4,5
    # 2/25 -- added 3,5,15,10,20,40,100 back in to retest in light of recent bugs
    epoch_options = [1000]  # 2/22 -- eliminated 4000, 100
    bias_options = [True]  # 2/22 -- Eliminated 'False'
    momentum_options = [0.1]
    learningrate_options = [0.05]
    # 2/22 -- elimated 0.005, 0.01, adding 0.03 and 0.1 today.
    # 2/23 - 0.1 (54% correct) 0.05 (55% correct) 0.03 (54% correct) .
    # eliminating everything but 0.05 so i can test more #datasetinput_options
    weightdecay_options = [0.0]  # 2/22 -- eliminated 0.1,0.2
    recurrent_options = [True]  # 2/23 notes - 0 (52% correct) 1 (55% correct), removed false
    # sets how far apart (in granularity increments) the datasets are
    timedelta_back_in_granularity_increments_options = [
        10, 30, 60, 100, 1000
    ]
    for ticker in ticker_options:
        for hidden_layers in hidden_layer_options:
            for min_back in min_back_options:
                for epochs in epoch_options:
                    for granularity in granularity_options:
                        for datasetinputs in datasetinput_options:
                            for bias in bias_options:
                                for momentum in momentum_options:
                                    for learningrate in learningrate_options:
                                        for weightdecay in weightdecay_options:
                                            for recurrent in recurrent_options:
                                                for timedelta_back_in_granularity_increments in timedelta_back_in_granularity_increments_options:
                                                    try:
                                                        predict_v2(
                                                            ticker,
                                                            hidden_layers=hidden_layers,
                                                            NUM_MINUTES_BACK=min_back,
                                                            NUM_EPOCHS=epochs,
                                                            granularity_minutes=granularity,
                                                            datasetinputs=datasetinputs,
                                                            learningrate=learningrate,
                                                            bias=bias,
                                                            momentum=momentum,
                                                            recurrent=recurrent,
                                                            weightdecay=weightdecay,
                                                            timedelta_back_in_granularity_increments=timedelta_back_in_granularity_increments)
                                                    except Exception as e:
                                                        print_and_log(
                                                            "(p)" + str(e))
def predict_v2(ticker, hidden_layers=15, NUM_MINUTES_BACK=1000, NUM_EPOCHS=1000,
               granularity_minutes=15, datasetinputs=5, learningrate=0.005,
               bias=False, momentum=0.1, weightdecay=0.0, recurrent=False,
               timedelta_back_in_granularity_increments=0):
    """Build + train a mock PredictionTest NN, evaluate it against the test
    split (directional accuracy, average error, modeled profit/loss), save
    the stats on the model row, and return its primary key.
    """
    # setup
    print_and_log("(p)starting ticker:{} hidden:{} min:{} epoch:{} gran:{} dsinputs:{} learningrate:{} bias:{} momentum:{} weightdecay:{}\
recurrent:{}, timedelta_back_in_granularity_increments:{} ".format(
        ticker, hidden_layers, NUM_MINUTES_BACK, NUM_EPOCHS, granularity_minutes,
        datasetinputs, learningrate, bias, momentum, weightdecay, recurrent,
        timedelta_back_in_granularity_increments))
    pt = PredictionTest()
    pt.type = 'mock'
    pt.symbol = ticker
    pt.datasetinputs = datasetinputs
    pt.hiddenneurons = hidden_layers
    pt.minutes_back = NUM_MINUTES_BACK
    pt.epochs = NUM_EPOCHS
    pt.momentum = momentum
    pt.granularity = granularity_minutes
    pt.bias = bias
    # *_chart fields map None/False/True to -1/0/1 for charting
    pt.bias_chart = -1 if pt.bias is None else (1 if pt.bias else 0)
    pt.learningrate = learningrate
    pt.weightdecay = weightdecay
    pt.recurrent = recurrent
    pt.recurrent_chart = -1 if pt.recurrent is None else (1 if pt.recurrent else 0)
    pt.timedelta_back_in_granularity_increments = timedelta_back_in_granularity_increments
    all_output = ""
    start_time = int(time.time())
    # get neural network & data
    pt.get_nn()
    sample_data, test_data = pt.get_train_and_test_data()
    # output / testing
    round_to = 2
    num_times_directionally_correct = 0
    num_times = 0
    diffs = []
    profitloss_pct = []
    for i, val in enumerate(test_data):
        try:
            # get NN projection
            sample = create_sample_row(test_data, i, datasetinputs)
            recommend, nn_price, last_sample, projected_change_pct = pt.predict(sample)
            # calculate profitability
            actual_price = test_data[i+datasetinputs]
            diff = nn_price - actual_price
            diff_pct = 100 * diff / actual_price
            # did prediction and reality move the same way from last_sample?
            directionally_correct = ((actual_price - last_sample) > 0 and (nn_price - last_sample) > 0) \
                or ((actual_price - last_sample) < 0 and (nn_price - last_sample) < 0)
            if recommend != 'HOLD':
                # hypothetical signed return had we acted on this signal
                profitloss_pct = profitloss_pct + [abs((actual_price - last_sample) / last_sample) *
                                                   (1 if directionally_correct else -1)]
            if directionally_correct:
                num_times_directionally_correct = num_times_directionally_correct + 1
            num_times = num_times + 1
            diffs.append(diff)
            output = "{}) seq ending in {} => {} (act {}, {}/{} pct off); Recommend: {}; Was Directionally Correct:{}\
".format(i, round(actual_price, round_to), round(nn_price, round_to),
         round(actual_price, round_to), round(diff, round_to),
         round(diff_pct, 1), recommend, directionally_correct)
            all_output = all_output + "\n" + output
        except Exception as e:
            # running off the end of test_data is expected; only log others
            if "list index out of range" not in str(e):
                print_and_log("(p)"+str(e))
            pass
    # NOTE(review): diff[0] implies pt.predict returns an array-like price;
    # also assumes num_times/len(profitloss_pct) are nonzero -- confirm.
    avg_diff = sum([abs(diff[0]) for diff in diffs]) / num_times  # noqa
    pct_correct = 100 * num_times_directionally_correct / num_times
    modeled_profit_loss = sum(profitloss_pct) / len(profitloss_pct)
    output = 'directionally correct {} of {} times. {}%. avg diff={}, profit={}'.format(
        num_times_directionally_correct, num_times, round(pct_correct, 0),
        round(avg_diff, 4), round(modeled_profit_loss, 3))
    print_and_log("(p)"+output)
    all_output = all_output + "\n" + output
    end_time = int(time.time())
    pt.time = end_time - start_time
    pt.prediction_size = len(diffs)
    pt.output = all_output
    pt.percent_correct = pct_correct
    pt.avg_diff = avg_diff
    pt.profitloss = modeled_profit_loss
    pt.profitloss_int = int(pt.profitloss * 100)
    pt.save()
    return pt.pk