def kline_similarity(k, f, size):
    """Return the Euclidean distance between feature *f* and the most
    recent MACD window of kline series *k*.

    k: list of kline bars; index 4 of each bar is the close price.
       The last bar is dropped (presumably the still-forming bar —
       TODO confirm against the data feed).
    f: feature dict with "diff", "dea", "macd" lists of length >= size.
    size: window length to compare.

    The original implementation looped over every index of the MACD
    series but skipped all of them except the last, and shadowed the
    normalized-close list `c` with a scalar accumulator; this version
    computes the single final-window distance directly.
    """
    closes = [bar[4] for bar in k[:-1]]
    # Normalize so the series starts at 1.0 and is scale-independent.
    closes = [v / closes[0] for v in closes]
    diff, dea, _macd = utils.macd(np.array(closes))
    last = len(diff) - 1
    a = b = c = 0.0
    for j in range(size):
        a += math.pow(f["diff"][j] - diff[last - size + 1 + j], 2)
        b += math.pow(f["dea"][j] - dea[last - size + 1 + j], 2)
        c += math.pow(f["macd"][j] - _macd[last - size + 1 + j], 2)
    return math.sqrt(a + b + c)
def feature_benchmark(k, f, size):
    """Compute a distance threshold for feature *f* against the
    historical windows of kline series *k*.

    Slides a `size`-length window over the MACD series of *k* (skipping
    the first 100 indices as warm-up — presumably to let MACD
    stabilize; confirm with utils.macd), collects the Euclidean
    distance of each window to *f*, and returns the value at the
    `config.benchmark` percentile (measured from the top) of the sorted
    distances.

    Fixes over the original: the warm-up skip is expressed in the range
    itself; the normalized-close list is no longer shadowed by the
    accumulator `c`; the percentile index is clamped so benchmark == 0
    cannot index one past the end of the list.
    """
    p = config.benchmark
    closes = [bar[4] for bar in k[:-1]]
    # Normalize so the series starts at 1.0 and is scale-independent.
    closes = [v / closes[0] for v in closes]
    diff, dea, _macd = utils.macd(np.array(closes))
    distances = []
    for i in range(100, len(diff)):
        a = b = c = 0.0
        for j in range(size):
            a += math.pow(f["diff"][j] - diff[i - size + 1 + j], 2)
            b += math.pow(f["dea"][j] - dea[i - size + 1 + j], 2)
            c += math.pow(f["macd"][j] - _macd[i - size + 1 + j], 2)
        distances.append(math.sqrt(a + b + c))
    distances.sort()
    # Clamp: with p == 0 the raw index would equal len(distances).
    idx = min(int(len(distances) * (100 - p) / 100.0), len(distances) - 1)
    return distances[idx]
def handle_data(context, data):
    """Diagnostic tick handler: once per day at 14:59 log the moving
    averages, bull-market flag, MACD values and recent price position
    of g.security."""
    now = context.current_dt
    # Only act on the 14:59 bar; skip every other tick.
    if now.hour != 14 or now.minute != 59:
        return
    # Moving-average check.
    ma5 = utils.ma(g.security, 5)
    ma10 = utils.ma(g.security, 10)
    ma20 = utils.ma(g.security, 20)
    log.info("%s MA测试 ==> MA5=%s, MA10=%s, MA20=%s", g.security, f2(ma5), f2(ma10), f2(ma20))
    # Bullish-alignment check.
    bullish = utils.is_bull_market(g.security)
    log.info('%s 是否多头排列测试 ==> %s', g.security, bullish)
    # MACD check (latest values only).
    diff, dea, macd = utils.macd(g.security)
    log.info("%s MACD测试 ==> DIFF=%s, DEA=%s, MACD=%s", g.security, f2(diff[-1]), f2(dea[-1]), f2(macd[-1]))
    # Price position over the recent window (defaults to ~2 years per utils).
    recent_gains, recent_decline = utils.stock_position(g.security)
    log.info("==> %s股票的位置:近期涨幅=%.2f,近期跌幅=%.2f", g.security, recent_gains, recent_decline)
def feature(k, point, feature_size):
    """Build a MACD feature dict for the kline bar whose timestamp
    (index 0 of the bar) equals *point*.

    Returns a dict with keys "diff", "dea", "macd" — each a list of the
    `feature_size` MACD values immediately before the matched bar — and
    "v", the benchmark distance threshold from feature_benchmark().

    Changes over the original: removed seven lines of commented-out
    Python-2 debug prints; `ki` is initialized so an empty series no
    longer raises NameError at the slice.
    """
    closes = [bar[4] for bar in k[:-1]]
    # Normalize so the series starts at 1.0 and is scale-independent.
    closes = [v / closes[0] for v in closes]
    diff, dea, _macd = utils.macd(np.array(closes))
    # Locate the bar matching `point`.
    ki = 0
    for ki in range(len(closes)):
        if k[ki][0] == point:
            break
    # NOTE(review): if `point` is absent, ki silently ends at the last
    # index and the feature is built from the series tail; likewise a
    # ki < feature_size makes the slice start negative and wrap around.
    # Confirm callers always pass a timestamp present in k with enough
    # history before it.
    f = {
        "diff": list(diff[ki - feature_size:ki]),
        "dea": list(dea[ki - feature_size:ki]),
        "macd": list(_macd[ki - feature_size:ki]),
    }
    f["v"] = feature_benchmark(k, f, feature_size)
    return f