Example #1
    def sell(self, code, unit_price=0, cnt=0, desc=""):
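        # Sell cnt shares of code; unit_price defaults to the current date's close
        # and cnt defaults to the full holding.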
        kl = self.kls[code]
        data = kl.get_kabuka()
        (indexes, dates, open, high, low, close, volume) = data

        if code not in self.stocks:
            return False

        if unit_price == 0:
            unit_price = close[indexes[self.date]]

        if cnt == 0:
            cnt = self.stocks[code].get_cnt()

        s = self.stocks[code]
        buy_price = s.get_buy_price()
        price = unit_price * cnt
        pm = price - buy_price * cnt
        ret = s.sell(unit_price, cnt)
        if ret < 0:
            return False
        if ret == 0:
            del self.stocks[code]

        self.balance = self.balance + price - kf.get_charge(price)
        f.log(
            "[sell] C=%s, UP=%d, CNT=%d, P=%d, B=%d, PM=%d: R=[%s]"
            % (code, unit_price, cnt, price, self.balance, pm, desc)
        )
Example #2
def learn2(prgname, meigaras=[]):
    f.log("Starting learning")
    rootd = f.get_json_data(prgname)
    
    pf = get_element("play_fields", rootd)
    
    # Generate PlayField Data
    if pf["enabled"] == 1:
        mpf.create_pf2(prgname, meigaras)
        mpf.create_pf(prgname, meigaras)

    # Train Players
    tp = get_element("train_player", rootd)
    if tp is None:
        return
    if tp["enabled"] == 1:
        startd = tp["startd"]
        endd = tp["endd"]
        tm = TraderMemory(meigaras, startd, endd)
        #mtp.train(prgname, tm)
        trn = Training(prgname, tm)
        trn.train()
    
    f.log("Finished learning")
Example #3
 def _check_forecast_proc(self, datei, mode="DAILY"):
     if not self.use_forecast:
         return (DIR_UNKNOWN, 0)
     datei -= 1
     if datei in self.forec_cache:
         return self.forec_cache[datei]
     
     starti = datei - self.peak_check_period + 1
     results = []
     ximages = self.tfl.get_xs(self.code, self.data, starti, datei)
     if len(ximages) > 0:
         results = self.tfl.predict(ximages)
     (open, high, low, close, volume) = self._get_prices(datei+1, self.peak_check_period+2)
     for i in reversed(range(len(results))):
         points = results[-i]
         max_point = max(points)
         for j in range(len(points)):
             if points[j] > TRUSTED_POINTS and points[j] == max_point:
                 if j == 0 and min(low) >= low[-2-i]:
                     if DEBUG and self.curr_holding_n > 0:
                         f.log("[%s] Bottom peak. Forecast=Up" % self.code)
                     self.forec_cache[datei] = (DIR_UP, points[j])
                     return self.forec_cache[datei]
                 if j == 2 and max(high) <= high[-2-i]:
                     if DEBUG and self.curr_holding_n > 0:
                         f.log("[%s] Top peak, Forecast=Down" % self.code)
                     self.forec_cache[datei] = (DIR_DOWN, points[j])
                     return self.forec_cache[datei]
     self.forec_cache[datei] = (DIR_UNKNOWN, 0)
     return self.forec_cache[datei]
Example #4
    def g_copy_history(self, endclock):
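        # Copy recent history rows for the tracked items from the Zabbix DB,
        # yielding each row and mirroring it into the Jubatus-side history table.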
        lastclock = self._get_lastcopyclock()
        startclock = cf.clockaddminutes(endclock, -D_HISTORY_COPY_CLOCK_RANGE)
        if lastclock <= 0:
            lastclock = startclock
        if startclock > lastclock and lastclock > 0:
            endclock2 = cf.clockaddminutes(lastclock, +D_HISTORY_COPY_CLOCK_RANGE*2)
            endclock = min(endclock, endclock2)

        condlist = ["itemid in (%s)" % ",".join(cf.list2strlist(self._itemids))]
        condlist.append("clock >= %s" % str(startclock))
        condlist.append("clock <= %s" % str(endclock))
        strwhere = sf.list2where(condlist)

        strsql = "select itemid, clock, value from history %s;" % (strwhere)
        if DEBUG:
            print strsql
            cf.log(strsql)

        g_zbx = sf.g_get_data(MYSQL_ZBX, strsql)
        strsql = ""
        for row in g_zbx:
            (itemid, clock, value) = row
            yield row
            strsql = "%sreplace into history (itemid, clock, value) values(%s,%s,%s);\n" \
                % (strsql, itemid, clock, value)
        sf.exec_updsql(MYSQL_JUBATUS, strsql)
        self._upd_lastcopyclock(endclock)
Example #5
    def _run_anomaly(self, endclock, method="add"):
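        # Stream history rows into Jubatus: a repeated itemid marks the start of the
        # next sample, so the accumulated datum is flushed via _juba_proc.
        # method="add" trains the model, method="calc" scores samples for anomalies.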
        # Prepare data
        if method == "add":
            g_data = self.dgetter.g_get_history(endclock)
        if method == "calc":
            g_data = self.dgetter.g_copy_history(endclock)
        datadict = {}

        for row in g_data:
            (itemid, clock, value) = row
            itemid = str(itemid)

            if itemid in datadict:
                self._juba_proc(clock, datadict, method)
                datadict = {}

            if "hostid" not in datadict:
                (hour, weekday) = self.expand_clock(clock)
                datadict["hostid"] = self.hostid
                datadict["weekday"] = weekday*1.0/7
                datadict["hour"] = hour*1.0/24

            datadict[itemid] = value

        if len(datadict) > 0:
            self._juba_proc(clock, datadict, method)

        if method=="add":
            cf.log("Saving learned model")
            self.anom.save("latest")
            self.dgetter.remove_history(endclock)
Example #6
def learn(prgname, meigaras=[]):
    f.log("Starting learning")
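    # startd and endd are assumed to be defined at module level; they are not set
    # in this snippet.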
    tm = TraderMemory(meigaras, startd, endd)
    trn = Training(prgname, tm)
    trn.train()
    
    f.log("Finished learning")
Example #7
 def restore(self, ckptfile):
     if os.path.exists(ckptfile):
         saver = tf.train.Saver()
         saver.restore(self.sess, ckptfile)
         cf.log("Loaded from %s" % (ckptfile))
     else:
         cf.log("Ckpt file %s does not exist." % (ckptfile))
Example #8
 def _lm2trader(self):
     meigaras = self.traders.keys()
     if len(meigaras) >= self.division:
         f.log("Already holds %d stocks." % len(meigaras))
     for i in range(0, self.division):
         if len(meigaras) == 0:
             wherestr = ""
         else:
             wherestr = "where %s" % (kf.where_code_in(meigaras, True))
         strsql = "select name, code, fieldid, geneid, points, holding_days, trade_mode \
              from trade.learned_memory %s order by points desc limit 100;" % (wherestr)
         data = sql.exec_selsql(strsql)
         if len(data) == 0:
             f.log("No good meigaras to get this day.")
             break
         
         for row in data:
             (name, code, fieldid, geneid, points,
              holding_days, trade_mode) = row
             holding_days = int(holding_days)
             
             if code not in self.traders:
                 return Trader(code, geneid, self.tm, trade_mode, holding_days, 
                               "name=%s, fieldid=%s, geneid=%s, points=%s, holding_days=%s, trade_mode=%s" \
                               % (name, fieldid, geneid, str(points), str(holding_days), trade_mode))
Example #9
 def restore(self, load_dir="", current_date=""):
     ver = -1
     if self.is_restored:
         return (False, "", 0)
     self.set_params(save_folder=load_dir)
     if os.path.exists(self.version_file):
         try:
             with open(self.version_file, "r") as f:
                 line = (f.read()).split(",")
                 ver = line[0]
                 self.train_accuracy = line[1]
             if current_date != "":
                 if ver >= self.startd:
                     cf.log("Ckpt file is newer than current date.")
                     return (False, ver, self.train_accuracy)
                 if ver < dtf.datestrsub(current_date, CKPT_VALID_DAYS):
                     cf.log("Ckpt file is too old.")
                     return (False, ver, self.train_accuracy)
             saver = tf.train.Saver()
             saver.restore(self.sess, self.ckptfile)
             cf.log("Loaded from %s" % (self.ckptfile))
             self.is_restored = True
             return (True, ver, self.train_accuracy)
         except Exception as e:
             cf.log("Failed to load")
             raise
     else:
         cf.log("Ckpt file %s does not exist." % (self.ckptfile))
         return (False, ver, self.train_accuracy)
Example #10
 def _calc_score_proc(self, geneid):
     self.parameter = -1
     self.fieldids = self.coaches.keys()
     self._alloc_players(geneid)
     self._play_proc(self.fieldids)
     self._calc()
     self._selection()
     f.log("Finished playing")
Example #11
 def log(self, msg):
     if self.logfile == "":
         f.log(msg)
     else:
         logdir = "%s/simlator3" % LOG_DIR
         if not os.path.exists(logdir):
             os.makedirs(logdir)
         f.log(msg, self.logfile, logdir)
Example #12
def learn_line(prgname, meigaras=[]):
    f.log("Starting learning")
    rootd = f.get_json_data(prgname)
    
    pf = get_element("play_fields", rootd)
    
    # Generate PlayField Data
    if pf["enabled"] == 1:
        mpf.create_pf2(prgname, meigaras)
Example #13
    def train(self):
        for i in range(TRAIN_NUM):
            # batch = mnist.train.next_batch(25)
            batch = self.get_rnd_lines(BATCH_SIZE)
            if i % 10 == 0:
                train_accuracy = self.accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
                cf.log("step %d, training accuracy %g" % (i, train_accuracy))
            self.train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

        cf.log("Finished training")
Example #14
 def run(self):
     for i in range(50):
         batch = mnist.train.next_batch(25)
         if i%10 == 0:
             train_accuracy = self.accuracy.eval(feed_dict={
                 x:batch[0], y_: batch[1], keep_prob: 1.0})
             cf.log("step %d, training accuracy %g"%(i, train_accuracy))
         self.train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
     
     cf.log("Finished training")
Example #15
 def plot(self, startd, endd):
     (indexes, dates, open, high, low, close, volume) = self.data
     startd = self._get_near_date(startd, "f", indexes)
     endd = self._get_near_date(endd, "b", indexes)
     if startd == endd:
         f.log("plot: startd and endd are the same for code %s" % self.code)
     starti = indexes[startd]+60
     endi = indexes[endd]
     
     (xsl, ysl, labelsl) = self._get_xy(starti, endi, dates)
     plt.plot_kabuka2png(xsl, ysl, labelsl, "%s/%s_%s-%s.png" % (TMP_DIR, self.code, startd, endd))
Example #16
 def __init__(self, meigaras, startd="", endd=""):
     '''
     Constructor
     '''
     self.startd = startd
     self.endd = endd
     self.normal_lines = {}
     self.base_lines = {}
     self.trend_lines = {}
     self.data = kf.get_kabuka(meigaras, startd, endd)
     self._make_lines()
     f.log("finished preparation")
Example #17
 def train(self, train_num=0):
     if train_num == 0:
         train_num = TRAIN_NUM
     for i in range(train_num):
         #batch = mnist.train.next_batch(25)
         batch = self.get_line_data()
         if i%10 == 0:
             train_accuracy = self.accuracy.eval(feed_dict={
                 self.x:batch[0], self.y_: batch[1], self.keep_prob: 1.0})
             cf.log("step %d, training accuracy %g"%(i, train_accuracy))
         self.train_step.run(feed_dict={self.x: batch[0], self.y_: batch[1], self.keep_prob: 0.5})
     
     cf.log("Finished training")
Example #18
    def buy(self, code, unit_price, cnt, desc=""):
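        # Buy cnt shares of code at unit_price; fails if the balance cannot cover
        # the price plus the trading charge.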
        price = unit_price * cnt
        tmp_balance = self.balance - price - kf.get_charge(price)
        if tmp_balance < 0:
            return False

        if code in self.stocks:
            self.stocks[code].buy(unit_price, cnt)
        else:
            self.stocks[code] = StockInfo2(code, self.date, self.kls[code], unit_price, cnt)
        self.balance = tmp_balance
        f.log("[buy] C=%s, UP=%d, CNT=%d, P=%d, B=%d: R=[%s]" % (code, unit_price, cnt, price, self.balance, desc))
        return True
Example #19
def create_play_field(startd, endd, meigaras=[]):
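    # Generate play-field data and store it in the pf_line table (created from the
    # pf2 template).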
    f.log("Starting to create play field")
    (mcode, dl, X, y, report) = gen_pf_line(startd, endd, meigaras)
    mcode = np.array(mcode)
    dl = np.array(dl)
    X = np.array(X)

    tablename = "pf_line"
    tbl.create_table_from_tmpl("pf2", [tablename])

    r = np.c_[mcode, dl, X]
    tbl.arr2table(r, tablename)
    f.log("Finished creating play field")
Example #20
 def _set_dicts(self):
     odb = db()
     cursor = odb.get_cursor()
     #strsql = "select code, date, close, volsize, dow_sign from trade.dow "
     strsql = "select code, date, close from trade.kabuka "
     wherestr = kf.general_kabuka_where(self.meigaras, self.startd, self.endd)
     strsql = strsql + wherestr
     strsql = strsql + " order by code, date;"
     
     try:
         cursor.execute(strsql)
     except MySQLdb.Error as e:
         f.log(strsql)
         raise Exception("SQL ERROR %d: %s" % (e.args[0], e.args[1]))
Example #21
def train_bydate(cnt=100):
    json = f.get_json_data("ml_train_tf")
    span = json["span"]
    
    term_len = json["term_len"]
    dup_len = json["dup_len"]
    
    train_startd = span[0]
    train_endd = span[1]
    
    starti = 0
    eigyobis = dtf.get_eigyobis(train_startd, train_endd)
    while starti + term_len < len(eigyobis):
        tmp_endd = eigyobis[starti+term_len]
        f.log("Training nikkei 225")
        train225(eigyobis[starti], tmp_endd)

        f.log("Training from %s to %s" % (eigyobis[starti], tmp_endd))
        multi_training(eigyobis[starti], tmp_endd, get_meigaras(cnt, tmp_endd))
        
        predict_starti = starti+term_len+1
        if predict_starti < len(eigyobis):
            predict_endi = predict_starti + term_len
            if predict_endi >= len(eigyobis):
                predict_endi = len(eigyobis)-1
            
            tmp_endd = eigyobis[predict_endi]
            f.log("Making prediction from %s to %s" % (eigyobis[predict_starti], tmp_endd))
            multi_predict2db(eigyobis[starti], tmp_endd, get_meigaras(cnt, tmp_endd, term_len))
        
        starti += term_len - dup_len
        
    f.log("Finished")
Example #22
    def g_get_history(self, endclock):
        itemids = self._itemids
        startclock = cf.clockaddminutes(endclock, -LEARN_PERIOD)
        condlist = ["itemid in (%s)" % ",".join(cf.list2strlist(itemids))]
        condlist.append("clock >= %d" % int(startclock))
        condlist.append("clock <= %d" % int(endclock))
        strwhere = sf.list2where(condlist)

        # Get history data from Zabbix
        strsql = "select itemid, clock, value from history %s order by clock, itemid;" % (strwhere)
        if DEBUG:
            cf.log(strsql)

        g_data = sf.g_get_data(MYSQL_JUBATUS, strsql)
        for row in g_data:
            (itemid, clock, value) = row
            yield (itemid, clock, value)
Example #23
    def _juba_proc(self, clock, datadict, method="add"):
        #if DEBUG:
        #    print datadict
        datum = Datum()
        for k in datadict.keys():
            #print "key:%s value:%s" % (str(k), str(datadict[k]))
            if k == "hostid":
                datum.add_number(str(k), int(datadict[k])*1.0/ZBX_ITEMID_DIGITS)
            elif k == "weekday" or k == "hour":
                datum.add_number(str(k), datadict[k])
            else:
                datum.add_number(str(k), self.norm(k, datadict[k]))
        #print datum

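        # Retry the Jubatus RPC call on transport/timeout errors, re-creating the
        # client between attempts.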
        retry_cnt = JUBA_RETRY_MAX
        while True:
            try:
                if method=="add":
                    print datum
                    ret = self.anom.add(datum)
                if method=="calc":
                    print datum
                    score = self.anom.calc_score(datum)
                    if score == float('Inf') or score > ML_LIMIT:
                        #print datadict
                        if not self.alarm_on:
                            self.alarm_on = True
                            cf.log("[%s] score=%f" % (cf.clock2strjst(clock), score))
                    else:
                        if self.alarm_on:
                            self.alarm_on = False
                            cf.log("[%s] score recovered to normal:score=%f" % (cf.clock2strjst(clock), score))

                break
            except (msgpackrpc.error.TransportError, msgpackrpc.error.TimeoutError) as e:
                retry_cnt -= 1
                if retry_cnt <= 0:
                    raise
                self.anom.get_client().close()
                self.set_anom()

                print e
                time.sleep(JUBA_RETRY_INTERVAL)
                continue
Example #24
 def _train_proc(self):
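     # GA training loop: seed players from the DB, defaults, and random, then
     # repeatedly mate, copy, mutate, add random players, play, and select.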
     self.fieldids = self.coaches.keys()
     f.log("  Number of fields: %d" % len(self.fieldids))
     f.log("  Allocating first players")
     self._alloc_players_from_db()
     self._alloc_default_players()
     self._alloc_random_players()
     for i in range(0, self.train_count+1):
         f.log("  Processing turn %d.." % (i))
         random.shuffle(self.fieldids)
         self._mating()
         self._copy()
         self._mutate()
         self._alloc_random_players()
         f.log("    Training..")
         self._play()
         f.log("    Selection..")
         self._selection()
Example #25
 def train(self):
     f.log("Start training..")
     self._set_params()
     
     # Generate coaches for each play field
     f.log("Getting coaches")
     self._get_coaches(self.pf_table)
     
     # Create table if not exists
     tbl.create_table_from_tmpl(self.tp_template, [self.tp_table])
     
     # Train
     f.log("Training with GA")
     self._train_proc()
     
     # Insert report into table
     f.log("Creating report")
     self._report()
     f.log("Finished training..")
Example #26
    def simulate(self):
        datelist = dtf.get_datestr(self.startd, self.endd)
        self.relative_date_idx = 0
        for d in datelist:
            self._set_date(d)
            
            trader = self.choose(self.traders.keys())
            if trader is not None:
                code = trader.get_code()
                if code not in self.traders:
                    self.traders[code] = trader
            for code in self.traders.keys():
                self.trade(code)
            

        (eval_price, holding_n) = self.get_eval()
        #self._sell_all()
        f.log("Final evaluate amount is " + str(eval_price))
        f.log("Finished simulation.")
Example #27
    def simulate(self):
        datelist = dtf.get_datestr(self.startd, self.endd)
        self.relative_date_idx = 0
        for d in datelist:
            self._set_date(d)
            self.sell()
            
            trader = self.choose(self.meigaras, self.traders.keys())
            self.buy(trader)
            
            self.relative_date_idx += 1
            if self.relative_date_idx == self.one_cycle_period and self.one_cycle_period > 0:
                self.relative_date_idx = 0
            

        eval_price = self.account.get_eval_price()
        init_amount = self.account.get_init_amount()
        #self._sell_all()
        f.log("Final evaluate amount is " + str(eval_price))
        f.log("Finished simulation.")
Example #28
 def check_forecast(self, datei):
     if not self.use_forecast:
         return (DIR_UNKNOWN, 0)
     datei -= 1
     if datei in self.forec_cache:
         return self.forec_cache[datei]
     
     starti = datei - self.peak_check_period + 1
     (indexes, dates, open, high, low, close, volume) = self.data
     #self.tfl.train(11)
     #ximage = self.tfl.get_xs(self.code, self.data, starti, datei)
     #results = tff.predict(self.code, self.data, starti, datei, self.code)
     results = []
     for d in range(starti, datei+1):
         predict = self.get_prediction(dates[d])
         if "last_update" not in predict:
             continue
         last_update = predict["last_update"]
         if last_update <= dates[datei]:
             results.append(predict["value"])
     (open, high, low, close, volume) = self._get_prices(datei, self.peak_check_period+1)
     for i in reversed(range(len(results))):
         points = results[-i]
         if len(points) == 0:
             continue
         max_point = max(points)
         for j in range(len(points)):
             if points[j] > TRUSTED_POINTS and points[j] == max_point:
                 if j == 0 and min(low) >= low[-2-i] and low[0] > low[-2-i]:
                     if DEBUG and self.curr_holding_n > 0:
                         f.log("[%s] Bottom peak. Forecast=Up" % self.code)
                     self.forec_cache[datei] = (DIR_UP, points[j])
                     return self.forec_cache[datei]
                 if j == 2 and max(high) <= high[-2-i] and high[0] < high[-2-i]:
                     if DEBUG and self.curr_holding_n > 0:
                         f.log("[%s] Top peak, Forecast=Down" % self.code)
                     self.forec_cache[datei] = (DIR_DOWN, points[j])
                     return self.forec_cache[datei]
     self.forec_cache[datei] = (DIR_UNKNOWN, 0)
     return self.forec_cache[datei]
Example #29
 def check_forecast(self, datei):
     if not self.use_forecast:
         return DIR_UNKNOWN
     datei -= 1
     starti = datei - self.peak_check_period + 1
     ximage = self.tf_learning.get_xs(self.code, self.data, starti, datei)
     results = self.tf_learning.predict(ximage)
     for i in reversed(range(len(results))):
         points = results[-i]
         max_point = max(points)
         for j in range(len(points)):
             if points[j] > TRUSTED_POINTS and points[j] == max_point:
                 if j == 0:
                     if DEBUG and self.curr_holding_n > 0:
                         f.log("[%s] Bottom peak. Forecast=Up" % self.code)
                     return DIR_UP
                 if j == 2:
                     if DEBUG and self.curr_holding_n > 0:
                         f.log("[%s] Top peak, Forecast=Down" % self.code)
                     return DIR_DOWN
                 
     return DIR_UNKNOWN
Example #30
def simulate(dates):
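    # For each consecutive pair of dates, pick the newest checkpoint directory whose
    # name is not newer than the window start and run Simulator3 over that window.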
    dates.sort()
    for i in range(1, len(dates)):
        startd = dates[i-1]
        endd = dates[i]
        files = glob.glob("%s/20*" % PNFCKPT_DIR)
        files.sort()
        ckptdir = ""
        for fpath in reversed(files):
            if os.path.isdir(fpath):
                foldername = fpath.split("/")[-1]
                if startd >= foldername:
                    ckptdir = foldername
                    break
        if ckptdir == "":
            f.log("No learned data")
            exit(1)
        
        meigaras = kf.get_goodmeigaras(endd, 20)
        s = Simulator3(meigaras, "BOTH", "%s.log" % ckptdir, ckptdir)
        s.simulate(startd, endd)
        s = None