Example #1
def _playLoop(features, param_grid):
    """Loop through the different macd paramaters to tests"""
    now = du.TIME_TRAVELER.getTime()
    param_grid_len = len(param_grid)

    for i, param in enumerate(param_grid):
        if param["fast_delay"] >= param["slow_delay"]:
            continue

        features["macd"] = indic.macd(features["vwap"], param["fast_delay"],
                                      param["slow_delay"],
                                      param["signal_delay"])
        # if log.VERBOSE >= 4:
        log.debug("Testing params:", param["fast_delay"], param["slow_delay"],
                  param["signal_delay"])

        _resetLedgers()
        param["score"] = _play(features)

        percent = i / param_grid_len * 100
        if i and percent.is_integer():  # log progress once per whole percent
            log.debug(
                str(int(percent)) + "% done", "- best yet:",
                sorted(param_grid, key=lambda k: k["score"])[-1])

    im.timeTravel(now)
    return sorted(param_grid, key=lambda k: k["score"])
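
The indic.macd() called above is not shown in these examples; the sketch below is a common MACD formulation in pandas (fast EMA minus slow EMA, with an EMA of that difference as the signal line), under the assumption that the indicator returns the histogram. It is an illustration, not the project's implementation.

import pandas as pd

def macd(series, fast_delay, slow_delay, signal_delay):
    """Histogram of a span-based MACD (assumed formulation)."""
    fast = series.ewm(span=fast_delay, adjust=False).mean()
    slow = series.ewm(span=slow_delay, adjust=False).mean()
    macd_line = fast - slow
    signal = macd_line.ewm(span=signal_delay, adjust=False).mean()
    return macd_line - signal  # positive -> bullish momentum

vwap = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0, 10.7])
print(macd(vwap, fast_delay=3, slow_delay=5, signal_delay=2))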
Example #2
def _play(features):
    """Play an epoch with the given macd parameters"""

    time_base = features.index[0]
    now = time_base
    index = 0  # in case len(features) == 0
    for index, feature in enumerate(features.values):
        macd = feature[1]
        if np.isnan(macd):
            continue

        # jump to the timestamp of the current row: offset from the first
        # index, not a cumulative increment
        now = time_base + du.secToNano(index * conf.TIME_INTERVAL * 60)
        im.timeTravel(now)
        if macd < -MIN_MACD:
            lm.buyOrSell(ActionEnum.SELL, CryptoEnum.XBT)
        elif macd > MIN_MACD:
            lm.buyOrSell(ActionEnum.BUY, CryptoEnum.XBT)

        if lm.gameOver():
            # if log.VERBOSE >= 4:
            log.warning("game over:", index, "/", len(features))
            return -42

    score = lm.getGlobalBalanceInQuote()
    hodl = features["vwap"].iat[index] / features["vwap"].iat[0] * 100

    # if log.VERBOSE >= 4:
    log.debug("score:", int(score - hodl),
              "(" + str(int(score)) + "-" + str(int(hodl)) + ")")

    return score  # - hodl
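
As a self-contained illustration of the scoring idea (trade on a MACD-style signal, then compare the final balance to a buy-and-hold benchmark), here is a toy loop with made-up prices and signals; the project's ledger, time-travel, and enum machinery is deliberately left out.

MIN_MACD = 0.05

def play(vwap, macd_values, start_quote=100.0):
    quote, crypto = start_quote, 0.0
    for price, macd in zip(vwap, macd_values):
        if macd > MIN_MACD and quote > 0:      # buy signal
            crypto, quote = quote / price, 0.0
        elif macd < -MIN_MACD and crypto > 0:  # sell signal
            quote, crypto = crypto * price, 0.0
    score = quote + crypto * vwap[-1]          # global balance in quote
    hodl = start_quote * vwap[-1] / vwap[0]    # buy-and-hold benchmark
    return score, hodl

print(play([10.0, 11.0, 10.5, 12.0], [0.2, -0.1, 0.3, -0.2]))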
Example #3
def trainModels(since):
    """Train all models and save the awesome result"""
    for model in mb.MODELS:
        if model.need_training:
            score = model.train(since)
            log.debug("Score:", score)
            if score:
                log.info("Saving model")
                model.save()
Example #4
def fetch(unused_args):
    """Fetch raw trade data since the beginning of times"""
    while not sig.EXIT and not im.fetchInputs():
        last_fetch = min((i.current_row.name for i in ib.INPUTS
                          if i.current_row is not None))
        log.info("Fetched data till", du.toStr(last_fetch))

    if not sig.EXIT:
        log.debug("Fetching done, optimizing database...")
        fu.maintenance()
        log.info("Database up to date!")
Example #5
    def train(self, since):
        log.debug("Train tendency")
        features, targets = self._prepare(since, with_targets=True)
        features = mb.reshape(features.values)
        targets = to_categorical(targets.values)
        if self.model is None:
            self.model = _createModel(features.shape)
        # fit() returns a History object that could be plotted later
        self.model.fit(
            features,
            targets,
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
            shuffle=False,
            verbose=mb.getVerbose())
        return self._score(since)
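
A minimal, self-contained sketch of the same fit pattern, assuming TensorFlow/Keras and random toy data; _createModel() and mb.reshape() from the project are not reproduced here.

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.utils import to_categorical

features = np.random.rand(32, 4)                       # 32 samples, 4 features
targets = to_categorical(np.random.randint(0, 3, 32))  # 3 classes, one-hot

model = Sequential([Input(shape=(4,)),
                    Dense(8, activation="relu"),
                    Dense(3, activation="softmax")])
model.compile(optimizer="adam", loss="categorical_crossentropy")
history = model.fit(features, targets, epochs=2, batch_size=8,
                    shuffle=False, verbose=0)
print(history.history["loss"])  # fit() returns a History object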
Example #6
def _getTradeData(kraken_trades_input, since):
    """
    Read the necessary data from inputs, and start feature preparation

    It is important to keep the returned data constant, as it is shared
    across the different models
    """
    trade_data = kraken_trades_input.read(since=since)
    log.debug("Read data from", du.toStr(trade_data.index[0]), "to",
              du.toStr(trade_data.index[-1]))
    trade_data = kraken_trades_input.resample(trade_data)
    log.debug("Resampled data from", du.toStr(trade_data.index[0]), "to",
              du.toStr(trade_data.index[-1]))
    trade_data = trade_data.loc[:, ["vwap", "volume"]]
    trade_data["vwap"] = Scaler().scaleFit(trade_data["vwap"])
    trade_data["volume"] = Scaler().scaleFit(trade_data["volume"])
    return trade_data
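
The project's Scaler().scaleFit() is not shown here; assuming it behaves like a per-column min-max fit-and-transform, an equivalent step with scikit-learn would look like this.

import pandas as pd
from sklearn.preprocessing import MinMaxScaler

trade_data = pd.DataFrame({"vwap": [100.0, 101.5, 99.8],
                           "volume": [1.2, 0.7, 2.4]})
trade_data["vwap"] = MinMaxScaler().fit_transform(trade_data[["vwap"]]).ravel()
trade_data["volume"] = MinMaxScaler().fit_transform(trade_data[["volume"]]).ravel()
print(trade_data)  # both columns scaled to [0, 1]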
Example #7
    def predict(self, since):
        """Call predict on the dependencies, then somehow merge the results"""

        # debug loop: only the last dependency's prediction is used below
        for model in self.dependencies:
            pred_df = model.predict(since)
            pred_df = pd.DataFrame((pred_df["buy"] - pred_df["sell"]).values,
                                   columns=["action"])
            last_pred = pred_df.iat[-1, 0]
            log.debug(model.__class__.__name__, "prediction:",
                      du.toStr(du.TIME_TRAVELER.getTime()), last_pred,
                      ActionEnum(round(last_pred)))

        # map the continuous score to SELL / BUY / HODL around ±MIN_PROBA
        pred_df = ((pred_df < -MIN_PROBA).astype(int).replace(
            1, ActionEnum.SELL.value) +
                   (pred_df > MIN_PROBA).astype(int).replace(
                       1, ActionEnum.BUY.value)).replace(
                           0, ActionEnum.HODL.value)
        return cryptoAndActionTotrade(CryptoEnum.XBT.value, pred_df)
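
The threshold expression above is dense; this self-contained sketch shows the same mapping of a continuous buy-minus-sell score to discrete actions (the numeric enum values are assumptions).

import pandas as pd

MIN_PROBA = 0.5
BUY, SELL, HODL = 1, -1, 0  # stand-ins for ActionEnum values

pred_df = pd.DataFrame({"action": [-0.9, 0.1, 0.7]})
actions = ((pred_df < -MIN_PROBA).astype(int).replace(1, SELL)
           + (pred_df > MIN_PROBA).astype(int).replace(1, BUY)
           ).replace(0, HODL)
print(actions)  # -> -1 (SELL), 0 (HODL), 1 (BUY)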
Example #8
    def train(self, since):
        log.debug("Train macd")
        features = self._prepare(since)
        lm.LEDGERS[CryptoEnum.XBT].verbose = log.VERBOSE >= 4
        lm.LEDGERS[conf.QUOTE].verbose = log.VERBOSE >= 4

        param_grid = list(
            ParameterGrid({
                "fast_delay": range(9, 100, 1),
                "slow_delay": range(25, 200, 1),
                "signal_delay": range(10, 30, 1),
                # "fast_delay": [9],
                # "slow_delay": [26],
                # "signal_delay": [10],
                "score": [-42]
            }))

        sorted_param_grid = _playLoop(features, param_grid)
        log.debug("Top Ten:")
        for param in sorted_param_grid[-10:]:
            log.debug(param)

        self.model = sorted_param_grid[-1]
        score = self.model["score"]
        del self.model["score"]
        return score
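
For reference, sklearn.model_selection.ParameterGrid expands a dict of value lists into every combination; the grid above covers 91 × 175 × 20 = 318,500 combinations before the fast < slow filter applied in _playLoop(). A tiny version:

from sklearn.model_selection import ParameterGrid

grid = list(ParameterGrid({"fast_delay": [9, 12],
                           "slow_delay": [21, 26],
                           "signal_delay": [9],
                           "score": [-42]}))
print(len(grid))  # 2 * 2 * 1 * 1 = 4 combinations
print(grid[0])    # one dict per combination, e.g. {'fast_delay': 9, ...}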
Example #9
    def cache(self, fresh_data=None, since=None, till=None):
        """
        Save some data to cache

        If `fresh_data` is given, append it to the cache,
        otherwise read the database from `since` to `till` and cache it
        """
        if fresh_data is not None:
            self._cache_data = self._cache_data.append(fresh_data)
            if not self._cache_data.empty:
                self._cache_data = self._cache_data.loc[
                    self._cache_data.index[-1] -
                    du.secToNano(CACHE_REAL_TIME_LOOKBACK_DAYS * 24 * 3600):]
        else:
            log.debug("Caching data from", du.toStr(since), "to",
                      du.toStr(till), "(" + self.__class__.__name__ + ")")
            self._cache_data = self._readFromFile(since, till)
        if not self._cache_data.empty:
            self.updateCurrentRow(self._cache_data.iloc[-1])
        else:
            log.warning("Database '" + self.__class__.__name__ + "' is emtpy")
            self._cache_data = pd.DataFrame(columns=self.raw_columns)
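
The lookback trim in the `fresh_data` branch keeps only the most recent window of the cache; a toy version with an integer nanosecond index (the real index type and du.secToNano() are assumptions about the project):

import pandas as pd

LOOKBACK_DAYS = 2
DAY_NS = 24 * 3600 * 10**9  # nanoseconds per day

cache = pd.DataFrame({"price": range(5)},
                     index=[i * DAY_NS for i in range(5)])  # one row per day
cache = cache.loc[cache.index[-1] - LOOKBACK_DAYS * DAY_NS:]
print(len(cache))  # 3 rows: .loc slicing keeps the cut-off row itself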
Example #10
def train(args):
    """Train the various (awesome) algorithms"""
    im.timeTravel(ib.SPLIT_DATE)

    log.debug("Train data: from", du.toStr(du.EPOCH), "to",
              du.toStr(ib.SPLIT_DATE))
    mm.trainModels(since=du.EPOCH)

    if args.graph:
        log.debug("Plot models on train data")
        mm.plotModels(since=du.EPOCH)

        log.debug("Plot models on test data")
        im.timeTravel(
            du.TIME_TRAVELER.getTime(force=True))  # back to the future
        log.debug("Test data: from", du.toStr(du.toStr(ib.SPLIT_DATE)), "to",
                  du.toStr(du.TIME_TRAVELER.getTime()))
        mm.plotModels(since=ib.SPLIT_DATE)

    log.debug("Job done!")

    if args.graph:
        import matplotlib.pyplot as plt
        plt.show()
Example #11
    def train(self, since):
        log.debug("Train extrema")
        features, targets = self._prepare(since, with_targets=True)
        self.model.fit(features, targets)
        return self.model.score(features, targets)
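
The fit()/score() pair follows the scikit-learn estimator API; a RandomForestClassifier on synthetic data stands in here for the project's actual model, which is not shown.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

features, targets = make_classification(n_samples=50, n_features=4,
                                         random_state=0)
model = RandomForestClassifier(random_state=0)
model.fit(features, targets)
print(model.score(features, targets))  # mean accuracy on the training data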
Example #12
def test_debug():
    log.debug("debug")