Ejemplo n.º 1
0
def main(debug):
    """Walk-forward test of every currency in DATA on its held-out set.

    Logs per-currency RMSD, pips-per-trade and win ratio, then the
    mean +- std of each metric across all currencies.
    """
    all_rmsd = []
    all_ppt = []
    all_wr = []
    for info in DATA:
        currency = info['currency']
        interval = info['intervals'][0]
        pip_mul = info['pip_mul']
        actions = calculateActions(info['trail'])

        # prepare the test data and the learned Q table
        df = loadData(currency, interval, 'test')
        df = getBackgroundKnowledge(df, PERIODS)
        q = loadQ(currency, interval)

        rewards, errors, ticks = [], [], []
        for idx, _row in df.iterrows():
            # evaluate from every tick forward to the end of the frame
            q, reward, err, tick = test(df.loc[idx:], q, PERIODS, actions, pip_mul)
            rewards.append(reward)
            errors.append(err * pip_mul)
            ticks.append(tick)

        rmsd = np.sqrt(np.mean([e ** 2 for e in errors]))
        win_ratio = np.mean([1. if r > 0. else 0. for r in rewards])
        ppt = np.mean(rewards) * pip_mul

        logging.warn('{0} RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}]'.format(
            currency,
            None,
            int(rmsd),
            int(ppt),
            sum(rewards),
            win_ratio * 100,
            np.mean(ticks),
        ))

        all_rmsd.append(rmsd)
        all_ppt.append(ppt)
        all_wr.append(win_ratio)

    logging.error('RMSD {0:.0f} +- {1:.0f}'.format(np.mean(all_rmsd), np.std(all_rmsd)))
    logging.error('PPT {0:.0f} +- {1:.0f}'.format(np.mean(all_ppt), np.std(all_ppt)))
    logging.error('WR {0:.0f} +- {1:.0f}'.format(np.mean(all_wr) * 100, np.std(all_wr) * 100))
Ejemplo n.º 2
0
def main(debug):
    pt = PrettyTable(
        ['Currency', 'min trail', 'date', '1', '2', '3', '4', '5'])
    for info in DATA:
        currency = info['currency']
        min_trail = info['trail']
        interval = info['intervals'][0]
        pip_mul = info['pip_mul']

        actions = calculateActions(min_trail)

        df = loadData(currency, interval)

        df = getBackgroundKnowledge(df, PERIODS)
        # print df
        # break

        q = loadQ(currency, interval)

        df_last = df[-1:]
        row = df_last.iloc[-1]
        predictions = predict(df, q, PERIODS, actions, pip_mul, row)

        # logging.warn('{0} {1} {2}'.format(currency, row.name, a))
        pt.add_row([currency, min_trail, row.name] + predictions)

    print pt
Ejemplo n.º 3
0
def main(debug):
    """Predict a trade (side plus trail) and its stop-loss per currency."""
    interval = choice(INTERVALS)

    for currency, min_trail in CURRENCIES.iteritems():
        # JPY pairs quote pips at two decimals, all others at four
        pip_mul = 100. if 'JPY' in currency else 10000.
        actions = calculateActions(min_trail)

        df = loadData(currency, interval)[-1000:]
        df = getBackgroundKnowledge(df, PERIODS)
        q = loadQ(currency, interval)

        df_last = df[-1:]
        action = predict(df, q, PERIODS, actions)
        row = df_last.iloc[-1]

        # action string looks like "<buy|sell>-<trail pips>"
        side, trail = action.split('-')
        offset = float(trail) / pip_mul
        if side == 'buy':
            stop_loss = row['close'] - offset
        else:
            stop_loss = row['close'] + offset

        logging.warn('{0} {1} a:{2} t:{3} sl:{4:.4f}'.format(
            row.name, currency, side, trail, stop_loss))
Ejemplo n.º 4
0
def main(debug):
    """For each currency, predict the next action and log its stop-loss."""
    interval = choice(INTERVALS)

    for currency, min_trail in CURRENCIES.iteritems():
        pip_mul = 100. if 'JPY' in currency else 10000.  # JPY pairs use 2-decimal pips
        actions = calculateActions(min_trail)

        df = loadData(currency, interval)
        df = getBackgroundKnowledge(df[-1000:], PERIODS)
        q = loadQ(currency, interval)

        df_last = df[-1:]
        a = predict(df, q, PERIODS, actions)
        row = df_last.iloc[-1]

        a_trade, a_trail = a.split('-')
        # stop sits below the close for longs, above it for shorts
        delta = float(a_trail) / pip_mul
        stop_loss = row['close'] - delta if a_trade == 'buy' else row['close'] + delta

        logging.warn('{0} {1} a:{2} t:{3} sl:{4:.4f}'.format(
            row.name,
            currency,
            a_trade,
            a_trail,
            stop_loss,
        ))
Ejemplo n.º 5
0
def main(debug):
    """Monte-carlo test: 2000 randomly-seeded runs per currency in DATA,
    logging RMSD, pips-per-trade and win ratio."""
    for info in DATA:
        currency = info['currency']
        interval = info['intervals'][0]
        pip_mul = info['pip_mul']
        actions = calculateActions(info['trail'])

        df = loadData(currency, interval)
        df = getBackgroundKnowledge(df, PERIODS)
        q = loadQ(currency, interval)

        rewards, errors, ticks = [], [], []
        for _ in xrange(2000):
            # keep at least 20 ticks after the sampled start point
            start = randint(0, len(df) - 20)
            q, r, err, tick = test(df.iloc[start:], q, PERIODS, actions, pip_mul)
            rewards.append(r)
            errors.append(err * pip_mul)
            ticks.append(tick)

        rmsd = np.sqrt(np.mean([e ** 2 for e in errors]))
        wins = [1. if r > 0. else 0. for r in rewards]

        logging.warn(
            '{0} RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}]'
            .format(
                currency,
                None,
                int(rmsd),
                int(np.mean(rewards) * pip_mul),
                sum(rewards),
                np.mean(wins) * 100,
                np.mean(ticks),
            ))
Ejemplo n.º 6
0
def main(debug):
    """Random-start evaluation (1000 samples) over the last 2000 ticks of
    each currency; logs RMSD, pips-per-trade and win ratio."""
    interval = choice(INTERVALS)

    for currency, min_trail in CURRENCIES.iteritems():
        pip_mul = 100. if 'JPY' in currency else 10000.  # JPY: 2-decimal pips
        actions = calculateActions(min_trail)

        df = getBackgroundKnowledge(loadData(currency, interval)[-2000:], PERIODS)
        q = loadQ(currency, interval)

        rewards, errors, ticks = [], [], []
        for _ in xrange(1000):
            start = randint(0, len(df) - 1)
            q, r, err, tick = test(df.iloc[start:], q, PERIODS, actions, pip_mul)
            rewards.append(r)
            errors.append(err * pip_mul)
            ticks.append(tick)

        rmsd = np.sqrt(np.mean([e ** 2 for e in errors]))
        wins = [1. if r > 0. else 0. for r in rewards]

        logging.warn(
            '{0} RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}]'
            .format(
                currency,
                None,
                int(rmsd),
                int(np.mean(rewards) * pip_mul),
                sum(rewards),
                np.mean(wins) * 100,
                np.mean(ticks),
            ))
Ejemplo n.º 7
0
def main(debug):
    """Sample 2000 random windows per currency and report test metrics."""
    for info in DATA:
        currency = info['currency']
        min_trail = info['trail']
        interval = info['intervals'][0]
        pip_mul = info['pip_mul']

        actions = calculateActions(min_trail)

        frame = loadData(currency, interval)
        frame = getBackgroundKnowledge(frame, PERIODS)
        q = loadQ(currency, interval)

        rewards = []
        errors = []
        ticks = []
        for trial in xrange(2000):
            # leave a 20-tick tail beyond the random start
            begin = randint(0, len(frame) - 20)
            q, reward, err, tick = test(frame.iloc[begin:], q, PERIODS, actions, pip_mul)
            rewards.append(reward)
            errors.append(err * pip_mul)
            ticks.append(tick)

        rmsd = np.sqrt(np.mean([e ** 2 for e in errors]))
        wins = [1. if r > 0. else 0. for r in rewards]

        logging.warn('{0} RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}]'.format(
            currency,
            None,
            int(rmsd),
            int(np.mean(rewards) * pip_mul),
            sum(rewards),
            np.mean(wins) * 100,
            np.mean(ticks),
        ))
Ejemplo n.º 8
0
def main(debug):
    """Evaluate each currency with 1000 randomly-seeded test runs over its
    most recent 2000 ticks."""
    interval = choice(INTERVALS)

    for currency, min_trail in CURRENCIES.iteritems():
        pip_mul = 100. if 'JPY' in currency else 10000.  # JPY: 2-decimal pips
        actions = calculateActions(min_trail)

        history = loadData(currency, interval)
        history = getBackgroundKnowledge(history[-2000:], PERIODS)
        q = loadQ(currency, interval)

        rewards = []
        errors = []
        ticks = []
        trial = 0
        while trial < 1000:
            trial += 1
            begin = randint(0, len(history) - 1)
            q, reward, err, tick = test(history.iloc[begin:], q, PERIODS, actions, pip_mul)
            rewards.append(reward)
            errors.append(err * pip_mul)
            ticks.append(tick)

        rmsd = np.sqrt(np.mean([e ** 2 for e in errors]))
        wins = [1. if r > 0. else 0. for r in rewards]

        logging.warn('{0} RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}]'.format(
            currency,
            None,
            int(rmsd),
            int(np.mean(rewards) * pip_mul),
            sum(rewards),
            np.mean(wins) * 100,
            np.mean(ticks),
        ))
Ejemplo n.º 9
0
def main(equity, debug):
    pips = []
    pt = PrettyTable(
        ['Currency', 'min trail', 'date', '1', '2', '3', '4', '5'])
    for info in DATA:
        currency = info['currency']
        min_trail = info['trail']
        interval = info['intervals'][0]
        pip_mul = info['pip_mul']
        logging.warn('{0}...'.format(currency))

        actions = calculateActions(min_trail)

        df = loadData(currency, interval, 'test')

        df = getBackgroundKnowledge(df, PERIODS)
        # print df
        # break

        q = loadQ(currency, interval)

        df_last = df[-1:]
        row = df_last.iloc[-1]
        predictions = predict(df, q, PERIODS, actions, pip_mul, row)

        # logging.warn('{0} {1} {2}'.format(currency, row.name, a))
        pt.add_row([currency, min_trail,
                    str(row.name).split(' ')[0]] + predictions)

        pips.append(int(predictions[0].split(' ')[0].split('-')[1]))

    print pt

    equity = float(equity)
    risk = 0.10
    available = equity * risk
    logging.info('Risk ${0:.0f} from ${1:.0f} at {2:.0f}%'.format(
        available, equity, risk * 100))

    total_pips = sum(pips)
    lot_size = available / total_pips
    lot_size /= len(pips)
    logging.warn('Lot size = {0:.2f}'.format(lot_size))
Ejemplo n.º 10
0
def main(equity, debug):
    pips = []
    pt = PrettyTable(['Currency', 'min trail', 'date', '1', '2', '3', '4', '5'])
    for info in DATA:
        currency = info['currency']
        min_trail = info['trail']
        interval = info['intervals'][0]
        pip_mul = info['pip_mul']
        logging.warn('{0}...'.format(currency))

        actions = calculateActions(min_trail)

        df = loadData(currency, interval, 'test')

        df = getBackgroundKnowledge(df, PERIODS)
        # print df
        # break

        q = loadQ(currency, interval)

        df_last = df[-1:]
        row = df_last.iloc[-1]
        predictions = predict(df, q, PERIODS, actions, pip_mul, row)

        # logging.warn('{0} {1} {2}'.format(currency, row.name, a))
        pt.add_row([currency, min_trail, str(row.name).split(' ')[0]] + predictions)

        pips.append(int(predictions[0].split(' ')[0].split('-')[1]))

    print pt

    equity = float(equity)
    risk = 0.10
    available = equity * risk
    logging.info('Risk ${0:.0f} from ${1:.0f} at {2:.0f}%'.format(available, equity, risk * 100))

    total_pips = sum(pips)
    lot_size = available / total_pips
    lot_size /= len(pips)
    logging.warn('Lot size = {0:.2f}'.format(lot_size))
Ejemplo n.º 11
0
def main(debug):
    """Train the Q-learner for every currency in ever-longer sessions.

    Each pass of the outer loop trains each currency for `minutes`
    wall-clock minutes (1, then 2, 3, ...), saving the Q table after each
    currency.  In debug mode every loop body runs exactly once.
    """
    interval = choice(INTERVALS)

    minutes = 0
    while True:  # forever: each cycle trains one minute longer than the last
        minutes += 1
        seconds_to_run = 60 * minutes
        # progress is reported four times per session
        seconds_info_intervals = seconds_to_run / 4
        logging.error('Training each currency for {0} minutes'.format(minutes))

        # shuffle(CURRENCIES)
        for currency, min_trail in CURRENCIES.iteritems():
            # JPY pairs quote pips at 2 decimals, all others at 4
            pip_mul = 100. if 'JPY' in currency else 10000.
            actions = calculateActions(min_trail)

            df = loadData(currency, interval)
            df = df[-2000:]  # train on the most recent 2000 ticks only

            df = getBackgroundKnowledge(df, PERIODS)
            # print df
            # break

            # learning rate / exploration start at 0 and are recomputed
            # from the win ratio after every epoch
            alpha = 0
            epsilon = 0
            q = loadQ(currency, interval)

            time_start = time()
            time_interval = 0

            epoch = 0
            rewards = []
            errors = []
            ticks = []
            logging.warn('Training {0} on {1} with {2} ticks...'.format(currency, interval, len(df)))
            while True:  # epochs until the time budget is spent
                epoch += 1
                logging.info(' ')
                logging.info('{0}'.format('=' * 20))
                logging.info('EPOCH {0}'.format(epoch))

                # random starting tick, leaving at least a 20-tick tail
                index_start = randint(0, len(df)-20)
                df_inner = df.iloc[index_start:]
                logging.info('Epoch: at {0} with {1} ticks'.format(index_start, len(df_inner)))
                q, r, error, tick = train(df_inner, q, alpha, epsilon, PERIODS, actions, pip_mul)

                # results
                rewards.append(r)
                errors.append(error * pip_mul)
                ticks.append(tick)

                # win ratio
                wins = [1. if r > 0. else 0. for r in rewards]
                win_ratio = np.mean(wins)
                # logging.error('wr {0}'.format(win_ratio))

                # adjust values: explore more while the win ratio is low;
                # epsilon ranges over [0, 0.1] and alpha is half of it
                epsilon = np.sqrt((1 - win_ratio) * 100.) / 100.
                alpha = epsilon / 2.
                # logging.error('new alpha = {0}'.format(alpha))

                # report (and maybe stop) once per info interval
                if time() - time_start > time_interval or debug:

                    # prune lengths: cap stat windows so averages track
                    # recent behaviour
                    while len(rewards) > 1000 + minutes * 1000:
                        rewards.pop(0)
                        errors.pop(0)
                        ticks.pop(0)

                    # RMSD
                    rmsd = np.sqrt(np.mean([e**2 for e in errors]))
                    logging.info('RMSD: {0}'.format(rmsd))

                    logging.warn('{0} [{1:05d}] RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}, a:{7:.2f}, e:{8:.2f}]'.format(
                        currency,
                        epoch,
                        int(rmsd),
                        int(np.mean(rewards) * pip_mul),
                        sum(rewards),
                        np.mean(wins) * 100,
                        np.mean(ticks),
                        alpha * 100,
                        epsilon * 100,
                    ))

                    # exit once the session's time budget is exhausted
                    if (time() - time_start >= seconds_to_run) or debug:
                        break

                    # saveQ(currency, interval, q)
                    time_interval += seconds_info_intervals

            saveQ(currency, interval, q)

            summarizeActions(q)

            if debug:
                break  # currencies

        if debug:
            break  # forever
Ejemplo n.º 12
0
def main(debug):
    """Train the Q-learner on every currency in DATA with sessions that
    grow one minute longer on each outer pass.

    The Q table is checkpointed at each progress interval and again after
    each currency.  In debug mode every loop body runs exactly once.
    """

    minutes = 0
    while True:  # forever: each cycle trains one minute longer
        minutes += 1
        seconds_to_run = 60 * minutes
        # progress/checkpoints happen five times per session
        seconds_info_intervals = seconds_to_run / 5
        logging.error('Training each currency for {0} minutes'.format(minutes))

        shuffle(DATA)
        for info in DATA:
            logging.debug('Currency info: {0}'.format(info))
            currency = info['currency']
            interval = info['intervals'][0]
            pip_mul = info['pip_mul']

            df = loadData(currency, interval, 'train')

            df = getBackgroundKnowledge(df, PERIODS)

            # learning rate and exploration start at 0; refitted from the
            # win ratio after every epoch
            alpha = 0.
            epsilon = alpha / 2.
            q = loadQ(currency, interval)

            time_start = time()
            time_interval = seconds_info_intervals

            epoch = 0
            rewards = []
            errors = []
            ticks = []
            logging.warn('Training {0} on {1} with {2} ticks [m:{3}]'.format(
                currency,
                interval,
                len(df),
                minutes,
            ))

            while True:  # epochs until the time budget is spent
                epoch += 1
                logging.info(' ')
                logging.info('{0}'.format('=' * 20))
                logging.info('EPOCH {0}'.format(epoch))

                # random starting tick, keeping at least a 20-tick tail
                index_start = randint(0, len(df)-20)
                df_inner = df.iloc[index_start:]
                logging.info('Epoch: at {0} with {1} ticks'.format(index_start, len(df_inner)))
                q, r, error, tick = train(df_inner, q, alpha, epsilon, PERIODS, ACTIONS, pip_mul, info['std'])

                # results
                error *= pip_mul
                rewards.append(r)
                errors.append(error)
                ticks.append(tick)

                # win ratio
                wins = [1. if r > 0. else 0. for r in rewards]
                win_ratio = np.mean(wins)
                # logging.error('wr {0}'.format(win_ratio))

                # adjust values: quadratic fits in win_ratio -- both alpha
                # and epsilon shrink towards ~0 as the win ratio nears 1
                alpha = 1.0102052281586786e+000 + (-2.0307383627607809e+000 * win_ratio) + (1.0215546892913909e+000 * win_ratio**2)
                epsilon = 3.9851080604500078e-001 + (2.1874724815820201e-002 * win_ratio) + (-4.1444101741886652e-001 * win_ratio**2)
                # logging.error('new alpha = {0}'.format(alpha))

                # only do updates at interval
                if time() - time_start > time_interval or debug:

                    # prune lengths
                    while len(rewards) > 1000 + minutes * 1000:
                        rewards.pop(0)
                        errors.pop(0)
                        ticks.pop(0)

                    # RMSD
                    rmsd = np.sqrt(np.mean([e**2 for e in errors]))
                    # logging.error('RMSD: {0} from new error {1}'.format(rmsd, error))

                    logging.warn('{0} [{1:05d}] RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}, a:{7:.2f}, e:{8:.2f}]'.format(
                        currency,
                        epoch,
                        int(rmsd),
                        int(np.mean(rewards) * pip_mul),
                        sum(rewards),
                        np.mean(wins) * 100,
                        np.mean(ticks),
                        alpha * 100,
                        epsilon * 100,
                    ))

                    # exit after the final interval of the session
                    if time_interval >= seconds_to_run or debug:
                        break

                    # continue: checkpoint and schedule the next report
                    time_interval += seconds_info_intervals
                    saveQ(currency, interval, q)

            saveQ(currency, interval, q)

            summarizeActions(q)

            if debug:
                break  # currencies

        if debug:
            break  # forever
Ejemplo n.º 13
0
def main(debug):
    """Train the Q-learner for every currency in ever-longer sessions.

    Each outer pass trains each currency for `minutes` wall-clock minutes
    (1, then 2, 3, ...), saving the Q table after each currency.  In debug
    mode every loop body runs exactly once.
    """
    interval = choice(INTERVALS)

    minutes = 0
    while True:  # forever: each cycle trains one minute longer
        minutes += 1
        seconds_to_run = 60 * minutes
        # progress is reported four times per session
        seconds_info_intervals = seconds_to_run / 4
        logging.error('Training each currency for {0} minutes'.format(minutes))

        # shuffle(CURRENCIES)
        for currency, min_trail in CURRENCIES.iteritems():
            # JPY pairs quote pips at 2 decimals, all others at 4
            pip_mul = 100. if 'JPY' in currency else 10000.
            actions = calculateActions(min_trail)

            df = loadData(currency, interval)
            df = df[-2000:]  # train on the most recent 2000 ticks only

            df = getBackgroundKnowledge(df, PERIODS)
            # print df
            # break

            # learning rate / exploration start at 0; recomputed from the
            # win ratio after every epoch
            alpha = 0
            epsilon = 0
            q = loadQ(currency, interval)

            time_start = time()
            time_interval = 0

            epoch = 0
            rewards = []
            errors = []
            ticks = []
            logging.warn('Training {0} on {1} with {2} ticks...'.format(
                currency, interval, len(df)))
            while True:  # epochs until the time budget is spent
                epoch += 1
                logging.info(' ')
                logging.info('{0}'.format('=' * 20))
                logging.info('EPOCH {0}'.format(epoch))

                # random starting tick, leaving at least a 20-tick tail
                index_start = randint(0, len(df) - 20)
                df_inner = df.iloc[index_start:]
                logging.info('Epoch: at {0} with {1} ticks'.format(
                    index_start, len(df_inner)))
                q, r, error, tick = train(df_inner, q, alpha, epsilon, PERIODS,
                                          actions, pip_mul)

                # results
                rewards.append(r)
                errors.append(error * pip_mul)
                ticks.append(tick)

                # win ratio
                wins = [1. if r > 0. else 0. for r in rewards]
                win_ratio = np.mean(wins)
                # logging.error('wr {0}'.format(win_ratio))

                # adjust values: explore more while the win ratio is low;
                # epsilon ranges over [0, 0.1] and alpha is half of it
                epsilon = np.sqrt((1 - win_ratio) * 100.) / 100.
                alpha = epsilon / 2.
                # logging.error('new alpha = {0}'.format(alpha))

                # report (and maybe stop) once per info interval
                if time() - time_start > time_interval or debug:

                    # prune lengths: cap stat windows so averages track
                    # recent behaviour
                    while len(rewards) > 1000 + minutes * 1000:
                        rewards.pop(0)
                        errors.pop(0)
                        ticks.pop(0)

                    # RMSD
                    rmsd = np.sqrt(np.mean([e**2 for e in errors]))
                    logging.info('RMSD: {0}'.format(rmsd))

                    logging.warn(
                        '{0} [{1:05d}] RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}, a:{7:.2f}, e:{8:.2f}]'
                        .format(
                            currency,
                            epoch,
                            int(rmsd),
                            int(np.mean(rewards) * pip_mul),
                            sum(rewards),
                            np.mean(wins) * 100,
                            np.mean(ticks),
                            alpha * 100,
                            epsilon * 100,
                        ))

                    # exit once the session's time budget is exhausted
                    if (time() - time_start >= seconds_to_run) or debug:
                        break

                    # saveQ(currency, interval, q)
                    time_interval += seconds_info_intervals

            saveQ(currency, interval, q)

            summarizeActions(q)

            if debug:
                break  # currencies

        if debug:
            break  # forever
Ejemplo n.º 14
0
def main(debug):
    """Train the Q-learner on every currency in DATA with sessions that
    grow one minute longer each outer pass; checkpoints the Q table at
    each progress interval and after each currency.  Debug mode runs each
    loop body exactly once.
    """

    minutes = 0
    while True:  # forever: each cycle trains one minute longer
        minutes += 1
        seconds_to_run = 60 * minutes
        # progress/checkpoints happen five times per session
        seconds_info_intervals = seconds_to_run / 5
        logging.error('Training each currency for {0} minutes'.format(minutes))

        shuffle(DATA)
        for info in DATA:
            logging.debug('Currency info: {0}'.format(info))
            currency = info['currency']
            interval = info['intervals'][0]
            pip_mul = info['pip_mul']

            df = loadData(currency, interval, 'train')

            df = getBackgroundKnowledge(df, PERIODS)

            # learning rate and exploration start at 0; refitted from the
            # win ratio after every epoch
            alpha = 0.
            epsilon = alpha / 2.
            q = loadQ(currency, interval)

            time_start = time()
            time_interval = seconds_info_intervals

            epoch = 0
            rewards = []
            errors = []
            ticks = []
            logging.warn('Training {0} on {1} with {2} ticks [m:{3}]'.format(
                currency,
                interval,
                len(df),
                minutes,
            ))

            while True:  # epochs until the time budget is spent
                epoch += 1
                logging.info(' ')
                logging.info('{0}'.format('=' * 20))
                logging.info('EPOCH {0}'.format(epoch))

                # random starting tick, keeping at least a 20-tick tail
                index_start = randint(0, len(df) - 20)
                df_inner = df.iloc[index_start:]
                logging.info('Epoch: at {0} with {1} ticks'.format(
                    index_start, len(df_inner)))
                q, r, error, tick = train(df_inner, q, alpha, epsilon, PERIODS,
                                          ACTIONS, pip_mul, info['std'])

                # results
                error *= pip_mul
                rewards.append(r)
                errors.append(error)
                ticks.append(tick)

                # win ratio
                wins = [1. if r > 0. else 0. for r in rewards]
                win_ratio = np.mean(wins)
                # logging.error('wr {0}'.format(win_ratio))

                # adjust values: quadratic fits in win_ratio -- both alpha
                # and epsilon shrink towards ~0 as the win ratio nears 1
                alpha = 1.0102052281586786e+000 + (
                    -2.0307383627607809e+000 *
                    win_ratio) + (1.0215546892913909e+000 * win_ratio**2)
                epsilon = 3.9851080604500078e-001 + (
                    2.1874724815820201e-002 *
                    win_ratio) + (-4.1444101741886652e-001 * win_ratio**2)
                # logging.error('new alpha = {0}'.format(alpha))

                # only do updates at interval
                if time() - time_start > time_interval or debug:

                    # prune lengths: cap stat windows so averages track
                    # recent behaviour
                    while len(rewards) > 1000 + minutes * 1000:
                        rewards.pop(0)
                        errors.pop(0)
                        ticks.pop(0)

                    # RMSD
                    rmsd = np.sqrt(np.mean([e**2 for e in errors]))
                    # logging.error('RMSD: {0} from new error {1}'.format(rmsd, error))

                    logging.warn(
                        '{0} [{1:05d}] RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}, a:{7:.2f}, e:{8:.2f}]'
                        .format(
                            currency,
                            epoch,
                            int(rmsd),
                            int(np.mean(rewards) * pip_mul),
                            sum(rewards),
                            np.mean(wins) * 100,
                            np.mean(ticks),
                            alpha * 100,
                            epsilon * 100,
                        ))

                    # exit after the final interval of the session
                    if time_interval >= seconds_to_run or debug:
                        break

                    # continue: checkpoint and schedule the next report
                    time_interval += seconds_info_intervals
                    saveQ(currency, interval, q)

            saveQ(currency, interval, q)

            summarizeActions(q)

            if debug:
                break  # currencies

        if debug:
            break  # forever
Ejemplo n.º 15
0
def main(debug):
    """Walk-forward test of every currency on its held-out set; logs
    per-currency metrics and the mean +- std across currencies."""
    rmsds = []
    ppts = []
    wrs = []
    for info in DATA:
        currency = info['currency']
        interval = info['intervals'][0]
        pip_mul = info['pip_mul']
        actions = calculateActions(info['trail'])

        df = getBackgroundKnowledge(loadData(currency, interval, 'test'), PERIODS)
        q = loadQ(currency, interval)

        rewards = []
        errors = []
        ticks = []
        for i, _ in df.iterrows():
            # evaluate from this tick onwards
            q, r, error, tick = test(df.loc[i:], q, PERIODS, actions, pip_mul)
            rewards.append(r)
            errors.append(error * pip_mul)
            ticks.append(tick)

        rmsd = np.sqrt(np.mean([e ** 2 for e in errors]))
        win_ratio = np.mean([1. if r > 0. else 0. for r in rewards])
        ppt = np.mean(rewards) * pip_mul

        logging.warn(
            '{0} RMSD {2:03d} PPT {3:03d} WR {5:.0f}% [ticks:{6:.1f} sum:{4:.1f}]'
            .format(
                currency,
                None,
                int(rmsd),
                int(ppt),
                sum(rewards),
                win_ratio * 100,
                np.mean(ticks),
            ))

        rmsds.append(rmsd)
        ppts.append(ppt)
        wrs.append(win_ratio)

    logging.error('RMSD {0:.0f} +- {1:.0f}'.format(np.mean(rmsds),
                                                   np.std(rmsds)))
    logging.error('PPT {0:.0f} +- {1:.0f}'.format(np.mean(ppts), np.std(ppts)))
    logging.error('WR {0:.0f} +- {1:.0f}'.format(
        np.mean(wrs) * 100,
        np.std(wrs) * 100))