コード例 #1
0
    def __init__(self,
                 cryptoCoin='ETH',
                 buyPercentage=0.5,
                 initialUsd=1000.0,
                 initialCrypto=1.0,
                 isRealTime=True):
        """Set up the trading-simulation state and warm up the predictor.

        Parameters
        ----------
        cryptoCoin : str
            Ticker symbol of the traded coin (paired against USD).
        buyPercentage : float
            Fraction of available funds used per buy order.
        initialUsd : float
            Starting USD balance.
        initialCrypto : float
            Starting crypto balance.
        isRealTime : bool
            Whether the simulation consumes a live ticker feed.
        """
        self.initialCrypto = initialCrypto
        self.initialUsd = initialUsd
        self.isRealTime = isRealTime

        self.startTime = datetime.datetime.now().isoformat()
        self.usd = initialUsd
        self.crypto = initialCrypto
        self.cryptoCoin = cryptoCoin
        self.coinPair = '{}-USD'.format(self.cryptoCoin)
        self.buyPercentage = buyPercentage

        # NOTE(review): `modelName` is a free variable resolved at module
        # level -- confirm it is defined before this class is instantiated.
        self.predictor = Predict_model(modelName)
        self.pastCurrentPrice = None
        self.pastPastCurrentPrice = None
        self.predictedDelta = None
        self.predictPrice = None

        self.valueHistory = []

        # Fix: seed with the configured coin pair instead of a hard-coded
        # 'ETH-USD', so non-ETH simulations warm up on the right market.
        startingData = data_loader.getCandles(self.coinPair, 60,
                                              save=False)[['open']]

        # Warm-start the online predictor on historical open prices.
        for currentPrice in startingData.values:
            # Explicit None check: a 0.0 price (or a numpy value with
            # ambiguous truthiness) must not skip the model update.
            if self.pastCurrentPrice is not None:
                self.predictor.get_model(self.pastCurrentPrice, currentPrice)

            self.pastPastCurrentPrice = self.pastCurrentPrice
            self.pastCurrentPrice = currentPrice
コード例 #2
0
import logging

import matplotlib.pyplot as plt
import pandas as pd
from statsmodels.graphics import tsaplots

import data_loader

logging.basicConfig(level=logging.INFO)  # script-wide INFO-level logging

def stats_graph(data):
    """Plot the raw series and its pandas autocorrelation on stacked axes.

    Relies on ``plt`` (matplotlib.pyplot), which the module-level imports
    must provide.

    Parameters
    ----------
    data : pandas.Series
        Series to inspect (e.g. open prices).
    """
    def label(ax, string):
        # Pin a caption inside the top-right corner of the axes.
        ax.annotate(string, (1, 1), xytext=(-8, -8), ha='right', va='top',
                    xycoords='axes fraction', textcoords='offset points')

    fig, axes = plt.subplots(nrows=2)
    fig.tight_layout()

    axes[0].plot(data)
    label(axes[0], 'Raw Data')

    pd.plotting.autocorrelation_plot(data, ax=axes[1])
    label(axes[1], 'Pandas Autocorrelation')

    # Remove some of the titles and labels that were automatically added
    for ax in axes.flat:
        ax.set(title='', xlabel='')
    plt.show()

# One month of minute candles; only the open-price series is analysed.
candles = data_loader.getCandles('ETH-USD', 60, start='2018-02-14T00:00:25+01:00', end='2018-03-14T00:00:25+01:00', save=True)
data = candles['open']
# pd.ewma() was deprecated in pandas 0.18 and removed in 0.23; the
# .ewm() accessor is the supported spelling of the same computation.
ewma = data.ewm(span=30).mean()


stats_graph(data)
コード例 #3
0
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))

import pandas as pd
import plotly.offline as py
import plotly.graph_objs as go
from plotly import tools
import data_loader
from utils.indicators import addTendency
from utils.plots import candlesPlot, closesPlot, movingAveragePlot, bollingerBandsPlots, volumePlot, tendencyShapes

# The minute-resolution fetch is kept only for its save=True side effect
# (caching candles to disk); its return value was previously bound to
# `ticks` and immediately overwritten below, so the binding is dropped.
data_loader.getCandles('ETH-USD',
                       60,
                       start='2016-10-14T00:00:25+01:00',
                       end='2018-03-22T00:00:25+01:00',
                       save=True)

# Hourly candles are what actually gets plotted.
ticks = data_loader.getCandles('ETH-USD', 3600, save=False)
addTendency(ticks, threshold=3)

fig = tools.make_subplots(rows=2, cols=1)

# Row 1: price panel (candles, closes, 10-period MA, Bollinger bands).
fig.append_trace(candlesPlot(ticks), 1, 1)
fig.append_trace(closesPlot(ticks), 1, 1)
fig.append_trace(movingAveragePlot(ticks, 10), 1, 1)

(bbPlot1, bbPlot2) = bollingerBandsPlots(ticks, 10)
fig.append_trace(bbPlot1, 1, 1)
fig.append_trace(bbPlot2, 1, 1)
コード例 #4
0
                    action='store_const',
                    const=True,
                    default=False,
                    help='Only visualize results of previously saved model')
parser.add_argument('-r',
                    '--resume',
                    action='store_const',
                    const=True,
                    default=False,
                    help='resume from previous checkpoint')

args = parser.parse_args()
"""Get Data"""
# Three months of minute-resolution ETH-USD candles; keep only opens.
dataset = data_loader.getCandles('ETH-USD',
                                 60,
                                 start='2018-02-01T00:00:25+01:00',
                                 end='2018-05-01T00:00:25+01:00',
                                 save=True)[['open']]
"""###Normalize data"""

# normalize the dataset
# Consecutive-sample ratios: element i becomes price[i+1] / price[i],
# so the model sees relative moves rather than absolute prices.
dataset1 = dataset.values[:-1]
dataset2 = dataset.values[1:]

dataset_value = dataset  # keep the raw DataFrame before overwriting `dataset`
dataset = dataset2 / dataset1
"""###Split data into training and test. Training is the past, test is the future."""

# split into train and test sets
# 70/30 chronological split: train on the past, test on the future.
train_size = int(len(dataset) * 0.7)
test_size = len(dataset) - train_size
コード例 #5
0
logging.basicConfig(level=logging.INFO)  # script-wide INFO-level logging

def stats_graph(data):
    """Compare four autocorrelation views of *data* on stacked axes."""

    def _caption(ax, text):
        # Anchor the caption inside the top-right corner of the axes.
        ax.annotate(text, (1, 1), xytext=(-8, -8), ha='right', va='top',
                    size=14, xycoords='axes fraction',
                    textcoords='offset points')

    fig, axes = plt.subplots(nrows=4, figsize=(8, 12))
    fig.tight_layout()

    raw_ax, mpl_ax, sm_ax, pd_ax = axes

    raw_ax.plot(data)
    _caption(raw_ax, 'Raw Data')

    mpl_ax.acorr(data, maxlags=data.size - 1)
    _caption(mpl_ax, 'Matplotlib Autocorrelation')

    tsaplots.plot_acf(data, sm_ax)
    _caption(sm_ax, 'Statsmodels Autocorrelation')

    pd.plotting.autocorrelation_plot(data, ax=pd_ax)
    _caption(pd_ax, 'Pandas Autocorrelation')

    # Strip the titles/labels the plotting helpers added automatically.
    for ax in axes.flat:
        ax.set(title='', xlabel='')
    plt.show()

# One month of minute candles. The window bounds are passed by keyword
# for consistency with the other getCandles call sites in this project.
candles = data_loader.getCandles('ETH-USD', 60,
                                 start='2018-02-01T00:00:25+01:00',
                                 end='2018-02-28T23:58:25+01:00')
data = candles['open']
stats_graph(data)
コード例 #6
0
parser.add_argument(
    '--visualize_results',
    '-v',
    action='store_true',
    default=False,
    help='use this to visualize the results of the currently trained model')
args = parser.parse_args()

use_cuda = torch.cuda.is_available()
start_epoch = 0  # start from epoch 0 or last checkpoint epoch
best_loss = 99999  # sentinel "worst" loss; replaced by the first real epoch loss

###Load data
# Three days of minute candles, labelled with a price-tendency column.
dataset = data_loader.getCandles('ETH-USD',
                                 60,
                                 start='2018-03-12T13:19:54.527842',
                                 end='2018-03-15T13:19:54.527861',
                                 save=True)
addTendency(dataset, threshold=0.05)

# Standardize open price and volume and append them as extra columns
# (open_norm / vol_norm) alongside the raw data.
scaler = StandardScaler()
scaler.fit(dataset[['open', 'volume']])
t = scaler.transform(dataset[['open', 'volume']])
tdf = pd.DataFrame(t, columns=['open_norm', 'vol_norm'], index=dataset.index)
dataset = pd.concat([dataset, tdf], axis=1)
"""###Split data into training and test. Training is the past, test is the future."""

# split into train and test sets
# 70/30 chronological split: train on the past, test on the future.
train_size = int(len(dataset) * 0.7)
test_size = len(dataset) - train_size
train, test = dataset.iloc[0:train_size, :], dataset.iloc[
コード例 #7
0
if __name__ == '__main__':

    if args.realtime:
        # Live mode: drive the simulator from a real ticker feed.
        logging.basicConfig(level=logging.INFO)
        sim = Simulator()
        tickerFeed = TickerFeed(sim.coinPair, 60)
        tickerFeed.onTickerReceived(sim.process)
        tickerFeed.run()
    elif args.perfect:
        # Backtest with a "perfect" oracle: each tick is fed together
        # with the true next-step price ratio as the predicted delta.
        logging.basicConfig(level=logging.INFO)
        logging.info('USING A PERFECT MODEL')
        sim = Simulator(isRealTime=False)
        simTestData = data_loader.getCandles('ETH-USD',
                                             60,
                                             start='2018-02-14T00:00:25+01:00',
                                             end='2018-03-14T00:00:25+01:00',
                                             save=True)[['open']]

        for i in range(0, len(simTestData) - 1):
            item = simTestData.iloc[i]
            # Fix: use .iloc[0] — integer indexing via item[0] on a
            # Series with a non-integer index is deprecated and was
            # removed in pandas 2.0.
            price = item.iloc[0]
            time = item.name.isoformat()

            nextItem = simTestData.iloc[i + 1]
            nextPrice = nextItem.iloc[0]
            sim.process({
                'price': price,
                'time': time
            }, {'delta': nextPrice / price})
コード例 #8
0
# Boolean flags: action='store_true' is the idiomatic shorthand for
# store_const/const=True/default=False and yields the identical
# namespace (matches the style used elsewhere in this project).
parser.add_argument('-v',
                    '--visualize',
                    action='store_true',
                    help='Only visualize results of previously saved model')
parser.add_argument('-r',
                    '--resume',
                    action='store_true',
                    help='resume from previous checkpoint')

args = parser.parse_args()
"""Get Data"""
dataset = data_loader.getCandles('ETH-USD', 60, '2018-02-01T00:00:25+01:00',
                                 '2018-05-01T00:00:25+01:00')[['open']]
"""###Normalize data"""

# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(dataset)
dataset = scaler.transform(dataset)
scaler_path = os.path.join(os.path.dirname(__file__),
                           'checkpoint/{}/scaler.pkl'.format(checkpoint_name))
joblib.dump(scaler, scaler_path)
"""###Split data into training and test. Training is the past, test is the future."""

# split into train and test sets
train_size = int(len(dataset) * 0.7)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
コード例 #9
0
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))

import pandas as pd
import plotly.offline as py
import plotly.graph_objs as go
from plotly import tools
import data_loader
from utils.indicators import addTendency
from utils.plots import candlesPlot, closesPlot, movingAveragePlot, bollingerBandsPlots, volumePlot, tendencyShapes

#ticks = data_loader.getCandles('ETH-USD', 60, start='2016-10-14T00:00:25+01:00', end='2018-03-22T00:00:25+01:00', save=True)

# Hourly candles from the local cache, annotated with tendency labels.
ticks = data_loader.getCandles('ETH-USD', 3600, save=False)
addTendency(ticks, threshold=0.05)


fig = tools.make_subplots(rows=2, cols=1)

# Row 1 collects every price-related trace; row 2 holds volume.
(bbPlot1, bbPlot2) = bollingerBandsPlots(ticks, 10)
priceTraces = [
    candlesPlot(ticks),
    closesPlot(ticks),
    movingAveragePlot(ticks, 10),
    bbPlot1,
    bbPlot2,
]
for trace in priceTraces:
    fig.append_trace(trace, 1, 1)

fig.append_trace(volumePlot(ticks), 2, 1)