Example #1
import os

import numpy as np
from sklearn.preprocessing import StandardScaler

# `dt` (the project's own data-loading module), `encoding_dim`, and
# `holdout_size` are defined elsewhere in the original source.

# gradient clipping - to avoid exploding gradients
GRADIENT_CLIPPING = 2.5
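# NOTE (assumption): GRADIENT_CLIPPING is not referenced in this excerpt; in
# Keras it would typically be handed to the optimizer, e.g.:
#   optimizer = keras.optimizers.Adam(clipvalue=GRADIENT_CLIPPING)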

scaler = StandardScaler()
autoencoder_path = "/home/suroot/Documents/train/daytrader/models/autoencoder-" + str(
    encoding_dim) + ".hdf5"
cache = "/home/suroot/Documents/train/daytrader/autoencoded-" + str(
    encoding_dim) + ".npy"
savePath = r'/home/suroot/Documents/train/daytrader/'
path = r'/home/suroot/Documents/train/daytrader/ema-crossover'  # path to data
model_name = "raw_ts_model2"

use_cache = True

if not use_cache or not os.path.isfile(cache):
    data = dt.loadData(path)
    # normalize each series relative to its entry price (20 bars from the end)
    for i in range(data.shape[0]):
        data[i, :] = (data[i, :] / data[i, -20]) - 1.0
    # scale data .. don't forget to store the scaler weights as we will need them after.
    data = scaler.fit_transform(data)
    print(data.shape)
    # cache this data and the scaler weights.
    np.save(cache, data)
    # TODO: cache the scaler weights

print("loading cached data")
full_data = np.load(cache)
holdout = full_data[0:holdout_size, :]
data = full_data[holdout_size:, :]
print(data.shape)
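The TODO above can be closed with joblib, which scikit-learn recommends for persisting fitted estimators. A minimal sketch, assuming the scaler is saved next to the cached array (the filename is hypothetical):

import joblib

# persist the fitted scaler so the exact same transform can be re-applied later
joblib.dump(scaler, savePath + "scaler-" + str(encoding_dim) + ".pkl")
# ... and restore it before transforming unseen data
scaler = joblib.load(savePath + "scaler-" + str(encoding_dim) + ".pkl")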
Example #2

import os

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

num_stacked_layers = 2
# gradient clipping - to avoid exploding gradients
GRADIENT_CLIPPING = 2.5

scaler = StandardScaler()
cache = "/home/suroot/Documents/train/daytrader/seq2seq_raw-" + str(encoding_dim) + ".npy"
#savePath = r'/home/suroot/Documents/train/daytrader/'
#path =r'/home/suroot/Documents/train/daytrader/ema-crossover' # path to data
savePath = r'/home/suroot/Documents/train/raw/'
path = r'/home/suroot/Documents/train/raw/22222c82-59d1-4c56-a661-3e8afa594e9a'  # path to data
model_name = "pca2_ts_model"

use_cache = False

if not use_cache or not os.path.isfile(cache):
    data = dt.loadData(path, symbols=dt.CA_EXTRA)
    #for i in range(data.shape[0]):
    #    data[i, :] = (data[i, :] / data[i, -20]) - 1.0
    # scale data .. don't forget to store the scaler weights as we will need them after.
    data_scaled = scaler.fit_transform(data)
    # project the scaled examples down to `encoding_dim` principal components
    pca = PCA(n_components=encoding_dim, svd_solver='full')
    data_reduced = pca.fit_transform(data_scaled)
    print(data.shape)
    # cache this data and the scaler weights.
    np.save(cache, data_reduced)
    # TODO: cache the scaler (and fitted PCA) weights

print("loading cached data")
full_data = np.load(cache)
holdout = full_data[0:holdout_size, :]
data = full_data[holdout_size:, :]
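Since PCA discards variance, it is worth checking how much signal the encoding_dim components retain before trusting the cached file. A one-line check, assuming the cache was just rebuilt (the fitted pca object only exists inside the branch above):

# fraction of the scaled data's total variance retained by the reduction
print("explained variance: " + str(pca.explained_variance_ratio_.sum()))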
Example #3
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)

plt.rcParams['interactive'] = True

# fix random seed for reproducibility
np.random.seed(90210)

subset = -1

path = r'/home/suroot/Documents/train/daytrader/ema-crossover'  # path to data

data = dt.loadData(path, subset)

(data, labels) = dt.centerAroundEntry(data, -20)
print(data.shape)

print(np.sort(labels))
print("min: " + str(labels.min()))
print("max: " + str(labels.max()))

sns.distplot(labels)
plt.show()

(data2, labels2) = dt.filterOutliers(data, labels, 0.018, -0.016)

sns.distplot(labels2)
plt.show()
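dt.filterOutliers is part of the project's own module, but from the call site it evidently drops examples whose label falls outside an upper/lower bound. A hypothetical NumPy equivalent (name and behavior are assumptions, not the project's API):

def filter_outliers(data, labels, upper, lower):
    # keep only examples whose label lies strictly inside (lower, upper)
    mask = (labels < upper) & (labels > lower)
    return data[mask], labels[mask]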
Example #4
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)

plt.rcParams['interactive'] = True

# fix random seed for reproducibility
np.random.seed(90210)

subset = -1

path = r'/home/suroot/Documents/train/daytrader/ema-crossover'  # path to data

data_unsorted = dt.loadData(path, subset)

# sort ascending by entry/exit ratio, i.e. descending by trade return
data = np.array(sorted(data_unsorted, key=lambda x: (x[-20] / x[-1]) - 1.0))

print(data.shape)

labels = data[:, -1]    # final price of each example
entries = data[:, -20]  # entry price of each example

print((labels[0] / entries[0]) - 1.0)    # largest return (first after the sort)
print((labels[-1] / entries[-1]) - 1.0)  # smallest return (last after the sort)

#dt.plotTrainingExample(data[0,:])
# plot the three lowest-return examples
dt.plotTrainingExample(data[-1, :])
dt.plotTrainingExample(data[-2, :])
dt.plotTrainingExample(data[-3, :])
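For inspecting more than the two extremes, the per-example returns can be computed in one vectorized expression instead of indexing one row at a time; a minimal sketch using the arrays above:

returns = (labels / entries) - 1.0
print(returns[:5])   # the five largest returns (first after the sort)
print(returns[-5:])  # the five smallest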
Example #5

import numpy as np
from sklearn.model_selection import train_test_split

# fix random seed for reproducibility
np.random.seed(90210)
crop_future = -20
class_to_view = 5  # which class to inspect

num_classes = 5

input_size = 128

#savePath = r'/home/suroot/Documents/train/daytrader/'
#path =r'/home/suroot/Documents/train/daytrader/ema-crossover' # path to data
savePath = r'/home/suroot/Documents/train/raw/'
path = r'/home/suroot/Documents/train/raw/22222c82-59d1-4c56-a661-3e8afa594e9a'  # path to data
original = dt.loadData(path, symbols=dt.CA_EXTRA)
(data, labels_classed, _) = dt.cacheLoadData(path, crop_future, num_classes, input_size, symbols=dt.CA_EXTRA)
print(data.shape)

x_train, x_test, y_train, y_test = train_test_split(data, labels_classed, test_size=0.1, random_state=42)
# reuse the same random_state (any shared seed works) so x_original lines up
# row-for-row with x_test; two independent splits would shuffle differently
_, x_original, _, _ = train_test_split(original, labels_classed, test_size=0.1, random_state=42)

print("TEST SIZE: " + str(x_test.shape))

#/home/suroot/Documents/train/raw/mlp[3]-28-0.48.hdf5
from keras.models import load_model
model = load_model(savePath+'mlp[5]-48-0.41.hdf5')

model.summary()

class5Data = []
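The excerpt stops right after class5Data is declared; presumably it is then filled with the test examples the model assigns to class_to_view. A hypothetical continuation under that assumption:

# predict a class for each held-out example and keep the rows the model
# assigns to `class_to_view` (assumed intent; not in the original excerpt)
predictions = np.argmax(model.predict(x_test), axis=1)
class5Data = x_test[predictions == class_to_view]
print("examples predicted as class " + str(class_to_view) + ": " + str(len(class5Data)))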