Example #1
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers

# `dt` is the project's data-loading helper module (its import is not shown in this listing)

# fix random seed for reproducibility
np.random.seed(90210)

num_classes = 5
batch_size = 256
epochs = 7500
crop_future = -20

input_size = 128

#savePath = r'/home/suroot/Documents/train/daytrader/'
#path =r'/home/suroot/Documents/train/daytrader/ema-crossover' # path to data
savePath = r'/home/suroot/Documents/train/raw/'
path = r'/home/suroot/Documents/train/raw/22222c82-59d1-4c56-a661-3e8afa594e9a'  # path to data
(data, labels_classed, _) = dt.cacheLoadData(path,
                                             crop_future,
                                             num_classes,
                                             input_size,
                                             symbols=dt.CA_EXTRA)
print(data.shape)

x_train, x_test, y_train, y_test = train_test_split(data,
                                                    labels_classed,
                                                    test_size=0.1)

model = Sequential()
model.add(
    Dense(128,
          activation='relu',
          input_dim=data.shape[1],
          kernel_regularizer=regularizers.l2(0.01)))
#model.add(Dropout(0.2))
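The listing is cut off after the first hidden layer. A minimal sketch of how such a classifier is commonly finished, assuming the labels loaded above are one-hot encoded; the output layer, optimizer and training call below are illustrative, not taken from the original:

# hypothetical completion, not part of the original snippet
model.add(Dense(num_classes, activation='softmax'))  # one output unit per class

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))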
Example #2
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from keras.models import Sequential
from keras.layers import Dense

# `dt`: project data-loading helpers (import not shown in this listing)

# fix random seed for reproducibility
np.random.seed(90210)

num_classes = 5
batch_size = 256
epochs = 7500

input_size = 256

subset = -1  # -1 to use the entire data set

savePath = r'/home/suroot/Documents/train/daytrader/'
path = r'/home/suroot/Documents/train/daytrader/encoder-' + str(
    input_size) + '.npy'  # path to data

(data, labels_classed) = dt.cacheLoadData(path, num_classes,
                                          -1)  # just to load labels
data = np.load(path)

ss = StratifiedShuffleSplit(n_splits=1, test_size=0.1)
for train_index, test_index in ss.split(data, labels_classed):
    print("TRAIN:", train_index, "TEST:", test_index)
    x_train, x_test = data[train_index], data[test_index]
    y_train, y_test = labels_classed[train_index], labels_classed[test_index]

dt.plotTrainingExample(x_train[15, :])

model = Sequential()
model.add(
    Dense(128,
          activation='relu',
          input_dim=data.shape[1],
Example #3
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit

# `dt`: project data-loading helpers (import not shown in this listing);
# crop_future, num_classes, input_size and input_seq_len come from earlier
# parts of the original script that this listing omits

# num of input signals
input_dim = 1
# num of output signals
output_dim = 1
# num of stacked lstm layers
num_stacked_layers = 2
# gradient clipping - to avoid gradient exploding
GRADIENT_CLIPPING = 2.5
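GRADIENT_CLIPPING is only declared in this excerpt. Assuming the seq2seq graph is built with TensorFlow 1.x, as the constants suggest, the value is typically applied by clipping the global gradient norm before the update step; a minimal, self-contained sketch with a stand-in loss:

import tensorflow as tf

# stand-in graph so the clipping pattern below is runnable on its own
x_ph = tf.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.ones([1, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x_ph, w)))

# clip gradients by global norm before applying them
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
grads, variables = zip(*optimizer.compute_gradients(loss))
clipped_grads, _ = tf.clip_by_global_norm(grads, GRADIENT_CLIPPING)
train_op = optimizer.apply_gradients(zip(clipped_grads, variables))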

scaler = StandardScaler()
savePath = r'/home/suroot/Documents/train/daytrader/'
path = r'/home/suroot/Documents/train/daytrader/ema-crossover'  # path to data

(data, labels_classed, _) = dt.cacheLoadData(path, crop_future, num_classes,
                                             input_size)
print("data: " + str(data.shape))
ss = StratifiedShuffleSplit(test_size=0.1)
for train_index, test_index in ss.split(data, labels_classed):
    print("TRAIN:", train_index, "TEST:", test_index)
    x_train, x_test = data[train_index], data[test_index]
    y_train, y_test = labels_classed[train_index], labels_classed[test_index]

print(x_train.shape)


def generate_train_samples(x,
                           y,
                           batch,
                           batch_size=10,
                           input_seq_len=input_seq_len,
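The listing breaks off inside the signature of generate_train_samples. A hypothetical, self-contained version of such a window sampler, assuming 1-D input/output series, an output_seq_len defined alongside input_seq_len, and dropping the original's batch argument:

# hypothetical sketch, not the original implementation
def generate_train_samples_sketch(x, y, batch_size=10,
                                  input_seq_len=30, output_seq_len=5):
    # pick random window starts that leave room for input and output sequences
    starts = np.random.randint(0, len(x) - input_seq_len - output_seq_len,
                               size=batch_size)
    input_seq = np.array([x[s:s + input_seq_len] for s in starts])
    output_seq = np.array([y[s + input_seq_len:s + input_seq_len + output_seq_len]
                           for s in starts])
    # add the trailing feature axis expected by a (batch, time, input_dim) model
    return input_seq[..., np.newaxis], output_seq[..., np.newaxis]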
Example #4
import numpy as np
from keras.models import Model
from keras.layers import Input, Dense

# `dt`: project data-loading helpers (import not shown in this listing)

# fix random seed for reproducibility
np.random.seed(90210)

num_classes = 5
batch_size = 256
epochs = 2500

input_size = -1

subset = -1  # -1 to use the entire data set

savePath = r'/home/suroot/Documents/train/daytrader/'
path = r'/home/suroot/Documents/train/daytrader/ema-crossover'  # path to data
(data, labels_classed) = dt.cacheLoadData(path, num_classes, input_size)

x_train = data
y_train = labels_classed

# visualize some data from training
#dt.plotTrainingExample(data[50,:])
#dt.plotTrainingExample(data[150,:])
#dt.plotTrainingExample(data[4500,:])

# this is the size of our encoded representations
encoding_dim = 128

# this is our input placeholder
input = Input(shape=(data.shape[1], ))
# "encoded" is the encoded representation of the input