Code example #1 (score: 0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
    File name: scatter_plot.[extension].py
    Description: Scatter plot for MyDataSet instance.
    Author: Mathilde DUVERGER
    Date created: 2018/10/08
    Python Version: 3.6
"""

from mydataset import MyDataSet

# Load the training dataset and render its scatter plot in one pass;
# read_csv returns the populated dataset, so the calls can be chained.
MyDataSet().read_csv('resources/dataset_train.csv').plot_scatter()
Code example #2 (score: 0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
    File name: describe.[extension].py
    Description: Describe input CSV DataSet.
    Author: Mathilde DUVERGER
    Date created: 2018/10/08
    Python Version: 3.6
"""

from mydataset import MyDataSet
import sys

# Guard the required CLI argument: previously a missing path raised a
# bare IndexError; now the user gets a clear usage message instead.
if len(sys.argv) < 2:
    sys.exit("usage: {} <dataset.csv>".format(sys.argv[0]))

path = sys.argv[1]  # argv entries are already str; no cast needed
dataset_train = MyDataSet().read_csv(path)
dataset_train.describe()
Code example #3 (score: 0)
File: train.py — Project: heli06/MyCNN
# In[2]:

# Training hyper-parameters.
EPOCH = 25
BATCH_SIZE = 4
LR = 0.001

# In[3]:

# Resize every image to 96x96 and convert to a tensor. Normalize with
# mean 0 and std 1 is effectively an identity transform here.
transform = transforms.Compose([
    transforms.Resize([96, 96]),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0, 0, 0), std=(1, 1, 1))
])

train_set = MyDataSet('./birds/train/', transform=transform, labels=None)
train_loader = Data.DataLoader(train_set,
                               batch_size=BATCH_SIZE,
                               shuffle=True,
                               num_workers=4)
test_set = MyDataSet('./birds/test/', transform=transform, labels=None)
test_loader = Data.DataLoader(test_set,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=4)

# In[4]:

print(train_set.classes)
print(train_set.class_to_idx)
# BUG FIX: the original printed `train_set.__len__`, i.e. the bound method
# object itself, not the sample count. Call len() to print the length.
print(len(train_set))
Code example #4 (score: 0)
File: logreg_train.py — Project: mathwild/42DSLR
"""
    File name: logreg_train.[extension].py
    Description: Train logistic regression.
    Author: Mathilde DUVERGER, Alessandro GIRELLI, Louis LIMNAVONG
    Date created: 2018/10/08
    Python Version: 3.6
"""

from mydataset import MyDataSet
from logregmodel import LogRegModel
from preprocessing import full_one_hot_encoder, to_matrix
import sys

path = str(sys.argv[1])
print(path)
dataset_train = MyDataSet().read_csv(path)

# getting X
DictX = dataset_train[['Best Hand', 'Astronomy', 'Herbology',
                       'Defense Against the Dark Arts',
                       'Muggle Studies', 'Ancient Runes',
                       'History of Magic', 'Transfiguration', 
                       'Charms', 'Flying']]

DictX_encod = full_one_hot_encoder(DictX)
X = to_matrix(DictX_encod)
# getting Y
Y = dataset_train['Hogwarts House']
###

model = LogRegModel()
Code example #5 (score: 0)
"""
    File name: logreg_train.[extension].py
    Description: Predict logistic regression.
    Author: Mathilde DUVERGER, Alessandro GIRELLI, Louis LIMNAVONG
    Date created: 2018/10/08
    Python Version: 3.6
"""

from mydataset import MyDataSet
from logregmodel import LogRegModel
from preprocessing import full_one_hot_encoder, to_matrix
import sys
import csv

path = str(sys.argv[1])
dataset_test = MyDataSet().read_csv(path)

# getting X
DictX_test = dataset_test[[
    'Best Hand', 'Astronomy', 'Herbology', 'Defense Against the Dark Arts',
    'Muggle Studies', 'Ancient Runes', 'History of Magic', 'Transfiguration',
    'Charms', 'Flying'
]]

DictX_encod_test = full_one_hot_encoder(DictX_test)
X_test = to_matrix(DictX_encod_test)
###

dataset_train = MyDataSet().read_csv('resources/dataset_train.csv')

# getting X
Code example #6 (score: 0)
def _pair_features(symbol, binance, selector, window):
    """Fetch 1h candle history for *symbol* and extract windowed features.

    Returns a tuple ``(raw, moving_features, moving_labels)`` where ``raw``
    is the numpy array of the pair's history and the other two come from
    ``extract_feature`` (labels included, windows not flattened).
    """
    merge_bean = SmartLSTMPair(symbol, "1h", binance)
    raw = numpy.array(merge_bean.get_history_data())
    moving_features, moving_labels = extract_feature(raw_data=raw,
                                                     selector=selector,
                                                     window=window,
                                                     with_label=True,
                                                     flatten=False)
    return raw, moving_features, moving_labels


def main(operation='train', code=None):
    """Train the LSTM trader or run prediction on recent data.

    Args:
        operation: 'train' fits a new model on BTCUSDT + ETHUSDT history;
            'predict' scores the latest BTCUSDT windows. Anything else
            prints an error message.
        code: unused; kept for backward compatibility with existing callers.
    """
    step = 30
    input_size = 62
    train_steps = 1000000
    batch_size = 512
    learning_rate = 0.001
    hidden_size = 14
    nclasses = 1
    validation_size = 700
    keep_rate = 0.7

    # Technical-indicator groups that extract_feature computes per window.
    selector = [
        "ROCP", "OROCP", "HROCP", "LROCP", "MACD", "RSI", "VROCP", "BOLL",
        "MA", "VMA", "PRICE_VOLUME", "AVERAGE"
    ]
    input_shape = [30, 62]  # [length of time series, length of feature]

    if operation == 'train':
        train_features = []
        train_labels = []
        val_features = []
        val_labels = []
        binance = get_binance()

        # Identical pipeline for each training pair (previously copy-pasted):
        # the last `validation_size` windows go to validation, rest to train.
        for symbol in ("BTCUSDT", "ETHUSDT"):
            _, moving_features, moving_labels = _pair_features(
                symbol, binance, selector, input_shape[0])
            train_features.extend(moving_features[:-validation_size])
            train_labels.extend(moving_labels[:-validation_size])
            val_features.extend(moving_features[-validation_size:])
            val_labels.extend(moving_labels[-validation_size:])

        # Swap the last two axes of the stacked windows and reshape the
        # labels into a column vector, as the model expects.
        train_features = numpy.transpose(numpy.asarray(train_features),
                                         [0, 2, 1])
        train_labels = numpy.asarray(train_labels)
        train_labels = numpy.reshape(train_labels, [train_labels.shape[0], 1])
        val_features = numpy.transpose(numpy.asarray(val_features), [0, 2, 1])
        val_labels = numpy.asarray(val_labels)
        val_labels = numpy.reshape(val_labels, [val_labels.shape[0], 1])
        train_set = MyDataSet(train_features, train_labels)
        val_set = MyDataSet(val_features, val_labels)

        trader = SmartTrader(step, input_size, learning_rate, hidden_size,
                             nclasses)
        trader.build_graph()
        train(trader,
              train_set,
              val_set,
              train_steps,
              batch_size=batch_size,
              keep_rate=keep_rate)
    elif operation == "predict":
        binance = get_binance()

        # Same extraction as training, but only the validation tail is
        # scored; X (the raw history) is forwarded to predict() as-is.
        X, moving_features, moving_labels = _pair_features(
            "BTCUSDT", binance, selector, input_shape[0])
        moving_features = numpy.asarray(moving_features)
        moving_features = numpy.transpose(moving_features, [0, 2, 1])
        moving_labels = numpy.asarray(moving_labels)
        moving_labels = numpy.reshape(moving_labels,
                                      [moving_labels.shape[0], 1])
        val_set = MyDataSet(moving_features[-validation_size:],
                            moving_labels[-validation_size:])
        predict(X,
                val_set,
                step=step,
                input_size=input_size,
                learning_rate=learning_rate,
                hidden_size=hidden_size,
                nclasses=nclasses)

    else:
        print("Operation not supported. ")