Example #1
def __init__(self):
    self.stds, self.means = self.load_scaler()
    # Two stacked LSTM layers followed by a dense layer that emits
    # `pred_length` future values.
    self.sequential = rm.Sequential(
        [rm.Lstm(30), rm.Lstm(10),
         rm.Dense(pred_length)])
    # Fetch recent USD/JPY candles from the OANDA practice API.
    self.oanda = oandapy.API(environment="practice", access_token=token)
    self.res = self.oanda.get_history(instrument="USD_JPY",
                                      granularity=gran,
                                      count=look_back + pred_length + 78)
    self.prep = Preprocess(self.res)
    self.df = self.prep.data
    self.data = self.standardize()
    self.exp, self.target = self.create_dataset()
    self.pred = self.predict()
    self.pred_side = self.predict_side()
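
Several helpers used above (`load_scaler`, `standardize`, `create_dataset`, the predict methods) fall outside this excerpt. As a rough sketch, `standardize` presumably applies the stored statistics to the preprocessed frame; the body below is an assumption, not the original implementation.

import numpy as np

def standardize(self):
    # Scale every column with the stored means/stds so the LSTM sees
    # zero-mean, unit-variance inputs (assumed behaviour).
    values = self.df.values.astype(np.float32)
    return (values - self.means) / self.stds
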
Example #2
def test_gpu_lstm(a):
    # Run the same LSTM forward/backward pass on GPU and CPU and check that
    # the outputs and the input gradients agree.
    layer = rm.Lstm(output_size=2)

    def func(x):
        loss = 0
        for _ in range(5):
            loss += sum(layer(x))
        layer.truncate()
        return loss

    set_cuda_active(True)

    g1 = Variable(a)

    g3 = func(g1)
    g3.to_cpu()

    g = g3.grad()
    g_g1 = g.get(g1)
    g_g1.to_cpu()

    set_cuda_active(False)
    c3 = func(g1)
    c = c3.grad()
    c_g1 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
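
The `close` helper used for the GPU/CPU comparison is not shown in this excerpt; a plausible sketch, assuming it simply compares the underlying arrays within a loose tolerance:

import numpy as np

def close(a, b, atol=1e-5, rtol=1e-3):
    # Both arguments behave as ndarrays once moved to the CPU.
    assert np.allclose(np.array(a), np.array(b), atol=atol, rtol=rtol)
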
Example #3
    def __init__(self, src_filedir, tar_filedir, hidden_size=100):

        # Build vocabularies and integer-encoded training sentences for the
        # source and target languages.
        self.src_i2w, self.src_w2i, self.X_train = self.process_dataset(src_filedir)
        self.tar_i2w, self.tar_w2i, self.Y_train = self.process_dataset(tar_filedir, True)

        self.src_vocab_size = len(self.src_w2i)
        self.tar_vocab_size = len(self.tar_w2i)
        self.hidden_size = hidden_size

        # encoder
        self.l1 = rm.Embedding(hidden_size, self.src_vocab_size)
        self.l2 = rm.Lstm(hidden_size)

        # decoder
        self.l3 = rm.Embedding(hidden_size, self.tar_vocab_size)
        self.l4 = rm.Lstm(hidden_size)
        self.l5 = rm.Dense(self.tar_vocab_size)
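
`process_dataset` is not included in this example. A minimal sketch of what such a method might do, assuming one whitespace-tokenised sentence per line; the `is_target` flag and the `<bos>`/`<eos>` markers are assumptions about the second argument, not the original behaviour.

def process_dataset(self, filedir, is_target=False):
    i2w, w2i, data = {}, {}, []
    if is_target:
        # Reserve special tokens for the decoder side (assumed convention).
        for special in ("<bos>", "<eos>"):
            w2i[special] = len(w2i)
            i2w[w2i[special]] = special
    with open(filedir, encoding="utf-8") as f:
        for line in f:
            ids = []
            for w in line.split():
                if w not in w2i:
                    w2i[w] = len(w2i)
                    i2w[w2i[w]] = w
                ids.append(w2i[w])
            if is_target:
                ids = [w2i["<bos>"]] + ids + [w2i["<eos>"]]
            data.append(ids)
    return i2w, w2i, data
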
Example #4
def test_node_clear():
    # Enable graph debugging, build a small graph through an LSTM layer, and
    # dump node statistics to verify that intermediate nodes are released.
    DEBUG_GRAPH_INIT(True)

    a = Variable(np.random.rand(2, 2).astype(np.float32))
    b = Variable(np.random.rand(2, 2).astype(np.float32))

    layer = R.Lstm(2)

    c = layer(O.dot(a, b))  # NOQA

    DEBUG_NODE_STAT()
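
The snippet below builds its dataset with `create_subseq`, which is not part of the excerpt. A minimal sketch, assuming it slides a window over the series and pairs each `look_back`-long input with the `pred_length` values that follow it:

def create_subseq(series, look_back, pred_length):
    sub_seq, next_values = [], []
    for i in range(len(series) - look_back - pred_length + 1):
        sub_seq.append(series[i:i + look_back])
        next_values.append(series[i + look_back:i + look_back + pred_length])
    return sub_seq, next_values
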
sub_seq, next_values = create_subseq(normal_cycle, look_back, pred_length)
X_train, X_test, y_train, y_test = train_test_split(sub_seq,
                                                    next_values,
                                                    test_size=0.2)
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

train_size = X_train.shape[0]
test_size = X_test.shape[0]
print('train size:{}, test size:{}'.format(train_size, test_size))

# Model definition and training
model = rm.Sequential(
    [rm.Lstm(35),
     rm.Relu(),
     rm.Lstm(35),
     rm.Relu(),
     rm.Dense(pred_length)])

# Parameters
batch_size = 100
max_epoch = 2000
period = 10  # early stopping checking period

optimizer = Adam()
epoch = 0
loss_prev = np.inf
learning_curve, test_curve = [], []
while epoch < max_epoch:
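    # The original excerpt is truncated at the loop header above. The body below is
    # a sketch of one possible epoch, assuming ReNom's `with model.train():` context
    # and the `loss.grad().update(optimizer)` idiom; the loss choice and batching
    # details are assumptions, not the original code.
    epoch += 1
    perm = np.random.permutation(train_size)
    train_loss = 0
    for j in range(train_size // batch_size):
        idx = perm[j * batch_size:(j + 1) * batch_size]
        batch_x, batch_y = X_train[idx], y_train[idx]
        with model.train():
            # Feed the look-back window one time step at a time, keeping the LSTM state.
            for t in range(look_back):
                z = model(batch_x[:, t:t + 1])
            loss = rm.mean_squared_error(z, batch_y)
        loss.grad().update(optimizer)
        model.truncate()  # reset the recurrent state before the next batch
        train_loss += float(loss.as_ndarray())
    learning_curve.append(train_loss / (train_size // batch_size))
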
def __init__(self):
    super(EncDecAD, self).__init__()
    # Encoder-decoder pair with `c` hidden units each, plus a dense read-out
    # of dimension `m`.
    self.encoder = rm.Lstm(c)
    self.decoder = rm.Lstm(c)
    self.linear = rm.Dense(m)
def draw_pred_curve(e_num):
    # Predict recursively: feed the current window step by step, take the last
    # prediction, then slide the window forward by appending it.
    pred_curve = []
    arr_now = X_test[0]
    for _ in range(test_size):
        for t in range(look_back):
            pred = model(np.array([arr_now[t]]))
        model.truncate()
        pred_curve.append(pred[0])
        arr_now = np.delete(arr_now, 0)
        arr_now = np.append(arr_now, pred)
    plt.plot(x[:train_size+look_back], y[:train_size+look_back], color='blue')
    plt.plot(x[train_size+look_back:], pred_curve, label='epoch:'+str(e_num)+'th')

# Model definition
model = rm.Sequential([
    rm.Lstm(2),
    rm.Dense(1)
])

# Parameter settings
batch_size = 5
max_epoch = 1000
period = 200
optimizer = Adam()

# Training loop
i = 0
loss_prev = np.inf

# Learning curve
learning_curve = []
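
The excerpt ends before the loop itself. Given the `period` and `loss_prev` variables, a plausible shape for it is a training loop with a simple early-stopping check; the sketch below assumes ReNom's training idiom and reuses `X_train`, `y_train`, and `look_back` from the split shown earlier on this page, all of which are assumptions rather than the original code.

while i < max_epoch:
    i += 1
    with model.train():
        # Feed each window step by step through Lstm(2) -> Dense(1);
        # mini-batching with `batch_size` is omitted for brevity.
        for t in range(look_back):
            z = model(X_train[:, t:t + 1])
        loss = rm.mean_squared_error(z, y_train)
    loss.grad().update(optimizer)
    model.truncate()
    learning_curve.append(float(loss.as_ndarray()))

    # Early stopping: every `period` epochs, stop once the loss no longer improves.
    if i % period == 0:
        if learning_curve[-1] >= loss_prev:
            break
        loss_prev = learning_curve[-1]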