Example n. 1
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    # number of LSTM cells: 100
    mem_cell_ct = 100
    # feature dimension of the input: 4 samples, each a 1x50 vector
    x_dim = 50
    # initialize the LSTM parameters with the cell count and input dimension
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    # build the network
    lstm_net = LstmNetwork(lstm_param)
    # labels / target outputs
    y_list = [-0.5, 0.2, 0.1, -0.5]
    # generate the input data
    input_val_arr = [np.random.random(x_dim) for _ in y_list]
    for cur_iter in range(1000):
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):  #[0,1,2,3]
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" + ", ".join([
            "% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0]
            for ind in range(len(y_list))
        ]) + "]",
              end=", ")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
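
Every example in this listing feeds a ToyLossLayer into lstm_net.y_list_is. For reference, a minimal sketch of that layer, consistent with the bottom_diff classmethod shown in Example n. 26 below: a squared error measured on the first element of each hidden-state vector.

import numpy as np

class ToyLossLayer:
    """Squared error computed on the first element of the hidden state."""
    @classmethod
    def loss(cls, pred, label):
        return (pred[0] - label) ** 2

    @classmethod
    def bottom_diff(cls, pred, label):
        # gradient of the loss w.r.t. the prediction vector:
        # only the first component carries a gradient
        diff = np.zeros_like(pred)
        diff[0] = 2 * (pred[0] - label)
        return diff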
Example n. 2
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)
    losses = []

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 50
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]

    for cur_iter in range(100):
        # here we loop over the epochs
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" + ", ".join([
            "% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0]
            for ind in range(len(y_list))
        ]) + "]",
              end=", ")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        losses.append(loss)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)  # here the learning step is applied
        lstm_net.x_list_clear()
    return losses
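
Since this variant collects and returns the per-iteration losses, the training curve can be inspected after the run. A minimal usage sketch, assuming matplotlib is available:

import matplotlib.pyplot as plt

losses = example_0()  # one loss value per training iteration
plt.plot(losses)
plt.xlabel("iteration")
plt.ylabel("loss")
plt.show()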
Example n. 3
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 50
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]

    for cur_iter in range(100):
        print("iter", "%2s" % str(cur_iter))
        for ind in range(len(y_list)):
            print("x" + str(input_val_arr[ind]))
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" + ", ".join([
            "% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0]
            for ind in range(len(y_list))
        ]) + "]")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 4
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100  # 100 LSTM cells
    x_dim = 50  # dimension of each input
    concat_len = x_dim + mem_cell_ct  # 150
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]  # 4 targets
    input_val_arr = [np.random.random(x_dim)
                     for _ in y_list]  # generate 4 random vectors, each of dimension 50
    # i.e. train a model mapping each vector x to its y

    for cur_iter in range(100):  # train for 100 iterations
        print("cur iter: ", cur_iter)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])
            print("y_pred[%d] : %f" % (ind,
                                       lstm_net.lstm_node_list[ind].state.h[0]))

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss: ", loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
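
concat_len is computed here only for reference: the implementation these examples share concatenates the input x with the previous hidden state h and multiplies the 150-dimensional result by each gate's weight matrix (the "-0.1~0.1 array" with "input 150" noted in Example n. 21). A minimal sketch of one gate's computation under that assumption; wg and bg are illustrative stand-ins for one gate's weights and bias:

import numpy as np

mem_cell_ct, x_dim = 100, 50
concat_len = x_dim + mem_cell_ct  # 150

wg = np.random.uniform(-0.1, 0.1, (mem_cell_ct, concat_len))  # gate weights
bg = np.zeros(mem_cell_ct)                                    # gate bias

x = np.random.random(x_dim)       # current input
h_prev = np.zeros(mem_cell_ct)    # previous hidden state
xc = np.hstack((x, h_prev))       # concatenated vector of length concat_len
g = np.tanh(np.dot(wg, xc) + bg)  # candidate cell update for one gate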
Example n. 5
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)
    x1 = [float(i) / 700 for i in dataAnalyze.testData().pop()]
    print(x1)
    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 10
    #x_dim = 50
    x_dim = len(x1) // 2  # integer division so x_dim stays an int
    concat_len = x_dim + mem_cell_ct
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [139 / 500.0, 455 / 500.0]  # float division; plain 139/500 would truncate to 0 under Python 2
    input_val_arr = [x1[::2], x1[1::2]]
    #input_val_arr = [np.random.random(x_dim) for _ in y_list]
    #   input_val_arr = [np.random.random(x_dim) for _ in range(4)]
    #   input_val_arr = [np.random.uniform(-0.1,0.1,x_dim) for _ in y_list]

    for cur_iter in range(200):
        print("cur iter: ", cur_iter)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

            print("y_pred[%d] : %f" % (ind,
                                       lstm_net.lstm_node_list[ind].state.h[0]))

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss: ", loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 6
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 50
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]
    # print("input_val_arr", input_val_arr)
    # json_dump = json.dumps({'iva': input_val_arr}, cls=NumpyEncoder)
    # f = open('wg.txt', 'w')
    # json.dump(json_dump, f)
    # f.close()

    for cur_iter in range(100):
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

        # print("ind of: ", ind)
        # print("node_state_h", lstm_net.lstm_node_list[ind].state.h)
        print("y_pred = [" + ", ".join([
            "% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0]
            for ind in range(len(y_list))
        ]) + "]",
              end=", ")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
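
The commented-out dump above refers to a NumpyEncoder that is not part of the snippet. A minimal sketch of the usual pattern such a class follows (an assumption, not the snippet's actual implementation): subclass json.JSONEncoder and convert ndarrays to lists.

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        # convert numpy arrays to plain lists so json can serialize them
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)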
Example n. 7
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count 
    mem_cell_ct = 100
    x_dim = 50
    concat_len = x_dim + mem_cell_ct
    lstm_param = LstmParam(mem_cell_ct, x_dim) 
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]

    for cur_iter in range(100):
        print("cur iter: ", cur_iter)
        print("input_val_arr=", input_val_arr)
        print("y_list=", y_list)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])
            print("y_pred[%d] : %f" % (ind, lstm_net.lstm_node_list[ind].state.h[0]))

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss: ", loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 8
def example_0():  # lstm
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100  # mem_cell_ct is the number of LSTM cells
    x_dim = 50  # x_dim is the dimension of the input data
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    #y_list = [-0.5, 0.2, 0.1, -0.5]  # the original toy task fits the sequence y_list = [-0.5, 0.2, 0.1, -0.5] with the hand-rolled LSTM
    y_list = [-0.5, 0, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]  # inputs

    #print(input_val_arr)
    for cur_iter in range(100000):
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" + ", ".join([
            "% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0]
            for ind in range(len(y_list))
        ]) + "]",
              end=", ")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 9
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 50
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]  # labels for one sample: a score for each input word embedding
    # for each y, generate one input vector x; the sequence length is 4 and the embedding dimension is 50
    # by analogy, think of this as giving each vector in the sequence a pos/neg score
    input_val_arr = [np.random.random(x_dim) for _ in y_list]  # list of 4 ndarrays, each of dim 50
    print("input_val_arr:", input_val_arr)

    for cur_iter in range(210):
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):
            # feed each sample's x into the network
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" +
              ", ".join(["% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0] for ind in range(len(y_list))]) +
              "]", end=", ")
        # feed the labels into the network
        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)  # apply the accumulated gradients
        lstm_net.x_list_clear()  # the x list must be cleared on each iteration
Example n. 10
def example_1():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)
    
    data_path = 'F:\研究生\无人机项目\二维条件下计算轨迹和时间//data_t_10.mat'
    data_xy,data_v_theta = load_data(data_path)
    
    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 2
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = data_v_theta[0,:]

    #input_val_arr = [data.append(data_xy[:,i]) for i in range(36000)]
    data = []
    for i in range(36000):
        data.append(data_xy[:,i])    
    input_val_arr  = data

    for cur_iter in range(100):
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" +
              ", ".join(["% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0] for ind in range(len(y_list))]) +
              "]", end=", ")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
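
load_data is not shown in this snippet. A hypothetical sketch, assuming the .mat file is read with scipy.io.loadmat; the keys 'xy' and 'v_theta' are guesses at what data_t_10.mat stores, not confirmed by the source:

import scipy.io as sio

def load_data(path):
    # hypothetical: the actual variable names inside data_t_10.mat are not shown
    mat = sio.loadmat(path)
    return mat['xy'], mat['v_theta']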
Example n. 11
def example_0():
    mem_cell_ct = 100
    x_dim = 50
    concat_len = x_dim + mem_cell_ct
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)

    primes = Primes()
    x_list = []
    y_list = []
    for i in range(0, 10):
        sample = primes.get_sample(x_dim, 1, i)
        x = sample[0:x_dim]
        y = sample[x_dim:x_dim + 1].tolist()[0]
        x_list.append(x)
        y_list.append(y)

    for cur_iter in range(10000):
        if cur_iter % 1000 == 0:
            print("y_list=", y_list)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(x_list[ind])
            if cur_iter % 1000 == 0:
                print("y_pred[%d] : %f" %
                      (ind, lstm_net.lstm_node_list[ind].state.h[0]))

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        if cur_iter % 1000 == 0:
            print("loss: ", loss)
        lstm_param.apply_diff(lr=0.01)
        lstm_net.x_list_clear()
Example n. 12
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count 
    mem_cell_ct = 100
    x_dim = 50
    concat_len = x_dim + mem_cell_ct
    lstm_param = LstmParam(mem_cell_ct, x_dim) 
    lstm_net = LstmNetwork(lstm_param)
    
    # targets to predict
    y_list = [-0.5, 0.2, 0.1, -0.4]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]  # a list of 4 randomly generated arrays of shape (50,)
    #  print(input_val_arr)

    # loop 100 times over the group of four samples
    for cur_iter in range(100):
        print("cur_iter : ", cur_iter)

        # process the four samples and their y values, then update the parameters
        for ind in range(len(y_list)):
            # forward pass
            lstm_net.x_list_add(input_val_arr[ind])
            print("y_pred[%d] : %f" % (ind, lstm_net.lstm_node_list[ind].state.h[0]), lstm_net.lstm_node_list[ind].state.h.shape)
            print(lstm_net.lstm_node_list[ind].state.h)

        # backward pass
        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss: %f" % loss)

        # update the parameters
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 13
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)
    data = psg.load_data(FILE)
    X, Y = psg.create_dataset(data, 7)
    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 14
    x_dim = 7
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = Y[:20]
    print(y_list)
    input_val_arr = X[:20]

    # y_list = [-0.5, 0.2, 0.1, -0.5]
    # input_val_arr = [np.random.random(x_dim) for _ in y_list] # a list of array which shape is (x_dim,)

    for cur_iter in range(10000):
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])
        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()

        if cur_iter % 500 == 0:
            print("iter", "%2s" % str(cur_iter), end=": ")
            print("y_pred = [" + ", ".join([
                "% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0]
                for ind in range(len(y_list))
            ]) + "]",
                  end=", ")
            print("loss:", "%.3e" % loss)
Example n. 14
def example_0():

    np.random.seed(0)
    mem_cell_ct = 100

    x_dim = 1
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)

    """
    y_list = [-0.5, 0.2, 0.1, -0.5]  # 五步
    input_val_arr = [np.random.random(x_dim) for _ in y_list]

    for i in range(len(input_val_arr)):
        print "input_val_arr = ", input_val_arr[i]
    """
    input_val_arr = [
        [1, 2],
        [2, 3],
        [3, 4]
    ]

    pre_x = [
        [3, 4]
    ]

    y_list = [0.03, 0.05, 0.07]
    x = np.arange(-1, 1, 0.01)
    xa = []
    for i in range(len(x)):
        xa.append([x[i]])
    # y = 2 * np.sin(x * 2.3) + 0.5 * x ** 3
    # y1 = y + 0.5 * (np.random.rand(len(x)) - 0.5)
    y = ((x * x - 1) ** 3 + 1) * (np.cos(x * 2) + 0.6 * np.sin(x * 1.3))
    y_list = y + (np.random.rand(len(x)) - 0.5)
    # print "input_val_arr = ", input_val_arr

    for cur_iter in range(len(x)):
        for ind in range(len(y_list)):
            lstm_net.x_list_add(xa[ind])

        y_pred = [0] * len(y_list)

        for i in range(len(y_list)):
            y_pred[i] = lstm_net.lstm_node_list[i].state.h[0]

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        # print("loss:", "%.3e" % loss)

        lstm_param.apply_diff(lr=0.1)  # update the weights
        lstm_net.x_list_clear()

    plt.plot(xa, y_list)
    plt.plot(xa, y_pred)
    plt.show()
    print "loss = ", loss
    print "y_pred = ", y_pred
Example n. 15
def test(data_x, day_flaovr_num):
    np.random.seed(0)
    mem_cell_ct = 100
    x_dim = 7  # one 7-day window per input

    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)

    """
    input = []
    y_list = []
    # 处理数,将数据处理成  7天一段,逐天滚动
    for i in range(len(data_x) - 7):
        input.append(data_x[i: i+7])
        # y_list.append(day_flaovr_num[i + 7][0])

    for i in range(len(data_x) - 7):
        y_list.append(day_flaovr_num[i + 7])

    # y_list = [-0.5, 0.2, 0.1, -0.5]  # 五步
    # input_val_arr = [np.random.random(x_dim) for _ in y_list]

    for i in range(len(input)):
        print "input_val_arr = ", input[i]

    for i in range(len(y_list)):
        print "y_list =", y_list[i]

    print "len(input) = ", len(input)
    print "len(y_list) = ", len(y_list)
    
"""

    input_val_arr = [
        [1, 2, 3, 4, 5, 6, 7],
        [2, 3, 4, 5, 6, 7, 8],
        [3, 4, 5, 6, 7, 8, 9],
    ]
    y_list = [1, 2, 3]
    for cur_iter in range(100):
        print("cur_iter = ", cur_iter)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

        for i in range(len(y_list)):
            print("y_pred = ", lstm_net.lstm_node_list[i].state.h[0])

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss = ", loss)
        lstm_param.apply_diff(lr=0.1)  # update the weights
        lstm_net.x_list_clear()
Example n. 16
def CreateNetwork(x_dim):
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count 
    mem_cell_ct = 20
    #x_dim = 50
    concat_len = x_dim + mem_cell_ct
    lstm_param = LstmParam(mem_cell_ct, x_dim) 
    lstm_net = LstmNetwork(lstm_param)

    return lstm_net, lstm_param
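
A minimal usage sketch for CreateNetwork, reusing the toy training loop from the earlier examples; the targets and random inputs below are illustrative:

x_dim = 50
lstm_net, lstm_param = CreateNetwork(x_dim)
y_list = [-0.5, 0.2, 0.1, -0.5]
input_val_arr = [np.random.random(x_dim) for _ in y_list]

for cur_iter in range(100):
    for x in input_val_arr:
        lstm_net.x_list_add(x)
    loss = lstm_net.y_list_is(y_list, ToyLossLayer)
    lstm_param.apply_diff(lr=0.1)
    lstm_net.x_list_clear()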
Example n. 17
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)
    x1 = [float(i) / 700 for i in dataAnalyze.testData().pop()]
    print(x1)
    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 10
    #x_dim = 50
    x_dim = len(x1) // 2  # integer division so x_dim stays an int
    concat_len = x_dim + mem_cell_ct
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [139 / 500.0, 455 / 500.0]  # float division; plain 139/500 would truncate to 0 under Python 2
    input_val_arr = [x1[::2], x1[1::2]]
    #input_val_arr = [np.random.random(x_dim) for _ in y_list]
    #input_val_arr = [np.random.random(x_dim) for _ in range(4)]
    #input_val_arr = [np.random.uniform(-0.1,0.1,x_dim) for _ in y_list]

    for cur_iter in range(200):
        print("cur iter: ", cur_iter)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

            print("y_pred[%d] : %f" % (ind, lstm_net.lstm_node_list[ind].state.h[0]))

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss: ", loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 18
def example_0():
    mem_cell_ct = 100
    x_dim = 50
    concat_len = x_dim + mem_cell_ct
    lstm_param = LstmParam(mem_cell_ct, x_dim) 
    lstm_net = LstmNetwork(lstm_param)

    primes = Primes()
    x_list = []
    y_list = []
    for i in range(0, 10):
        sample = primes.get_sample(x_dim, 1, i)
        x = sample[0:x_dim]
        y = sample[x_dim:x_dim+1].tolist()[0]
        x_list.append(x)
        y_list.append(y)

    for cur_iter in range(10000):
        if cur_iter % 1000 == 0:
            print("y_list=", y_list)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(x_list[ind])
            if cur_iter % 1000 == 0:
                print("y_pred[%d] : %f" % (ind, lstm_net.lstm_node_list[ind].state.h[0]))

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        if cur_iter % 1000 == 0:
            print("loss: ", loss)
        lstm_param.apply_diff(lr=0.01)
        lstm_net.x_list_clear()
Example n. 19
def example_1():  # wavelet
    filename = 'data.txt'  # data.txt lives in the same directory as this script, so no full path is needed
    data = []
    with open(filename, 'r') as file_to_read:
        while True:
            lines = file_to_read.readline()  # read one full line
            if not lines:
                break
            # split the line: split() without arguments handles whitespace; pass ',' for comma-separated data
            p_tmp = lines.split()
            data.append(float(p_tmp[0]))  # append the newly read value as a number

    x = np.arange(len(data))
    y = data
    # continuous wavelet transform
    coef, freqs = pywt.cwt(y, np.arange(1, 128), 'gaus1')
    # time-frequency (scale) plot
    #plt.matshow( coef )
    # frequencies
    #plt.plot(freqs)

    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100  # mem_cell_ct is the number of LSTM cells
    x_dim = 50  # x_dim is the dimension of the input data
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    #y_list = [-0.5, 0.2, 0.1, -0.5]  # the original toy task fits the sequence y_list = [-0.5, 0.2, 0.1, -0.5] with the hand-rolled LSTM
    y_list = [-0.5, 0, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]  # inputs

    #print(input_val_arr)
    for cur_iter in range(1000):
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" + ", ".join([
            "% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0]
            for ind in range(len(y_list))
        ]) + "]",
              end=", ")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 20
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)
    # typical computer RNGs are pseudo-random: a true-random seed is the initial condition, and an algorithm iterates from it to produce the sequence

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 7  # dimension of the hidden output
    x_dim = 50  # dimension of the input
    lstm_param = LstmParam(mem_cell_ct, x_dim)  # initialize the network parameters
    lstm_net = LstmNetwork(lstm_param)
    y_list = [0.5, 0.2, 0.1, 0.5, 0.7]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]
    loss = list()
    lstm_net.net_initial(y_list)

    for cur_iter in range(400):  # number of iterations
        print("cur iter: ", cur_iter)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])
            print("y_pred[%d] : %f" %
                  (ind, lstm_net.lstm_node_list[ind].state.h[0]))

        loss.append(lstm_net.y_list_is(y_list, ToyLossLayer))  # compute the loss and the gradients
        print("loss: ", loss[cur_iter])
        lstm_param.apply_diff(lr=0.1)  # gradient-descent step to correct the net
        lstm_net.x_list_clear()
    for ind in range(len(y_list)):
        print("y_pred[%d] : %f" %
              (ind, lstm_net.lstm_node_list[ind].state.h[0]))
    legend = ['loss']
    i = 0
    for y in lstm_net.y_list:
        plt.plot(y, '.')
        i += 1
        legend.append('y_' + str(i))
    plt.plot(loss)
    plt.legend(legend)
    plt.show()
Example n. 21
def example_0():

    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 50
    lstm_param = LstmParam(mem_cell_ct, x_dim)

    # print(lstm_param.wg)
    # print(lstm_param.wg[0]) # -0.1~0.1 array
    # print(len(lstm_param.wg[0])) # input 150

    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]
    print("y_list input:", y_list)

    # input transform to vector
    input_val_arr = [np.random.random(x_dim) for _ in y_list]
    # print(lstm_param)
    # print(input_val_arr) # (4, 50) array

    # iteration 100
    for cur_iter in range(100):
        print("iter", "%2s" % str(cur_iter), end=": ")

        # next epoch input
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" + ", ".join([
            "%2.5f" % lstm_net.lstm_node_list[ind].state.h[0]
            for ind in range(len(y_list))
        ]) + "]",
              end=", ")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)

        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 22
def bilstm_runn():
    trainX, trainY, validX, validY, testX, testY = genData()
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    train_X = genBiData(trainX)
    train_Y = trainY
    valid_X = genBiData(validX)
    valid_Y = validY
    test_X = genBiData(testX)
    test_Y = testY

    del trainX
    del trainY
    del testX
    del testY
    del validX
    del validY
    
    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 50
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]
    input_val_arr = [np.random.random(x_dim) for x in y_list]

    for cur_iter in range(100):
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])

        print("y_pred = [" +
              ", ".join(["% 2.5f" % lstm_net.lstm_node_list[ind].state.h[0] for ind in range(len(y_list))]) +
              "]", end=", ")

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss:", "%.3e" % loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 23
    y_dim = 3

    df1 = pd.read_csv("../../input/lstm/simple_train.csv",
                      header=None,
                      nrows=9000)
    label = df1.pop(0)
    label = np.array(label)
    for i, x in enumerate(['LOW', 'MID', 'HIG']):
        label[np.where(label == x)] = i
    data = np.array(df1)
    data_size = int(data.shape[0] * 0.7)
    valid_size = int(data.shape[0] * 0.3)
    print(data_size)

    lstm_parameter = LstmParameter(cell_count, x_dim, y_dim)
    lstm_network = LstmNetwork(lstm_parameter)

    loss_list = []
    batch_size = 128
    accuracy_best = 0.4
    for iters in range(1000000):
        ind = random.randint(0, data_size - 1)
        train_ind = data[ind].reshape((cell_count, x_dim))
        label_ind = label[ind]

        #        print("iters is" , iters)
        for index in range(cell_count):
            lstm_network.forward_compute(train_ind[index])

        if iters % batch_size == 0:
            if iters == 0:
Example n. 24
    def _predict(self, num_days, scr):
        # initialize the parameters
        np.random.seed(0)
        mem_cell_ct = 100
        x_dim = 10
        lstm_param = LstmParam(mem_cell_ct, x_dim)
        lstm_net = LstmNetwork(lstm_param)
        days = 10
        # values to predict
        y_list = self.data[days:, :]
        # input values
        input_val_arr = []
        for d in range(len(y_list)):
            if input_val_arr == []:
                input_val_arr = np.transpose(self.data[d:d + days, :])
            else:
                input_val_arr = np.concatenate(
                    (input_val_arr, np.transpose(self.data[d:d + days, :])), 0)

        out = ''
        # train for 1000 iterations
        for cur_iter in range(1000):
            for ind in range(len(y_list)):
                # training step
                lstm_net.x_list_add(input_val_arr[ind, :])

            # compute the loss
            loss = lstm_net.y_list_is(y_list, ToyLossLayer)
            # backpropagate the error
            lstm_param.apply_diff(lr=0.01)
            lstm_net.x_list_clear()
            if (cur_iter + 1) % 50 == 0:
                out += str(cur_iter +
                           1) + '/' + str(1000) + ' ' + str(loss) + '\n'
                if isinstance(scr, scrolledtext.ScrolledText):
                    scr.insert(
                        'end',
                        str(cur_iter + 1) + '/' + str(1000) + ' ' + str(loss) +
                        '\n')
        out += '\n'
        if isinstance(scr, scrolledtext.ScrolledText):
            scr.insert('end', '\n')

        # find the lowest fare
        min_value = sys.maxsize
        min_index = self.length
        for i in range(num_days):
            lstm_net.x_list_add(np.transpose(self.data[-days:, :])[0])
            self.data = np.append(self.data,
                                  [[lstm_net.lstm_node_list[0].state.h[0]]], 0)
            out += 'Predicted fare for day ' + str(self.length + i + 1) + ': ' + str(
                lstm_net.lstm_node_list[0].state.h[0] * self.std +
                self.mean) + '\n'
            if lstm_net.lstm_node_list[0].state.h[0] < min_value:
                min_value = lstm_net.lstm_node_list[0].state.h[0]
                min_index = self.length + i + 1
            lstm_net.x_list_clear()
        out += '\nLowest predicted fare of ' + str(
            min_value * self.std + self.mean) + ' expected on day ' + str(min_index) + '\n'
        if isinstance(scr, scrolledtext.ScrolledText):
            scr.insert(
                'end', 'Lowest predicted fare of ' +
                str(min_value * self.std + self.mean) + ' expected on day ' + str(min_index) + '\n\n')

        return out
Example n. 25
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)

    # parameters for input data dimension and lstm cell count
    mem_cell_ct = 100
    x_dim = 50

    y_list = [-0.8333333333, 0.33333, 0.166666667, -80.8]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]

    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)

    lstm_param2 = LstmParam(mem_cell_ct, mem_cell_ct)
    lstm_net2 = LstmNetwork(lstm_param2)

    loss = ToyLossLayer(mem_cell_ct)

    for cur_iter in range(2000):
        # print(y_list)
        print("iter", "%2s" % str(cur_iter), end=": ")
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])
            lstm_net2.x_list_add(lstm_net.lstm_node_list[ind].state.h)

        print("y_pred = [" + ", ".join([
            "% 2.5f" % loss.value(lstm_net2.lstm_node_list[ind].state.h)
            for ind in range(len(y_list))
        ]) + "]",
              end=", ")

        lossv = lstm_net2.y_list_is(y_list, loss)
        lstm_net.y_list_is2(lstm_net2)
        print("loss:", "%.3e" % lossv)
        lstm_param2.apply_diff(lr=0.1)
        lstm_param.apply_diff(lr=0.1)
        lstm_net2.x_list_clear()
        lstm_net.x_list_clear()
Example n. 26
    @classmethod
    def bottom_diff(self, pred, label):
        diff = np.zeros_like(pred)
        diff[0] = 2 * (pred[0] - label)
        return diff


if __name__ == "__main__":
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)
    # parameters for input data dimension and lstm cell count
    x_dim = 50
    mem_cell_ct = 100
    lstm_param = LstmParam(mem_cell_ct, x_dim)
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]

    for cur_iter in range(100):
        print("cur iter: ", cur_iter)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])
            print("y_pred[%d] : %f" % (ind,
                                       lstm_net.lstm_node_list[ind].state.h[0]))

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss: ", loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()
Example n. 27
    @classmethod
    def bottom_diff(self, pred, label):
        diff = np.zeros_like(pred)
        diff[0] = 2 * (pred[0] - label)
        return diff


if __name__ == "__main__":
    # learns to repeat simple sequence from random inputs
    np.random.seed(0)
    # parameters for input data dimension and lstm cell count 
    x_dim = 50
    mem_cell_ct = 100
    lstm_param = LstmParam(mem_cell_ct, x_dim) 
    lstm_net = LstmNetwork(lstm_param)
    y_list = [-0.5, 0.2, 0.1, -0.5]
    input_val_arr = [np.random.random(x_dim) for _ in y_list]

    for cur_iter in range(100):
        print("cur iter: ", cur_iter)
        for ind in range(len(y_list)):
            lstm_net.x_list_add(input_val_arr[ind])
            print("y_pred[%d] : %f" % (ind, lstm_net.lstm_node_list[ind].state.h[0]))

        loss = lstm_net.y_list_is(y_list, ToyLossLayer)
        print("loss: ", loss)
        lstm_param.apply_diff(lr=0.1)
        lstm_net.x_list_clear()

Example n. 28
def example_0():
    # learns to repeat simple sequence from random inputs
    np.random.seed(3)

    # Number of iterations or epochs
    nEpochs = 100

    # Internal cell widths
    cellWidth = 100
    
    # Number of random input numbers for each output
    xSize = 50

    ## Initialise parameters
    # (contains weights and derivatives of the loss function w.r.t. the weights)
    weights = LstmWeights(cellWidth, xSize)
    
    ## Prepare target outputs
    outData  = [0.5, 0.2, 0.1, 0.5]
    
    # number of unfolded cells
    nCells   = len(outData)

    # Initialise LSTM 
    trainLSTM = LstmNetwork(weights, nCells, cellWidth, xSize)

    # Input data
    inData = np.random.random([nCells, xSize]) # [4, 50]
    
    # Train and sample at the same time
    for epoch in range(nEpochs):

        #
        # Train model
        #

        # Input data and propagate forwards through time
        trainLSTM.fwdProp(inData, weights)

        # Evaluate loss function and back propagate through time
        loss, grads = trainLSTM.bptt(outData, ToyLossLayer, weights)

        # Clear inputs to start afresh for next epoch
        trainLSTM.gotoStartCell()

        # Collect the gradients


        # Apply weight update
        weights.update(grads, lr=0.1)
        #weights.weightUpdate(lr=0.1)


        #
        # Test model and print logging information
        #

        # Sample from new model configured with the trained weights
        testLSTM = LstmNetwork(weights, nCells, cellWidth, xSize)
        testLSTM.fwdProp(inData, weights)
        state = testLSTM.sample()

        #pdb.set_trace()

        # Print logging information
        for ind in range(nCells):
            print("  Input %d rand.  Target = %1.3f. Output = %1.3f. Delta = %1.3f" % (
                xSize, float(outData[ind]), float(state[ind]), outData[ind] - state[ind]))
        print("Epoch: %3d. loss: %5.10f\n" % (epoch, loss))