Example #1
from itertools import islice
from random import shuffle

# DNN, DropoutLayer, Layer, LQ, LCE, timing, load_data and load_mnist_simple
# are assumed to come from the surrounding project modules (not shown here).
def main2():
    dnn = DNN(input=28 * 28,
              layers=[DropoutLayer(160, LQ),
                      Layer(10, LCE)],
              eta=0.05,
              lmbda=1)  # 98%
    dnn.initialize_rand()
    train, test, validation = load_mnist_simple()

    f_names = [f'mnist_expaned_k0{i}.pkl.gz' for i in range(50)]
    shuffle(f_names)
    for f_name in f_names:  # one training pass per expanded-MNIST chunk
        print(f_name)
        with timing("load"):
            raw_data = load_data(f_name)
        with timing("shuffle"):
            shuffle(raw_data)
        with timing("reshape"):
            data = [(x.reshape((784, 1)), y)
                    for x, y in islice(raw_data, 100000)]
            del raw_data
        with timing("learn"):
            dnn.learn(data)
        del data
        print('TEST:', dnn.test(test))
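The timing helper used in Examples #1 and #2 is not included in the snippets; a minimal sketch with the same call pattern, assuming it only reports the label and the elapsed wall-clock time, could look like this:

from contextlib import contextmanager
from time import perf_counter

@contextmanager
def timing(label):
    # print the label and the elapsed wall-clock seconds once the block exits
    start = perf_counter()
    try:
        yield
    finally:
        print(f'{label}: {perf_counter() - start:.2f}s')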
Example #2
def main():
    train, test, validation = load_mnist_simple()
    # x, y = train[0]
    # print("x: ", x.shape)
    # print("y: ", y)

    with timing(f""):
        # dnn = DNN(input=28 * 28, layers=[Layer(30, LQ), Layer(10, LCE)], eta=0.05)  # 96%
        # dnn = DNN(input=28 * 28, layers=[Layer(30, LQ), Layer(10, SM)], eta=0.001)  # 68%
        # dnn = DNN(input=28 * 28, layers=[Layer(100, LQ), Layer(10, LCE)], eta=0.05, lmbda=5)  # 98%
        # dnn = DNN(input=28 * 28, layers=[DropoutLayer(100, LQ), Layer(10, LCE)], eta=0.05)  # 97.5%
        dnn = DNN(input=28 * 28, layers=[DropoutLayer(160, LQ), Layer(10, LCE)], eta=0.05, lmbda=3)
        dnn.initialize_rand()
        dnn.learn(train, epochs=30, test=validation, batch_size=29)

    print('test:', dnn.test(test))
    print(dnn.stats())
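load_mnist_simple and the DNN/Layer classes are also not part of these snippets. Going by the reshape((784, 1)) in Example #1, each sample is assumed to be a (784, 1) column vector paired with a label; the stand-in below (its shapes and integer-label format are assumptions, not the original interface) is only meant for smoke-testing the training loop without the MNIST files:

import numpy as np
from random import randrange

def fake_mnist(n_train=1000, n_eval=100):
    # synthetic (x, y) pairs in the layout Example #1 appears to expect:
    # x is a (784, 1) column vector, y is an integer digit label 0-9
    def make(n):
        return [(np.random.rand(784, 1), randrange(10)) for _ in range(n)]
    return make(n_train), make(n_eval), make(n_eval)  # train, test, validation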
Example #3
              512,
              train=train_mode,
              isnorm=True,
              name='w_%s' % str(num),
              graph=g,
              scale=[0.1, 100, 100])  # define the network
    memory_norm = net.norm(memory)
    if train_mode != 0:
        # training mode
        X = memory_norm[:, 1:].copy()   # features: every column after the first
        Y = memory_norm[:, 0:1].copy()  # target: the first column
        losses = []
        for i in range(5000):
            sample_index = np.random.choice(len(X), size=1000)  # mini-batch indices (sampled with replacement)
            batch_x = X[sample_index, :]
            batch_y = Y[sample_index, :]
            loss, mae = net.learn(batch_x, batch_y)
            losses.append(loss)
            print(i + 1, 'mean prediction error:', mae)
        plt.plot(losses)
        net.store()
        plt.show()
    else:
        # test mode
        cav = AircraftEnv()
        info = guidance(cav, net)
        print(info['range_error'])
        state_record, h_cmds = info['state_records'], info['hcmd_records']
        cav.plot(state_record, h_cmds)
        plt.show()
Example #4
if train:
    # training phase
    # compute the derivative of x from x_old and x_new,
    # then train the neural network to learn it
    for _ in range(5):
        observation = env.reset()
        my_observation = observation
        mu = 0
        for epi in range(20000):
            u = env.action_space.sample()
            my_observation, target = step_my(observation, u, net, mu)
            observation, reward, done, info = env.step(u)
            mu = get_mu(mu, observation, my_observation)
            net.store_sample(observation, target)
            if epi % 100 == 0:
                result = net.learn()
                if result:
                    print(epi, result)
    net.store_net()

# test phase
# plot three sets of true-vs-predicted comparison curves
observation = env.reset()
my_observation = observation
record_tru, record_pre = [], []  # buffers for the true (env) and predicted (model) trajectories
for epi in range(5000):
    u = env.action_space.sample()
    my_observation, d = step_my(observation, u, net)
    observation, reward, done, info = env.step(u)
    record_tru.append(observation)
    record_pre.append(my_observation)
record_tru = np.array(record_tru)
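The rest of the plotting code is not shown past this point. A minimal sketch of the three true-vs-predicted comparison figures mentioned in the comment above, assuming the first three state dimensions are the ones being compared:

import matplotlib.pyplot as plt
import numpy as np

record_pre = np.array(record_pre)
for dim in range(3):
    plt.figure()
    plt.plot(record_tru[:, dim], label='env (true)')
    plt.plot(record_pre[:, dim], label='model (predicted)')
    plt.xlabel('step')
    plt.legend()
    plt.title(f'state dimension {dim}')
plt.show()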