Example #1
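All four examples below assume the same surrounding context: imports, an MNIST dygraph layer, and (for Example #2) a test helper. That context is not part of the listing; a minimal sketch of what it might look like under Paddle 1.x fluid dygraph follows (layer sizes are illustrative, not the original network):

import time
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear

class MNIST(fluid.dygraph.Layer):
    def __init__(self, use_cudnn=True):
        super(MNIST, self).__init__()
        # one conv block plus a softmax classifier; purely illustrative
        self._conv = Conv2D(num_channels=1, num_filters=20, filter_size=5,
                            act='relu', use_cudnn=use_cudnn)
        self._pool = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self._fc = Linear(input_dim=20 * 12 * 12, output_dim=10, act='softmax')

    def forward(self, inputs, label=None):
        x = self._conv(inputs)            # 1x28x28 -> 20x24x24
        x = self._pool(x)                 # 20x24x24 -> 20x12x12
        x = fluid.layers.reshape(x, shape=[-1, 20 * 12 * 12])
        x = self._fc(x)
        if label is not None:             # Example #3 calls mnist(img, label)
            return x, fluid.layers.accuracy(input=x, label=label)
        return x
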
def train1():
    print("train1........................................")
    print("\t训练代码demo")
    with fluid.dygraph.guard():
        epoch_num = 5
        BATCH_SIZE = 64
        train_reader = paddle.batch(paddle.dataset.mnist.train(),
                                    batch_size=BATCH_SIZE,
                                    drop_last=True)
        mnist = MNIST()
        adam = fluid.optimizer.AdamOptimizer(learning_rate=0.001,
                                             parameter_list=mnist.parameters())
        for epoch in range(epoch_num):
            for batch_id, data in enumerate(train_reader()):
                dy_x_data = np.array([x[0].reshape(1, 28, 28)
                                      for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data
                                   ]).astype('int64').reshape(-1, 1)
                img = fluid.dygraph.to_variable(dy_x_data)
                label = fluid.dygraph.to_variable(y_data)

                cost = mnist(img)

                loss = fluid.layers.cross_entropy(cost, label)
                avg_loss = fluid.layers.mean(loss)

                if batch_id % 100 == 0 and batch_id != 0:
                    print("epoch: {}, batch_id: {}, loss is: {}".format(
                        epoch, batch_id, avg_loss.numpy()))
                avg_loss.backward()
                adam.minimize(avg_loss)
                mnist.clear_gradients()
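Note the fixed update cycle in the inner loop: forward pass, loss, backward(), minimize(), clear_gradients(). Dygraph mode does not clear gradients automatically, so dropping clear_gradients() would silently accumulate gradients across batches.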
Example #2
def train4(use_cudnn, model_file):
    with fluid.dygraph.guard():
        epoch_num = 5
        BATCH_SIZE = 64

        mnist = MNIST(use_cudnn=use_cudnn)
        adam = fluid.optimizer.Adam(learning_rate=0.001,
                                    parameter_list=mnist.parameters())
        train_reader = paddle.batch(paddle.dataset.mnist.train(),
                                    batch_size=BATCH_SIZE,
                                    drop_last=True)
        test_reader = paddle.batch(paddle.dataset.mnist.test(),
                                   batch_size=BATCH_SIZE,
                                   drop_last=True)

        np.set_printoptions(precision=3, suppress=True)
        dy_param_init_value = {}
        start_time = time.time()
        for epoch in range(epoch_num):
            for batch_id, data in enumerate(train_reader()):
                # step 1: prepare the input batch
                dy_x_data = np.array([x[0].reshape(1, 28, 28)
                                      for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data
                                   ]).astype('int64').reshape(BATCH_SIZE, 1)
                img = fluid.dygraph.to_variable(dy_x_data)
                label = fluid.dygraph.to_variable(y_data)
                label.stop_gradient = True

                # step 2: forward pass and loss
                cost = mnist(img)
                loss = fluid.layers.cross_entropy(cost, label)
                avg_loss = fluid.layers.mean(loss)
                dy_out = avg_loss.numpy()

                # step 3: backward pass and optimizer step
                avg_loss.backward()
                adam.minimize(avg_loss)
                mnist.clear_gradients()
                # step 4: periodically evaluate on the test set
                if batch_id % 100 == 0 and batch_id != 0:
                    mnist.eval()
                    test_cost, test_acc = test(test_reader, mnist, BATCH_SIZE)
                    mnist.train()
                    print(
                        "epoch {}, batch_id {}, train loss is {}, test cost is {}, test acc is {}"
                        .format(epoch, batch_id, avg_loss.numpy(), test_cost,
                                test_acc))
        fluid.dygraph.save_dygraph(mnist.state_dict(), model_file)
        end_time = time.time()
        print("training model has finished! time=%.2fs" %
              (end_time - start_time))
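train4 calls a test helper that is not part of this listing. A plausible sketch, assuming the model returns (prediction, accuracy) when given a label, as Example #3's mnist(img, label) call suggests:

def test(reader, model, batch_size):
    # average loss and accuracy over the whole test reader
    losses, accs = [], []
    for data in reader():
        x = np.array([d[0].reshape(1, 28, 28) for d in data]).astype('float32')
        y = np.array([d[1] for d in data]).astype('int64').reshape(batch_size, 1)
        img = fluid.dygraph.to_variable(x)
        label = fluid.dygraph.to_variable(y)
        prediction, acc = model(img, label)
        loss = fluid.layers.cross_entropy(prediction, label)
        losses.append(fluid.layers.mean(loss).numpy())
        accs.append(acc.numpy())
    return np.mean(losses), np.mean(accs)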
Example #3
def train3(use_cudnn, model_file):
    print("train3........................................")
    print("\t多卡训练(paddle有bug,没有调试通)")
    place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id)
    with fluid.dygraph.guard(place):
        strategy = fluid.dygraph.parallel.prepare_context()
        epoch_num = 5
        BATCH_SIZE = 64
        mnist = MNIST(use_cudnn=use_cudnn)
        adam = fluid.optimizer.AdamOptimizer(learning_rate=0.001,
                                             parameter_list=mnist.parameters())
        mnist = fluid.dygraph.parallel.DataParallel(mnist, strategy)

        train_reader = paddle.batch(paddle.dataset.mnist.train(),
                                    batch_size=BATCH_SIZE,
                                    drop_last=True)
        train_reader = fluid.contrib.reader.distributed_batch_reader(
            train_reader)

        start_time = time.time()
        for epoch in range(epoch_num):
            for batch_id, data in enumerate(train_reader()):
                dy_x_data = np.array([x[0].reshape(1, 28, 28)
                                      for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data
                                   ]).astype('int64').reshape(-1, 1)
                img = fluid.dygraph.to_variable(dy_x_data)
                label = fluid.dygraph.to_variable(y_data)
                label.stop_gradient = True

                cost, acc = mnist(img, label)

                loss = fluid.layers.cross_entropy(cost, label)
                avg_loss = fluid.layers.mean(loss)

                avg_loss = mnist.scale_loss(avg_loss)
                avg_loss.backward()
                mnist.apply_collective_grads()

                adam.minimize(avg_loss)
                mnist.clear_gradients()
                if batch_id % 100 == 0 and batch_id != 0:
                    print("epoch: {}, batch_id: {}, loss is: {}".format(
                        epoch, batch_id, avg_loss.numpy()))
        fluid.dygraph.save_dygraph(mnist.state_dict(), model_file)
        end_time = time.time()
        print("training model has finished! time=%.2fs" %
              (end_time - start_time))
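train3 is meant to be started through Paddle's distributed launcher, which spawns one process per card and sets the dev_id that Env() reads, e.g. python -m paddle.distributed.launch --selected_gpus=0,1 train.py (script name assumed). In a plain single-process run, scale_loss and apply_collective_grads should degrade to no-ops, so the code still trains on one card.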
Example #4
def train2():
    print("train2........................................")
    print("\t获取神经网络中的参数")
    with fluid.dygraph.guard():
        epoch_num = 5
        BATCH_SIZE = 64

        mnist = MNIST()
        adam = fluid.optimizer.AdamOptimizer(learning_rate=0.0001,
                                             parameter_list=mnist.parameters())
        train_reader = paddle.batch(paddle.dataset.mnist.train(),
                                    batch_size=BATCH_SIZE,
                                    drop_last=True)

        np.set_printoptions(precision=3, suppress=True)
        for epoch in range(epoch_num):
            for batch_id, data in enumerate(train_reader()):
                dy_x_data = np.array([x[0].reshape(1, 28, 28)
                                      for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data
                                   ]).astype("int64").reshape(BATCH_SIZE, 1)
                img = fluid.dygraph.to_variable(dy_x_data)
                label = fluid.dygraph.to_variable(y_data)
                label.stop_gradient = True

                cost = mnist(img)
                loss = fluid.layers.cross_entropy(cost, label)
                avg_loss = fluid.layers.mean(loss)

                dy_out = avg_loss.numpy()

                avg_loss.backward()
                adam.minimize(avg_loss)
                mnist.clear_gradients()

                dy_param_value = {}
                for param in mnist.parameters():
                    print("%s = %s" % (param.name, str(param.numpy())))
                    dy_param_value[param.name] = param.numpy()

                if batch_id % 20 == 0:
                    print("loss at step {}: {}".format(batch_id,
                                                       avg_loss.numpy()))
                # one batch is enough for this parameter-inspection demo
                break
            break
        print("Final loss : {}".format(avg_loss.numpy()))