Example #1
import math
import sys

import paddle.fluid as fluid


# network(), backPropagation(), preProcess(), train_test(), place, exe and
# num_epochs are defined elsewhere in the original script.
def main():
    # %matplotlib inline  (enable this magic when running in a Jupyter notebook)
    params_dirname = "fit_a_line.inference.model"
    x, y, y_predict = network()
    feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
    avg_loss, main_program, test_program, startup_program = backPropagation(
        y_predict, y)
    exe.run(startup_program)
    train_prompt = "train cost"
    test_prompt = "test cost"
    from paddle.utils.plot import Ploter
    plot_prompt = Ploter(train_prompt, test_prompt)
    step = 0

    exe_test = fluid.Executor(place)
    train_reader, test_reader = preProcess()
    for pass_id in range(num_epochs):
        for data_train in train_reader():
            avg_loss_value, = exe.run(main_program,
                                      feed=feeder.feed(data_train),
                                      fetch_list=[avg_loss])
            if step % 10 == 0:  # record and print the training cost every 10 batches
                plot_prompt.append(train_prompt, step, avg_loss_value[0])
                plot_prompt.plot()
                print("%s, Step %d, Cost %f" %
                      (train_prompt, step, avg_loss_value[0]))
            if step % 100 == 0:  # record and print the test cost every 100 batches
                test_metrics = train_test(executor=exe_test,
                                          program=test_program,
                                          reader=test_reader,
                                          fetch_list=[avg_loss.name],
                                          feeder=feeder)
                plot_prompt.append(test_prompt, step, test_metrics[0])
                plot_prompt.plot()
                print("%s, Step %d, Cost %f" %
                      (test_prompt, step, test_metrics[0]))
                if test_metrics[0] < 10.0:  # stop training once the test cost is low enough
                    break

            step += 1

            if math.isnan(float(avg_loss_value[0])):
                sys.exit("got NaN loss, training failed.")

            # Save the trained parameters to the path given earlier
            if params_dirname is not None:
                fluid.io.save_inference_model(params_dirname, ['x'],
                                              [y_predict], exe)
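
Example #1 ends by writing an inference model with `fluid.io.save_inference_model`. As a companion, here is a minimal sketch of loading that model back for prediction; the executor name `infer_exe`, the batch size, and the randomly generated 13-dimensional inputs (the feature width of the UCI housing data that fit_a_line uses) are illustrative assumptions, not part of the original example.

import numpy as np
import paddle.fluid as fluid

params_dirname = "fit_a_line.inference.model"
place = fluid.CPUPlace()
infer_exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()

with fluid.scope_guard(inference_scope):
    # load_inference_model returns the pruned program plus the names of the
    # feed variables and the fetch targets that were saved during training.
    [inference_program, feed_target_names,
     fetch_targets] = fluid.io.load_inference_model(params_dirname, infer_exe)

    # Hypothetical input batch: 10 samples with 13 features each.
    infer_feats = np.random.uniform(-1, 1, (10, 13)).astype("float32")

    results = infer_exe.run(inference_program,
                            feed={feed_target_names[0]: infer_feats},
                            fetch_list=fetch_targets)
    print("predicted prices:", results[0])
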
Example #2
exe.run(startup_program)
train_prompt = "train cost"
test_prompt = "test cost"

from paddle.utils.plot import Ploter
plot_prompt = Ploter(train_prompt, test_prompt)
step = 0

exe_test = fluid.Executor(place)
for pass_id in range(num_epochs):
    for data_train in train_reader():
        avg_loss_value, = exe.run(main_program,
                                  feed=feeder.feed(data_train),
                                  fetch_list=[avg_loss])
        if step % 10 == 0:  # record and print the training cost every 10 batches
            plot_prompt.append(train_prompt, step, avg_loss_value[0])
            #plot_prompt.plot()
            print("%s, Step %d, Cost %f" %
                  (train_prompt, step, avg_loss_value[0]))
        if step % 100 == 0:  # record and print the test cost every 100 batches
            test_metrics = train_test(executor=exe_test,
                                      program=test_program,
                                      reader=test_reader,
                                      fetch_list=[avg_loss.name],
                                      feeder=feeder)
            plot_prompt.append(test_prompt, step, test_metrics[0])
            #plot_prompt.plot()
            print("%s, Step %d, Cost %f" % (test_prompt, step, test_metrics[0]))
            if test_metrics[0] < 10.0:  # stop training once the test cost is low enough
                break
        step += 1  # advance the step counter (as in Example #1)
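
Both training loops call a `train_test` helper that is not shown in these snippets. A plausible minimal sketch, averaging the fetched loss over one pass of the test reader (the original implementation may differ in details):

def train_test(executor, program, reader, feeder, fetch_list):
    # Run one full pass over the test reader and average each fetched value.
    accumulated = [0.0] * len(fetch_list)
    count = 0
    for data_test in reader():
        outs = executor.run(program=program,
                            feed=feeder.feed(data_test),
                            fetch_list=fetch_list)
        accumulated = [acc + out[0] for acc, out in zip(accumulated, outs)]
        count += 1
    return [acc / count for acc in accumulated]
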
Example #3
from paddle.utils.plot import Ploter

def event_handler_plot(ploter_title, step, miou):
    train_prompt = "Train cost"
    test_prompt = "Test cost"
    cost_ploter = Ploter(train_prompt, test_prompt)
    cost_ploter.append(ploter_title, step, miou)
    cost_ploter.plot()
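
Because the handler above builds a fresh Ploter on every call, each redraw starts from a single point. A sketch of the more usual arrangement, with one shared Ploter created at module level so that appended points accumulate (names mirror the handler above and are otherwise illustrative):

from paddle.utils.plot import Ploter

train_prompt = "Train cost"
test_prompt = "Test cost"
cost_ploter = Ploter(train_prompt, test_prompt)

def event_handler_plot(ploter_title, step, cost):
    # Append one data point to the shared plot and redraw it.
    cost_ploter.append(ploter_title, step, cost)
    cost_ploter.plot()
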
Example #4
                loss, distance, label = loss_contrastive.forward(
                    Output1, Output2, Label)
                avg_loss = layers.reduce_mean(loss)
                sum_loss += avg_loss.numpy()[0]
                print(
                    "Epoch number {}, step {}: Label {}; Euclidean Distance {}; Current loss {}\n"
                    .format(epoch, step, np.concatenate(label),
                            np.concatenate(distance), avg_loss.numpy()))
                # Backward pass and parameter update
                # if distance == 0:
                #     continue
                avg_loss.backward()
                optimizer.minimize(avg_loss)
                model.clear_gradients()
                step += 1
            train_cost.append(train_prompt, epoch, sum_loss / step)

            # Directory where model checkpoints are saved
            save_path = './checkpoint/' + Config.training_dir.split('/')[-1]
            os.makedirs(save_path, exist_ok=True)
            # Save the model parameters whenever the epoch loss improves
            if sum_loss < _loss:
                _loss = sum_loss
                save_path += '/transform_2D_epoch{}'.format(epoch)
                fluid.save_dygraph(model.state_dict(), save_path)

        os.makedirs('/home/aistudio/figure_and_csv', exist_ok=True)
        train_cost.plot('/home/aistudio/figure_and_csv/' +
                        Config.training_dir.split('/')[-1] +
                        '_train_{}.png'.format(t))
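
Example #4 saves checkpoints with `fluid.save_dygraph`. A minimal restore sketch, assuming the model object has been re-created from the same network class inside a fluid.dygraph.guard() block; the helper name `load_checkpoint` is hypothetical:

import paddle.fluid as fluid

def load_checkpoint(model, save_path):
    # `save_path` is the same prefix passed to fluid.save_dygraph above
    # (no ".pdparams" extension); load_dygraph returns the parameter and
    # optimizer state dicts written alongside it.
    param_state_dict, _ = fluid.load_dygraph(save_path)
    model.set_dict(param_state_dict)
    model.eval()  # switch the Layer to evaluation mode for inference
    return model
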