示例#1
0
def main():
    """End-to-end training driver for the fit-a-line regression model.

    Builds the network, runs `num_epochs` epochs of training, plots and
    prints the train cost every 10 steps and the test cost every 100 steps,
    and saves an inference model each step.

    Relies on module-level names defined elsewhere in this file:
    network, backPropagation, preProcess, train_test, fluid, place, exe,
    num_epochs, math, sys.
    """
    ##% matplotlib inline
    params_dirname = "fit_a_line.inference.model"
    x, y, y_predict = network()
    feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
    avg_loss, main_program, test_program, startup_program = backPropagation(
        y_predict, y)
    exe.run(startup_program)
    train_prompt = "train cost"
    test_prompt = "test cost"
    from paddle.utils.plot import Ploter
    plot_prompt = Ploter(train_prompt, test_prompt)
    step = 0

    exe_test = fluid.Executor(place)
    train_reader, test_reader = preProcess()
    for pass_id in range(num_epochs):
        for data_train in train_reader():
            # trailing comma unpacks the single fetched loss tensor
            avg_loss_value, = exe.run(main_program,
                                      feed=feeder.feed(data_train),
                                      fetch_list=[avg_loss])
            if step % 10 == 0:  # record and print the training loss every 10 batches
                plot_prompt.append(train_prompt, step, avg_loss_value[0])
                plot_prompt.plot()
                print("%s, Step %d, Cost %f" %
                      (train_prompt, step, avg_loss_value[0]))
            if step % 100 == 0:  # record and print the test loss every 100 batches
                test_metics = train_test(executor=exe_test,
                                         program=test_program,
                                         reader=test_reader,
                                         fetch_list=[avg_loss.name],
                                         feeder=feeder)
                plot_prompt.append(test_prompt, step, test_metics[0])
                plot_prompt.plot()
                print("%s, Step %d, Cost %f" %
                      (test_prompt, step, test_metics[0]))
                # stop once the test cost is low enough
                # NOTE(review): this `break` only leaves the inner batch loop;
                # the epoch loop keeps running — confirm that is intended.
                if test_metics[0] < 10.0:
                    break

            step += 1

            if math.isnan(float(avg_loss_value[0])):
                sys.exit("got NaN loss, training failed.")

            # save the trained parameters to the path given above
            # NOTE(review): this saves on every step, which is expensive —
            # presumably meant to run less frequently; verify.
            if params_dirname is not None:
                fluid.io.save_inference_model(params_dirname, ['x'],
                                              [y_predict], exe)
示例#2
0
        for x_c in zip(accumulated, outs):
            print(count,x_c,outs,zip(accumulated, outs))   
        #print(count,x_c,accumulated,outs,zip(accumulated, outs))
        count += 1 # 累加测试集中的样本数量
    return [x_d / count for x_d in accumulated] # 计算平均损失


# initialize the executor
#%matplotlib inline  # NOTE: when run via %run train20.py in a Jupyter notebook this line raises an error; running it alone first and then %run train20.py draws the curves.
params_dirname = "fit_a_line.inference.model"
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe.run(startup_program)
train_prompt = "train cost"
test_prompt = "test cost"
from paddle.utils.plot import Ploter
plot_prompt = Ploter(train_prompt, test_prompt)
step = 0

exe_test = fluid.Executor(place)

# main training loop
for pass_id in range(num_epochs):
    #n=0
    for data_train in train_reader():
        # NOTE(review): unlike the other examples there is no trailing-comma
        # unpack here, so avg_loss_value is the full fetched list — verify
        # how it is indexed further down (truncated in this fragment).
        avg_loss_value = exe.run(main_program,
                                  feed=feeder.feed(data_train),
                                  fetch_list=[avg_loss])
        #print("(%d,%d):avg_loss_value[0]= %f,avg_loss.name=%s" %
              #(pass_id,n,avg_loss_value[0],avg_loss.name))
        #print(avg_loss)
        #print(data_train)
示例#3
0

# In[20]:


def event_handler(pass_id, batch_id, cost):
    """Print one training-progress line: pass index, batch index and cost."""
    message = "Pass %d, Batch %d, Cost %f" % (pass_id, batch_id, cost)
    print(message)


# In[21]:

from paddle.utils.plot import Ploter

# titles for the two curves tracked by the shared cost plotter
train_prompt = "Train cost"
test_prompt = "Test cost"
cost_ploter = Ploter(train_prompt, test_prompt)


def event_handler_plot(ploter_title, step, cost):
    """Append (step, cost) under *ploter_title* to the shared plotter and redraw."""
    # `cost_ploter` is the module-level Ploter created above.
    cost_ploter.append(ploter_title, step, cost)
    cost_ploter.plot()


# In[22]:

use_cuda = True
# Select the execution device.
# BUG FIX: the original read `fluid.CPUPlace` without parentheses, which binds
# the class object itself rather than a place instance; building an Executor
# from it would fail on the CPU path.
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

prediction, [avg_loss, acc] = train_program()

img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
示例#4
0

build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True  # enable the in-place memory-reuse strategy

# main training loop
## name the directory used for saving the model and set up the executor
params_dirname = "fit_a_line.inference.model"
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
# NOTE(review): both the default startup program and `startup_program` are run
# here — this double initialization looks redundant; confirm which is intended.
exe.run(fluid.default_startup_program())
exe.run(startup_program)
train_prompt = "train cost"
test_prompt = "test cost"

from paddle.utils.plot import Ploter
plot_prompt = Ploter(train_prompt, test_prompt)
step = 0

exe_test = fluid.Executor(place)
for pass_id in range(num_epochs):
    for data_train in train_reader():
        # trailing comma unpacks the single fetched loss tensor
        avg_loss_value, = exe.run(main_program,
                                  feed=feeder.feed(data_train),
                                  fetch_list=[avg_loss])
        if step % 10 == 0:  # record and print the training loss every 10 batches
            plot_prompt.append(train_prompt, step, avg_loss_value[0])
            #plot_prompt.plot()
            print("%s, Step %d, Cost %f" %
                  (train_prompt, step, avg_loss_value[0]))
        if step % 100 == 0:  # record and print the test loss every 100 batches
            test_metics = train_test(executor=exe_test,
示例#5
0
    return optimizer


# In[12]:

feed_order = ['img', 'label']  # feed order / data layout
params_dirname = "./DNN_model"  # model save path

# In[13]:

# event-handler helpers for plotting train/test cost
from paddle.utils.plot import Ploter
from paddle.fluid.contrib.trainer import EndStepEvent
train_title = "Train cost"
test_title = "Test cost"
plot_cost = Ploter(train_title, test_title)

step = 0


def event_handler_plot(event):
    global step
    if isinstance(event, EndStepEvent):
        if event.step % 2 == 0:  # 若干个batch,记录cost
            if event.metrics[0] < 10:
                plot_cost.append(train_title, step, event.metrics[0])
                plot_cost.plot()
        if event.step % 20 == 0:  # 若干个batch,记录cost
            test_metrics = trainer.test(reader=test_reader,
                                        feed_order=feed_order)
            if test_metrics[0] < 10:
def event_handler_plot(ploter_title, step, miou):
    """Record one (step, miou) point under *ploter_title* and redraw the plot.

    A fresh Ploter with the "Train cost" / "Test cost" curves is built on
    every call, exactly as in the original.
    """
    title_train = "Train cost"
    title_test = "Test cost"
    ploter = Ploter(title_train, title_test)
    ploter.append(ploter_title, step, miou)
    ploter.plot()
示例#7
0
 # param_dict, opt_dict = fluid.load_dygraph('./checkpoint/2D/transform_2D_epoch10')
 # model.load_dict(param_dict)
 model.train()
 # 定义学习率,并加载优化器参数到模型中
 # total_steps = len(train_data_couple) * (Config.train_number_epochs - 1)
 # lr = fluid.dygraph.PolynomialDecay(0.01, total_steps, 0.0001)
 # 定义优化器
 optimizer = fluid.optimizer.SGD(learning_rate=0.000001,
                                 parameter_list=model.parameters())
 # optimizer = fluid.optimizer.AdamOptimizer(learning_rate=lr, parameter_list=model.parameters())
 # regularization=fluid.regularizer.L2Decay(regularization_coeff=0.1))
 # 定义损失函数
 loss_contrastive = ContrastiveLoss(512)
 # Training Time!
 train_prompt = "Train cost"
 train_cost = Ploter(train_prompt)
 print("Training time ...")
 _loss = float("INF")
 for epoch in range(1, Config.train_number_epochs + 1):
     """Training"""
     step = 0
     data_couple = Config.create_data_couple()
     sum_loss = 0
     while len(data_couple) >= batch:
         siamese_dataset = Dataset().generate(
             data_couple, batch=Config.train_batch_size)
         Input1, Input2, Label = [
             fluid.dygraph.to_variable(x.astype('float32'))
             for x in siamese_dataset
         ]
         # print(Input1.shape, Input2.shape, Label.shape)
示例#8
0
def train():
    """Train the multi-column CNN (MCNN) crowd-counting model.

    Runs `cfig.total_iters` passes over the training images, optimizing an
    RMSE-style density-map loss with Adam. Every 5 iterations it saves an
    inference-model snapshot and runs a validation pass, writing MAE/RMSE
    to a per-iteration log file.

    Depends on module-level names defined elsewhere in this project:
    fluid, np, cv, os, time, ConfigFactory, multi_column_cnn,
    read_crop_train_data, read_resize_train_data, Ploter.
    """
    dataset = 'A'
    # training data set
    img_root_dir = r'D:/YourZhouProject/mcnn_project/pytorch_mcnn/part_' + dataset + r'_final/train_data/images/'
    gt_root_dir = r'D:/YourZhouProject/mcnn_project/pytorch_mcnn/part_' + dataset + r'_final/train_data/ground_truth/'
    # test (validation) data set
    val_img_root_dir = r'D:/YourZhouProject/mcnn_project/pytorch_mcnn/part_' + dataset + r'_final/test_data/images/'
    val_gt_root_dir = r'D:/YourZhouProject/mcnn_project/pytorch_mcnn/part_' + dataset + r'_final/test_data/ground_truth/'

    # file lists of the training set
    img_file_list = os.listdir(img_root_dir)
    gt_img_file_list = os.listdir(gt_root_dir)

    # file lists of the validation set
    val_img_file_list = os.listdir(val_img_root_dir)
    val_gt_file_list = os.listdir(val_gt_root_dir)

    # training configuration
    cfig = ConfigFactory()

    # input variable definitions
    input_img_data = fluid.data(name='input_img_data',
                                shape=[-1, 3, 256, 256],
                                dtype='float32')
    density_map_data = fluid.data(name='density_map_data',
                                  shape=[-1, 1, 64, 64],
                                  dtype='float32')

    # network generation
    inference_density_map = multi_column_cnn(input_img_data)

    # density map loss
    # loss_sub = fluid.layers.elementwise_sub(density_map_data, inference_density_map)
    # density_map_loss = 0.5 * fluid.layers.reduce_sum(fluid.layers.square(loss_sub))
    squar = fluid.layers.square_error_cost(input=inference_density_map,
                                           label=density_map_data)
    cost = fluid.layers.sqrt(squar, name=None)
    # print(cost.shape)
    avg_cost = fluid.layers.mean(cost)
    # print(avg_cost.shape)

    # joint-training variants kept from the original experiments
    # joint_loss = density_map_loss
    # avg_cost = fluid.layers.mean(joint_loss)
    # acc = fluid.layers.accuracy(input=inference_density_map, label=density_map_data)

    # clone the test program before the optimizer rewrites the main program
    test_program = fluid.default_main_program().clone(for_test=True)

    # Adam optimizer with the configured learning rate
    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=cfig.lr)
    optimizer.minimize(avg_cost)

    # executor on GPU
    place = fluid.CUDAPlace(0)
    # place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    # parameter initialization
    exe.run(fluid.default_startup_program())

    # should a DataFeeder be used for input instead? (open question in original)
    # feeder = fluid.DataFeeder(place=place, feed_list=[input_img_data, density_map_data])

    # directory that receives the training logs
    file_path = cfig.log_router

    # create the training-log directory
    if not os.path.exists(file_path):
        os.makedirs(file_path)

    # create the model/checkpoint directory
    if not os.path.exists(cfig.model_router):
        os.makedirs(cfig.model_router)
    log = open(cfig.log_router + cfig.name + r'_training.logs',
               mode='a+',
               encoding='utf-8')

    # visualize the training cost with the plotter
    train_prompt = "Train cost"
    cost_ploter = Ploter(train_prompt)

    def event_handler_plot(ploter_title, step, cost):
        # append one point and redraw the training-cost curve
        cost_ploter.append(ploter_title, step, cost)
        cost_ploter.plot()

    # start training
    step = 0

    for i in range(cfig.total_iters):
        # training pass
        for file_index in range(len(img_file_list)):
            # image path
            img_path = img_root_dir + img_file_list[file_index]
            # skip images smaller than 256x256 or without 3 channels
            img_check = cv.imread(img_path)
            h, w, s = img_check.shape[0], img_check.shape[1], img_check.shape[
                2]
            if h < 256 or w < 256 or s < 3:
                continue

            # ground-truth annotation path
            gt_path = gt_root_dir + 'GT_' + img_file_list[file_index].split(
                r'.')[0]
            # load the training image, ground-truth density map and count

            # data augmentation (currently pinned to the resize branch)
            # Data_enhancement = np.random.randint(2)
            Data_enhancement = 1
            # print(Data_enhancement)
            if Data_enhancement == 0:
                img, gt_dmp, gt_count = read_crop_train_data(img_path,
                                                             gt_path,
                                                             scale=4)
            else:
                img, gt_dmp, gt_count = read_resize_train_data(img_path,
                                                               gt_path,
                                                               scale=4)

            # show_density_map(img[0, 0, :, :])
            # show_density_map(gt_dmp[0, 0, :, :])
            # plt.imshow(img[0, 0, :, :])
            # plt.show()
            # cv.imshow("123", img[0, :, :, :])
            # # cv.imshow("123", rea_img)
            # cv.waitKey(0)
            # cv.destroyAllWindows()

            # train_img = ((img - 127.5) / 128).astype(np.float32)
            # NOTE(review): training normalizes to [-1, 1] here while the
            # validation loop below uses (img - 127.5) / 128 — confirm the
            # mismatch is intended.
            train_img = img.astype(np.float32) / 255.0 * 2.0 - 1.0
            # feed dictionary
            feed_dict = {
                'input_img_data': train_img,
                'density_map_data': gt_dmp.astype(np.float32)
            }

            # main training step
            inf_dmp, loss = exe.run(
                program=fluid.default_main_program(),
                feed=feed_dict,
                fetch_list=[inference_density_map, avg_cost])

            # show_density_map(inf_dmp[0, 0, :, :])

            # if step % 100 == 0:
            #     event_handler_plot(train_prompt, step, loss[0])

            # current timestamp
            format_time = str(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

            # progress logging
            format_str = 'iter={}, step={}, joint loss={}, inference={}, gt={} '
            log_line = format_time, img_file_list[
                file_index], format_str.format(
                    i,
                    i * len(img_file_list) + file_index, loss, inf_dmp.sum(),
                    gt_count)
            log.writelines(str(log_line) + '\n')
            print(log_line)

            step += 1

        # validation every 5 iterations
        if i % 5 == 0:
            # save an inference-model snapshot during training for later use
            save_path = cfig.model_router + 'infer_model' + str(i) + '/'
            # create the directory that holds this snapshot
            os.makedirs(save_path)
            # save the inference model
            fluid.io.save_inference_model(
                save_path,
                feeded_var_names=[input_img_data.name],
                target_vars=[inference_density_map],
                executor=exe)

            val_log = open(cfig.log_router + cfig.name + r'_validating_' +
                           str(i) + '_.logs',
                           mode='w',
                           encoding='utf-8')
            absolute_error = 0.0
            square_error = 0.0
            # validation loop
            for file_index in range(len(val_img_file_list)):

                # test image path
                img_path = val_img_root_dir + val_img_file_list[file_index]
                # skip images smaller than 256x256 or without 3 channels
                img_check = cv.imread(img_path)
                h, w, s = img_check.shape[0], img_check.shape[
                    1], img_check.shape[2]
                if h < 256 or w < 256 or s < 3:
                    continue

                # test annotation path
                gt_path = val_gt_root_dir + 'GT_' + val_img_file_list[
                    file_index].split(r'.')[0]
                # load the test image, ground-truth density map and count
                img, gt_dmp, gt_count = read_resize_train_data(img_path,
                                                               gt_path,
                                                               scale=4)

                # feed dictionary
                feed_dict = {
                    'input_img_data': ((img - 127.5) / 128).astype(np.float32),
                    'density_map_data': gt_dmp.astype(np.float32)
                }

                # forward pass on the test program
                inf_dmp, loss = exe.run(
                    program=test_program,
                    feed=feed_dict,
                    fetch_list=[inference_density_map, avg_cost])

                # accumulate validation metrics and log them
                format_time = str(
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                format_str = 'Test iter={}, step={}, joint loss={}, inference={}, gt={} '
                absolute_error = absolute_error + np.abs(
                    np.subtract(gt_count, inf_dmp.sum())).mean()
                square_error = square_error + np.power(
                    np.subtract(gt_count, inf_dmp.sum()), 2).mean()
                log_line = format_time, val_img_file_list[
                    file_index], format_str.format(i, file_index, loss,
                                                   inf_dmp.sum(), gt_count)
                val_log.writelines(str(log_line) + '\n')
                print(log_line)

            # final validation metrics (MAE and RMSE)
            # NOTE(review): the denominator counts all listed files even though
            # undersized images are skipped above — verify it should not count
            # only the processed ones.
            mae = absolute_error / len(val_img_file_list)
            # BUG FIX: RMSE must be derived from the accumulated *squared*
            # error; the original took sqrt of absolute_error and never used
            # square_error at all.
            rmse = np.sqrt(square_error / len(val_img_file_list))
            val_log.writelines(
                str('MAE_' + str(mae) + '_MSE_' + str(rmse)) + '\n')
            val_log.close()
            print(str('MAE_' + str(mae) + '_MSE_' + str(rmse)))
    accumulated = len([avg_cost, acc]) * [0]
    for tid, test_data in enumerate(reader()):
        avg_cost_np = test_exe.run(program=program,
                                   feed=feeder_test.feed(test_data),
                                   fetch_list=[avg_cost, acc])
        accumulated = [x[0] + x[1][0] for x in zip(accumulated, avg_cost_np)]
        count += 1
    return [x / count for x in accumulated]

params_dirname = "image_classification_resnet.inference.model"

from paddle.utils.plot import Ploter

train_prompt = "Train cost"
test_prompt = "Test cost"
# NOTE(review): arguments are passed as (test_prompt, train_prompt) here — the
# reverse of the (train, test) order used in the other snippets; confirm the
# curve/legend order is intended.
plot_cost = Ploter(test_prompt,train_prompt)

# main train loop.
def train_loop():
    feed_var_list_loop = [
        main_program.global_block().var(var_name) for var_name in feed_order
    ]
    feeder = fluid.DataFeeder(
        feed_list=feed_var_list_loop, place=place)
    exe.run(star_program)

    step = 0
    for pass_id in range(EPOCH_NUM):
        for step_id, data_train in enumerate(train_reader()):
            avg_loss_value = exe.run(main_program,
                                     feed=feeder.feed(data_train),
示例#10
0
def main():
    """Train a 2-class segmentation network and keep the best checkpoint.

    Builds one of unet_simple / unet_base / deeplabv3p (selected by
    `args.net`), trains it with Adam for `args.num_epochs` epochs, prints
    the train loss/mIoU every 10 steps, evaluates the full test set every
    100 steps, and saves inference + persistable models whenever the test
    mIoU improves.

    Depends on module-level names defined elsewhere in this file: args,
    code_base_dir, data_reader, unet_simple, unet_base, deeplabv3p,
    create_loss, mean_iou, Ploter, paddle, fluid, np, os, sys, math.
    """
    batch_size = 32
    num_epochs = args.num_epochs
    net_type = args.net

    # paths for training checkpoints (ckpt) and inference (inf) models
    param_base_dir = os.path.join(code_base_dir, 'params')
    param_base_dir = os.path.join(param_base_dir, net_type)
    infer_param_path = os.path.join(param_base_dir, "inf")
    ckpt_param_path = os.path.join(param_base_dir, "ckpt")
    print(infer_param_path)
    print(ckpt_param_path)

    train_reader = paddle.batch(
        paddle.reader.shuffle(data_reader(), int(batch_size * 1.5)),
        batch_size)
    test_reader = paddle.batch(
        paddle.reader.shuffle(data_reader(8, 10), int(batch_size * 1.5)),
        batch_size)

    train_program = fluid.Program()
    train_init = fluid.Program()

    with fluid.program_guard(train_program, train_init):
        image = fluid.layers.data(name='image',
                                  shape=[3, 512, 512],
                                  dtype='float32')
        label = fluid.layers.data(name='label',
                                  shape=[1, 512, 512],
                                  dtype='int32')
        train_loader = fluid.io.DataLoader.from_generator(
            feed_list=[image, label], capacity=batch_size)
        test_loader = fluid.io.DataLoader.from_generator(
            feed_list=[image, label], capacity=batch_size)

        # pick the network architecture from the CLI argument
        if net_type == "unet_simple":
            prediction = unet_simple(image, 2, [512, 512])
        elif net_type == "unet_base":
            prediction = unet_base(image, 2, [512, 512])
        elif net_type == "deeplabv3":
            prediction = deeplabv3p(image, 2)
        else:
            print("错误的网络类型")
            # NOTE(review): exits with status 0 on an error path — a nonzero
            # status would be more conventional; confirm.
            sys.exit(0)

        avg_loss = create_loss(prediction, label, 2)

        miou = mean_iou(prediction, label, 2)

        # 		decay=paddle.fluid.regularizer.L2Decay(0.1)
        # optimizer = fluid.optimizer.SGD(learning_rate=0.0005,regularization=decay)
        # optimizer = fluid.optimizer.DecayedAdagradOptimizer(learning_rate=0.02,regularization=decay)
        # optimizer = fluid.optimizer.RMSProp(learning_rate=0.1,momentum=0.8,centered=True, regularization=decay)

        optimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.003)

        # optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.006, momentum=0.8,regularization=decay)

        optimizer.minimize(avg_loss)

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(train_init)
    # fluid.io.load_persistables(exe, ckpt_param_path, train_init)

    exe_test = fluid.Executor(place)

    # clone before compiling so the test program shares trained parameters
    test_program = train_program.clone(for_test=True)

    # train_program=fluid.CompiledProgram(train_program).with_data_parallel(loss_name=avg_loss.name)
    test_program = fluid.CompiledProgram(test_program).with_data_parallel(
        loss_name=avg_loss.name)

    train_loader.set_sample_list_generator(train_reader, places=place)
    test_loader.set_sample_list_generator(test_reader, places=place)

    feeder = fluid.DataFeeder(place=place, feed_list=[image, label])

    step = 1
    best_miou = 0

    train_prompt = "Train_miou"
    test_prompt = "Test_miou"

    # NOTE(review): plot_prompt is only referenced in commented-out code below.
    plot_prompt = Ploter(train_prompt, test_prompt)

    for pass_id in range(num_epochs):
        for data_train in train_loader():
            avg_loss_value, miou_value = exe.run(train_program,
                                                 feed=data_train,
                                                 fetch_list=[avg_loss, miou])

            if step % 10 == 0:
                print("\t\tTrain pass %d, Step %d, Cost %f, Miou %f" %
                      (pass_id, step, avg_loss_value[0], miou_value[0]))

            # if step % 10 ==0:
            # plot_prompt.append(train_prompt, step, miou_value[0])
            # plot_prompt.plot()

            eval_miou = 0
            # full test-set evaluation every 100 steps
            if step % 100 == 0:
                # NOTE(review): `auc_metric` is never used below — leftover?
                auc_metric = fluid.metrics.Auc("AUC")
                test_losses = []
                test_mious = []
                for _, test_data in enumerate(test_loader()):
                    # print(test_data)
                    # input("pause")

                    _, test_loss, test_miou = exe_test.run(
                        test_program,
                        feed=test_data,
                        fetch_list=[prediction, avg_loss, miou])
                    test_losses.append(test_loss[0])
                    test_mious.append(test_miou[0])

                eval_miou = np.average(np.array(test_mious))
                # plot_prompt.append(test_prompt, step, eval_miou)
                # plot_prompt.plot()

                print("Test loss: %f ,miou: %f" %
                      (np.average(np.array(test_losses)), eval_miou))

            if math.isnan(float(avg_loss_value[0])):
                sys.exit("got NaN loss, training failed.")

            # keep the best model (only checked on evaluation steps)
            if step % 100 == 0 and param_base_dir is not None and eval_miou > best_miou:
                best_miou = eval_miou
                print("Saving params of step: %d" % step)
                fluid.io.save_inference_model(infer_param_path,
                                              feeded_var_names=['image'],
                                              target_vars=[prediction],
                                              executor=exe,
                                              main_program=train_program)
                fluid.io.save_persistables(exe, ckpt_param_path, train_program)
            step += 1
    print(best_miou)