Example #1
def main(args, return_dict={}):
    config = get_config(args.config, overrides=args.override, show=True)
    config.mode = "valid"
    # assign place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    if config["use_data_parallel"]:
        net = paddle.DataParallel(net)

    init_model(config, net, optimizer=None)
    valid_dataloader = Reader(config, 'valid', places=place)()
    net.eval()
    with paddle.no_grad():
        top1_acc = program.run(valid_dataloader, config, net, None, None, 0,
                               'valid')
    return_dict["top1_acc"] = top1_acc
    return top1_acc
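
A note on the pattern above: the `return_dict` parameter only pays off when `main` runs in a separate process, where a plain return value is lost across the process boundary; the caller passes in a shared dict and reads the result back out of it. The mutable `{}` default is just a fallback for direct calls (Python evaluates it once, so direct calls share the same dict). A minimal calling sketch, assuming the `main` above and a parsed `args` namespace; the `run_eval` helper is hypothetical:

import multiprocessing as mp

def run_eval(args):
    # a Manager dict is shared between parent and child processes
    manager = mp.Manager()
    return_dict = manager.dict()
    p = mp.Process(target=main, args=(args, return_dict))
    p.start()
    p.join()
    # main() stores its result under "top1_acc" before returning
    return return_dict.get("top1_acc")
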
Example #2
def main(args):
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id)

    startup_prog = fluid.Program()
    valid_prog = fluid.Program()
    valid_dataloader, valid_fetchs = program.build(
        config, valid_prog, startup_prog, is_train=False)
    valid_prog = valid_prog.clone(for_test=True)

    exe = fluid.Executor(place)
    exe.run(startup_prog)

    init_model(config, valid_prog, exe)

    valid_reader = Reader(config, 'valid')()
    valid_dataloader.set_sample_list_generator(valid_reader, place)

    compiled_valid_prog = program.compile(config, valid_prog)
    program.run(valid_dataloader, exe, compiled_valid_prog, valid_fetchs, -1,
                'eval', config)
Example #3
def main(args):
    config = get_config(args.config, overrides=args.override, show=True)
    use_gpu = config.get("use_gpu", True)
    places = fluid.cuda_places() if use_gpu else fluid.cpu_places()

    startup_prog = fluid.Program()
    valid_prog = fluid.Program()
    valid_dataloader, valid_fetchs = program.build(config,
                                                   valid_prog,
                                                   startup_prog,
                                                   is_train=False,
                                                   is_distributed=False)
    valid_prog = valid_prog.clone(for_test=True)

    exe = fluid.Executor(places[0])
    exe.run(startup_prog)

    init_model(config, valid_prog, exe)

    valid_reader = Reader(config, 'valid')()
    valid_dataloader.set_sample_list_generator(valid_reader, places)

    compiled_valid_prog = program.compile(config, valid_prog)
    program.run(valid_dataloader, exe, compiled_valid_prog, valid_fetchs, -1,
                'eval')
Example #4
def main(args):
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id)

    # startup_prog is used to do some parameter init work,
    # and train_prog is used to hold the network
    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    train_dataloader, train_fetchs = program.build(config,
                                                   train_prog,
                                                   startup_prog,
                                                   is_train=True)

    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(config,
                                                       valid_prog,
                                                       startup_prog,
                                                       is_train=False)
        # clone to prune content that is irrelevant for evaluation
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = fluid.Executor(place=place)
    # only run startup_prog once to init
    exe.run(startup_prog)

    # load model from checkpoint or pretrained model
    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, place)

    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, place)
        compiled_valid_prog = program.compile(config, valid_prog)

    compiled_train_prog = fleet.main_program
    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train')
        # 2. validate with validate dataset
        if config.validate and epoch_id % config.valid_interval == 0:
            program.run(valid_dataloader, exe, compiled_valid_prog,
                        valid_fetchs, epoch_id, 'valid')

        # 3. save the persistable model
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.ARCHITECTURE["name"])
            save_model(train_prog, model_path, epoch_id)
Example #5
def main(args, return_dict={}):
    config = get_config(args.config, overrides=args.override, show=True)
    config.mode = "valid"
    # assign place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')
    multilabel = config.get("multilabel", False)

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)

    init_model(config, net, optimizer=None)
    valid_dataloader = Reader(config, 'valid', places=place)()
    net.eval()
    with paddle.no_grad():
        if not multilabel:
            top1_acc = program.run(valid_dataloader, config, net, None, None,
                                   0, 'valid')
            return_dict["top1_acc"] = top1_acc

            return top1_acc
        else:
            all_outs = []
            targets = []
            for _, batch in enumerate(valid_dataloader()):
                feeds = program.create_feeds(batch, False, config.classes_num,
                                             multilabel)
                out = net(feeds["image"])
                out = F.sigmoid(out)

                use_distillation = config.get("use_distillation", False)
                if use_distillation:
                    out = out[1]

                all_outs.extend(list(out.numpy()))
                targets.extend(list(feeds["label"].numpy()))
            all_outs = np.array(all_outs)
            targets = np.array(targets)

            mAP = mean_average_precision(all_outs, targets)

            return_dict["mean average precision"] = mAP

            return mAP
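
The multilabel branch above collects sigmoid scores and targets, then delegates to a project helper, `mean_average_precision`. As a rough stand-in for what that helper computes (an assumption, not the project's actual code), macro-averaged average precision can be sketched with scikit-learn:

import numpy as np
from sklearn.metrics import average_precision_score

def mean_average_precision_sketch(scores, targets):
    # scores: (num_samples, num_classes) float scores in [0, 1]
    # targets: (num_samples, num_classes) binary ground-truth labels
    aps = [average_precision_score(targets[:, c], scores[:, c])
           for c in range(targets.shape[1])]
    return float(np.mean(aps))
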
Example #6
def main(args):
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)
    place = env.place()

    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    train_dataloader, train_fetchs = program.build(
        config, train_prog, startup_prog, is_train=True)

    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(
            config, valid_prog, startup_prog, is_train=False)
        valid_prog = valid_prog.clone(for_test=True)

    exe = fluid.Executor(place)
    exe.run(startup_prog)

    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, place)

    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, place)
        compiled_valid_prog = program.compile(config, valid_prog)

    compiled_train_prog = fleet.main_program
    for epoch_id in range(config.epochs):
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train')

        if config.validate and epoch_id % config.valid_interval == 0:
            program.run(valid_dataloader, exe, compiled_valid_prog,
                        valid_fetchs, epoch_id, 'valid')

        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.architecture)
            save_model(train_prog, model_path, epoch_id)
Example #7
def main():
    args = conf.parse_args()
    config = conf.get_config(args.config, overrides=args.override, show=False)

    assert os.path.exists(
        os.path.join(config["Global"]["save_inference_dir"],
                     'inference.pdmodel')) and os.path.exists(
                         os.path.join(config["Global"]["save_inference_dir"],
                                      'inference.pdiparams'))
    config["DataLoader"]["Eval"]["sampler"]["batch_size"] = 1
    config["DataLoader"]["Eval"]["loader"]["num_workers"] = 0
    init_logger()
    device = paddle.set_device("cpu")
    train_dataloader = build_dataloader(config["DataLoader"], "Eval", device,
                                        False)

    def sample_generator(loader):
        def __reader__():
            for indx, data in enumerate(loader):
                images = np.array(data[0])
                yield images

        return __reader__

    paddle.enable_static()
    place = paddle.CPUPlace()
    exe = paddle.static.Executor(place)
    paddleslim.quant.quant_post_static(
        executor=exe,
        model_dir=config["Global"]["save_inference_dir"],
        model_filename='inference.pdmodel',
        params_filename='inference.pdiparams',
        quantize_model_path=os.path.join(
            config["Global"]["save_inference_dir"], "quant_post_static_model"),
        sample_generator=sample_generator(train_dataloader),
        batch_nums=10)
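
Once `quant_post_static` has run, the model written to `quant_post_static_model` can be smoke-tested with the Paddle inference API. A hedged sketch follows: the saved file names depend on the paddleslim version, so adjust them if yours differ, and `save_inference_dir` stands in for the value read from `config["Global"]["save_inference_dir"]`:

import os
import numpy as np
import paddle.inference as paddle_infer

save_inference_dir = "./inference"  # assumed; taken from the config in practice
quant_dir = os.path.join(save_inference_dir, "quant_post_static_model")
infer_config = paddle_infer.Config(
    os.path.join(quant_dir, "inference.pdmodel"),
    os.path.join(quant_dir, "inference.pdiparams"))
predictor = paddle_infer.create_predictor(infer_config)

# feed one random image-shaped batch through the quantized graph
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(np.random.rand(1, 3, 224, 224).astype("float32"))
predictor.run()
output = predictor.get_output_handle(predictor.get_output_names()[0]).copy_to_cpu()
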
Example #8
def main(args, return_dict={}):
    config = get_config(args.config, overrides=args.override, show=True)
    config.mode = "valid"
    # assign place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    if config["use_data_parallel"]:
        net = paddle.DataParallel(net)

    init_model(config, net, optimizer=None)
    save_model(net,
               None,
               model_path='./pretrained/10w/',
               epoch_id=0,
               prefix='ppcls')
Example #9
def main(args):
    """
    All configuration for the training paradigm should be in config["Global"].
    """
    config = get_config(args.config, overrides=args.override, show=False)
    global_config = config["Global"]

    mode = "train"

    log_file = os.path.join(global_config['output_dir'],
                            config["Arch"]["name"], f"{mode}.log")
    init_logger(log_file=log_file)
    print_config(config)

    if global_config.get("is_distributed", True):
        fleet.init(is_collective=True)
    # assign the device
    use_gpu = global_config.get("use_gpu", True)
    # amp related config
    if 'AMP' in config:
        AMP_RELATED_FLAGS_SETTING = {
            'FLAGS_cudnn_exhaustive_search': 1,
            'FLAGS_conv_workspace_size_limit': 1500,
            'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
            'FLAGS_max_inplace_grad_add': 8,
        }
        os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1'
        paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING)

    use_xpu = global_config.get("use_xpu", False)
    use_npu = global_config.get("use_npu", False)
    use_mlu = global_config.get("use_mlu", False)
    assert (
        use_gpu and use_xpu and use_npu and use_mlu
    ) is not True, "gpu, xpu, npu and mlu cannot be true at the same time in static mode!"

    if use_gpu:
        device = paddle.set_device('gpu')
    elif use_xpu:
        device = paddle.set_device('xpu')
    elif use_npu:
        device = paddle.set_device('npu')
    elif use_mlu:
        device = paddle.set_device('mlu')
    else:
        device = paddle.set_device('cpu')

    # visualDL
    vdl_writer = None
    if global_config["use_visualdl"]:
        vdl_dir = os.path.join(global_config["output_dir"], "vdl")
        vdl_writer = LogWriter(vdl_dir)

    # build dataloader
    eval_dataloader = None
    use_dali = global_config.get('use_dali', False)

    class_num = config["Arch"].get("class_num", None)
    config["DataLoader"].update({"class_num": class_num})
    train_dataloader = build_dataloader(config["DataLoader"],
                                        "Train",
                                        device=device,
                                        use_dali=use_dali)
    if global_config["eval_during_train"]:
        eval_dataloader = build_dataloader(config["DataLoader"],
                                           "Eval",
                                           device=device,
                                           use_dali=use_dali)

    step_each_epoch = len(train_dataloader)

    # startup_prog is used to do some parameter init work,
    # and train_prog is used to hold the network
    startup_prog = paddle.static.Program()
    train_prog = paddle.static.Program()

    best_top1_acc = 0.0  # best top1 acc record

    train_fetchs, lr_scheduler, train_feeds, optimizer = program.build(
        config,
        train_prog,
        startup_prog,
        class_num,
        step_each_epoch=step_each_epoch,
        is_train=True,
        is_distributed=global_config.get("is_distributed", True))

    if global_config["eval_during_train"]:
        eval_prog = paddle.static.Program()
        eval_fetchs, _, eval_feeds, _ = program.build(
            config,
            eval_prog,
            startup_prog,
            is_train=False,
            is_distributed=global_config.get("is_distributed", True))
        # clone to prune content that is irrelevant for evaluation
        eval_prog = eval_prog.clone(for_test=True)

    # create the "Executor" with the statement of which device
    exe = paddle.static.Executor(device)
    # Parameter initialization
    exe.run(startup_prog)
    # load pretrained models or checkpoints
    init_model(global_config, train_prog, exe)

    if 'AMP' in config and config.AMP.get("level", "O1") == "O2":
        optimizer.amp_init(device,
                           scope=paddle.static.global_scope(),
                           test_program=eval_prog
                           if global_config["eval_during_train"] else None)

    if not global_config.get("is_distributed", True):
        compiled_train_prog = program.compile(
            config, train_prog, loss_name=train_fetchs["loss"][0].name)
    else:
        compiled_train_prog = train_prog

    if eval_dataloader is not None:
        compiled_eval_prog = program.compile(config, eval_prog)

    for epoch_id in range(global_config["epochs"]):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_feeds,
                    train_fetchs, epoch_id, 'train', config, vdl_writer,
                    lr_scheduler, args.profiler_options)
        # 2. evaluate with eval dataset
        if global_config["eval_during_train"] and epoch_id % global_config[
                "eval_interval"] == 0:
            top1_acc = program.run(eval_dataloader, exe, compiled_eval_prog,
                                   eval_feeds, eval_fetchs, epoch_id, "eval",
                                   config)
            if top1_acc > best_top1_acc:
                best_top1_acc = top1_acc
                message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                    best_top1_acc, epoch_id)
                logger.info(message)
                if epoch_id % global_config["save_interval"] == 0:

                    model_path = os.path.join(global_config["output_dir"],
                                              config["Arch"]["name"])
                    save_model(train_prog, model_path, "best_model")

        # 3. save the persistable model
        if epoch_id % global_config["save_interval"] == 0:
            model_path = os.path.join(global_config["output_dir"],
                                      config["Arch"]["name"])
            save_model(train_prog, model_path, epoch_id)
Example #10
def main(args):
    paddle.seed(12345)

    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    optimizer, lr_scheduler = program.create_optimizer(
        config, parameter_list=net.parameters())

    if config["use_data_parallel"]:
        net = paddle.DataParallel(net)

    # load model from checkpoint or pretrained model
    init_model(config, net, optimizer)

    train_dataloader = Reader(config, 'train', places=place)()

    if config.validate:
        valid_dataloader = Reader(config, 'valid', places=place)()

    last_epoch_id = config.get("last_epoch", -1)
    best_top1_acc = 0.0  # best top1 acc record
    best_top1_epoch = last_epoch_id
    for epoch_id in range(last_epoch_id + 1, config.epochs):
        net.train()
        # 1. train with train dataset
        program.run(train_dataloader, config, net, optimizer, lr_scheduler,
                    epoch_id, 'train')

        # 2. validate with validate dataset
        if config.validate and epoch_id % config.valid_interval == 0:
            net.eval()
            with paddle.no_grad():
                top1_acc = program.run(valid_dataloader, config, net, None,
                                       None, epoch_id, 'valid')
            if top1_acc > best_top1_acc:
                best_top1_acc = top1_acc
                best_top1_epoch = epoch_id
                if epoch_id % config.save_interval == 0:
                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(net, optimizer, model_path, "best_model")
            message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                best_top1_acc, best_top1_epoch)
            logger.info("{:s}".format(logger.coloring(message, "RED")))

        # 3. save the persistable model
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.ARCHITECTURE["name"])
            save_model(net, optimizer, model_path, epoch_id)
Example #11
def main(args):
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id)

    # startup_prog is used to do some parameter init work,
    # and train_prog is used to hold the network
    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    best_top1_acc = 0.0  # best top1 acc record

    if not config.get('use_ema'):
        train_dataloader, train_fetchs = program.build(config,
                                                       train_prog,
                                                       startup_prog,
                                                       is_train=True)
    else:
        train_dataloader, train_fetchs, ema = program.build(config,
                                                            train_prog,
                                                            startup_prog,
                                                            is_train=True)

    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(config,
                                                       valid_prog,
                                                       startup_prog,
                                                       is_train=False)
        # clone to prune content that is irrelevant for evaluation
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = fluid.Executor(place)
    # Parameter initialization
    exe.run(startup_prog)

    # load the model from 1. a checkpoint to resume training, or 2. a pretrained model to finetune
    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, place)

    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, place)
        compiled_valid_prog = program.compile(config, valid_prog)

    compiled_train_prog = fleet.main_program
    vdl_writer = LogWriter(args.vdl_dir) if args.vdl_dir else None

    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train', vdl_writer)
        if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
            # 2. validate with validate dataset
            if config.validate and epoch_id % config.valid_interval == 0:
                if config.get('use_ema'):
                    logger.info(logger.coloring("EMA validate start..."))
                    with ema.apply(exe):
                        top1_acc = program.run(valid_dataloader, exe,
                                               compiled_valid_prog,
                                               valid_fetchs, epoch_id, 'valid')
                    logger.info(logger.coloring("EMA validate over!"))

                top1_acc = program.run(valid_dataloader, exe,
                                       compiled_valid_prog, valid_fetchs,
                                       epoch_id, 'valid')
                if top1_acc > best_top1_acc:
                    best_top1_acc = top1_acc
                    message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                        best_top1_acc, epoch_id)
                    logger.info("{:s}".format(logger.coloring(message, "RED")))
                    if epoch_id % config.save_interval == 0:

                        model_path = os.path.join(config.model_save_dir,
                                                  config.ARCHITECTURE["name"])
                        save_model(train_prog, model_path,
                                   "best_model_in_epoch_" + str(epoch_id))

            # 3. save the persistable model
            if epoch_id % config.save_interval == 0:
                model_path = os.path.join(config.model_save_dir,
                                          config.ARCHITECTURE["name"])
                save_model(train_prog, model_path, epoch_id)
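
All of these entry points assume an `args` namespace with at least `config` and `override`, plus `vdl_dir` in the VisualDL-enabled examples. A plausible reconstruction of the parser, based only on how the fields are used here (not the project's verbatim code):

import argparse

def parse_args():
    parser = argparse.ArgumentParser("PaddleClas training script")
    parser.add_argument('-c', '--config', type=str, required=True,
                        help='path to the YAML config file')
    parser.add_argument('-o', '--override', action='append', default=[],
                        help='config options to override, e.g. -o epochs=10')
    parser.add_argument('--vdl_dir', type=str, default=None,
                        help='VisualDL log directory; logging is enabled when set')
    return parser.parse_args()
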
Example #12
def main(args):
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)
    use_fp16 = config.get('use_fp16', False)
    if use_fp16:
        AMP_RELATED_FLAGS_SETTING = {
            'FLAGS_cudnn_exhaustive_search': 1,
            'FLAGS_conv_workspace_size_limit': 4000,
            'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
            'FLAGS_max_inplace_grad_add': 8,
        }
        os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1'
        paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING)
    # assign the place
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id)

    # startup_prog is used to do some parameter init work,
    # and train_prog is used to hold the network
    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    best_top1_acc = 0.0  # best top1 acc record

    if not config.get('use_ema'):
        train_dataloader, train_fetchs = program.build(
            config, train_prog, startup_prog, is_train=True)
    else:
        train_dataloader, train_fetchs, ema = program.build(
            config, train_prog, startup_prog, is_train=True)

    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(
            config, valid_prog, startup_prog, is_train=False)
        # clone to prune content that is irrelevant for evaluation
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = fluid.Executor(place)
    # Parameter initialization
    exe.run(startup_prog)

    # load the model from 1. a checkpoint to resume training, or 2. a pretrained model to finetune
    init_model(config, train_prog, exe)
    if not config.get('use_dali', False):
        train_reader = Reader(config, 'train')()
        train_dataloader.set_sample_list_generator(train_reader, place)
        if config.validate:
            valid_reader = Reader(config, 'valid')()
            valid_dataloader.set_sample_list_generator(valid_reader, place)
            compiled_valid_prog = program.compile(config, valid_prog)

    else:
        import dali
        train_dataloader = dali.train(config)
        if config.validate and int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
            # only trainer 0 builds the DALI validation pipeline
            valid_dataloader = dali.val(config)
            compiled_valid_prog = program.compile(config, valid_prog)

    compiled_train_prog = fleet.main_program

    vdl_writer = None
    if args.vdl_dir:
        if version_info.major == 2:
            logger.info(
                "visualdl is just supported for python3, so it is disabled in python2..."
            )
        else:
            from visualdl import LogWriter
            vdl_writer = LogWriter(args.vdl_dir)

    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train', config, vdl_writer)
        if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
            # 2. validate with validate dataset
            if config.validate and epoch_id % config.valid_interval == 0:
                if config.get('use_ema'):
                    logger.info(logger.coloring("EMA validate start..."))
                    with ema.apply(exe):
                        top1_acc = program.run(
                            valid_dataloader, exe, compiled_valid_prog,
                            valid_fetchs, epoch_id, 'valid', config)
                    logger.info(logger.coloring("EMA validate over!"))

                top1_acc = program.run(valid_dataloader, exe,
                                       compiled_valid_prog, valid_fetchs,
                                       epoch_id, 'valid', config)
                if top1_acc > best_top1_acc:
                    best_top1_acc = top1_acc
                    message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                        best_top1_acc, epoch_id)
                    logger.info("{:s}".format(logger.coloring(message, "RED")))
                    if epoch_id % config.save_interval == 0:

                        model_path = os.path.join(config.model_save_dir,
                                                  config.ARCHITECTURE["name"])
                        save_model(train_prog, model_path, "best_model")

            # 3. save the persistable model
            if epoch_id % config.save_interval == 0:
                model_path = os.path.join(config.model_save_dir,
                                          config.ARCHITECTURE["name"])
                save_model(train_prog, model_path, epoch_id)
Example #13
def main(args):
    paddle.seed(12345)

    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    optimizer, lr_scheduler = program.create_optimizer(
        config, parameter_list=net.parameters())

    dp_net = net
    if config["use_data_parallel"]:
        find_unused_parameters = config.get("find_unused_parameters", False)
        dp_net = paddle.DataParallel(
            net, find_unused_parameters=find_unused_parameters)

    # load model from checkpoint or pretrained model
    init_model(config, net, optimizer)

    train_dataloader = Reader(config, 'train', places=place)()

    if config.validate:
        valid_dataloader = Reader(config, 'valid', places=place)()

    last_epoch_id = config.get("last_epoch", -1)
    best_top1_acc = 0.0  # best top1 acc record
    best_top1_epoch = last_epoch_id

    vdl_writer_path = config.get("vdl_dir", None)
    vdl_writer = None
    if vdl_writer_path:
        from visualdl import LogWriter
        vdl_writer = LogWriter(vdl_writer_path)
    # Ensure that the vdl log file can be closed normally
    try:
        for epoch_id in range(last_epoch_id + 1, config.epochs):
            net.train()
            # 1. train with train dataset
            program.run(train_dataloader, config, dp_net, optimizer,
                        lr_scheduler, epoch_id, 'train', vdl_writer)

            # 2. validate with validate dataset
            if config.validate and epoch_id % config.valid_interval == 0:
                net.eval()
                with paddle.no_grad():
                    top1_acc = program.run(valid_dataloader, config, net, None,
                                           None, epoch_id, 'valid', vdl_writer)
                if top1_acc > best_top1_acc:
                    best_top1_acc = top1_acc
                    best_top1_epoch = epoch_id
                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(net, optimizer, model_path, "best_model")
                message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                    best_top1_acc, best_top1_epoch)
                logger.info(message)

            # 3. save the persistable model
            if epoch_id % config.save_interval == 0:
                model_path = os.path.join(config.model_save_dir,
                                          config.ARCHITECTURE["name"])
                save_model(net, optimizer, model_path, epoch_id)
    except Exception as e:
        logger.error(e)
    finally:
        if vdl_writer:
            vdl_writer.close()
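
The try/finally above exists only to guarantee the VisualDL writer is closed. An equivalent shape with an explicit writer lifetime, sketched under the assumption that `LogWriter` supports the context-manager protocol (current VisualDL releases document `with LogWriter(...)` usage); `config` is the parsed config from the example:

import contextlib

vdl_writer_path = config.get("vdl_dir", None)
with contextlib.ExitStack() as stack:
    vdl_writer = None
    if vdl_writer_path:
        from visualdl import LogWriter
        # writers entered here are closed automatically on exit,
        # even if the training loop raises
        vdl_writer = stack.enter_context(LogWriter(vdl_writer_path))
    # ... run the epoch loop from the example with vdl_writer ...
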
Example #14
def main(args):
    config = get_config(args.config, overrides=args.override, show=True)
    if config.get("is_distributed", True):
        fleet.init(is_collective=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    # amp related config
    use_amp = config.get('use_amp', False)
    use_pure_fp16 = config.get('use_pure_fp16', False)
    if use_amp or use_pure_fp16:
        AMP_RELATED_FLAGS_SETTING = {
            'FLAGS_cudnn_exhaustive_search': 1,
            'FLAGS_conv_workspace_size_limit': 4000,
            'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
            'FLAGS_max_inplace_grad_add': 8,
        }
        os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1'
        paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING)
    use_xpu = config.get("use_xpu", False)
    assert (
        use_gpu and use_xpu
    ) is not True, "gpu and xpu cannot be true at the same time in static mode!"

    if use_gpu:
        place = paddle.set_device('gpu')
    elif use_xpu:
        place = paddle.set_device('xpu')
    else:
        place = paddle.set_device('cpu')

    # startup_prog is used to do some parameter init work,
    # and train_prog is used to hold the network
    startup_prog = paddle.static.Program()
    train_prog = paddle.static.Program()

    best_top1_acc = 0.0  # best top1 acc record

    train_fetchs, lr_scheduler, train_feeds = program.build(
        config,
        train_prog,
        startup_prog,
        is_train=True,
        is_distributed=config.get("is_distributed", True))

    if config.validate:
        valid_prog = paddle.static.Program()
        valid_fetchs, _, valid_feeds = program.build(config,
                                                     valid_prog,
                                                     startup_prog,
                                                     is_train=False,
                                                     is_distributed=config.get(
                                                         "is_distributed",
                                                         True))
        # clone to prune content that is irrelevant for evaluation
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = paddle.static.Executor(place)
    # Parameter initialization
    exe.run(startup_prog)
    if config.get("use_pure_fp16", False):
        cast_parameters_to_fp16(place, train_prog, fluid.global_scope())
    # load pretrained models or checkpoints
    init_model(config, train_prog, exe)

    if not config.get("is_distributed", True):
        compiled_train_prog = program.compile(
            config, train_prog, loss_name=train_fetchs["loss"][0].name)
    else:
        compiled_train_prog = train_prog

    if not config.get('use_dali', False):
        train_dataloader = Reader(config, 'train', places=place)()
        if config.validate and paddle.distributed.get_rank() == 0:
            valid_dataloader = Reader(config, 'valid', places=place)()
            if use_xpu:
                compiled_valid_prog = valid_prog
            else:
                compiled_valid_prog = program.compile(config, valid_prog)
    else:
        assert use_gpu is True, "DALI only supports GPU, please set use_gpu to True!"
        import dali
        train_dataloader = dali.train(config)
        if config.validate and paddle.distributed.get_rank() == 0:
            valid_dataloader = dali.val(config)
            compiled_valid_prog = program.compile(config, valid_prog)

    vdl_writer = None
    if args.vdl_dir:
        if version_info.major == 2:
            logger.info(
                "visualdl is just supported for python3, so it is disabled in python2..."
            )
        else:
            from visualdl import LogWriter
            vdl_writer = LogWriter(args.vdl_dir)

    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_feeds,
                    train_fetchs, epoch_id, 'train', config, vdl_writer,
                    lr_scheduler)
        if paddle.distributed.get_rank() == 0:
            # 2. validate with validate dataset
            if config.validate and epoch_id % config.valid_interval == 0:
                top1_acc = program.run(valid_dataloader, exe,
                                       compiled_valid_prog, valid_feeds,
                                       valid_fetchs, epoch_id, 'valid', config)
                if top1_acc > best_top1_acc:
                    best_top1_acc = top1_acc
                    message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                        best_top1_acc, epoch_id)
                    logger.info("{:s}".format(logger.coloring(message, "RED")))
                    if epoch_id % config.save_interval == 0:

                        model_path = os.path.join(config.model_save_dir,
                                                  config.ARCHITECTURE["name"])
                        save_model(train_prog, model_path, "best_model")

            # 3. save the persistable model
            if epoch_id % config.save_interval == 0:
                model_path = os.path.join(config.model_save_dir,
                                          config.ARCHITECTURE["name"])
                save_model(train_prog, model_path, epoch_id)
Example #15
def main(args):
    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    places = fluid.cuda_places() if use_gpu else fluid.cpu_places()

    # startup_prog is used to do some parameter init work,
    # and train_prog is used to hold the network
    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    best_top1_acc = 0.0  # best top1 acc record

    if not config.get('use_ema'):
        train_dataloader, train_fetchs = program.build(config,
                                                       train_prog,
                                                       startup_prog,
                                                       is_train=True,
                                                       is_distributed=False)
    else:
        train_dataloader, train_fetchs, ema = program.build(
            config,
            train_prog,
            startup_prog,
            is_train=True,
            is_distributed=False)

    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(config,
                                                       valid_prog,
                                                       startup_prog,
                                                       is_train=False,
                                                       is_distributed=False)
        # clone to prune content that is irrelevant for evaluation
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = fluid.Executor(places[0])
    # Parameter initialization
    exe.run(startup_prog)

    # load the model from 1. a checkpoint to resume training, or 2. a pretrained model to finetune
    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, places)

    compiled_train_prog = program.compile(config, train_prog,
                                          train_fetchs['loss'][0].name)

    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, places)
        compiled_valid_prog = program.compile(config,
                                              valid_prog,
                                              share_prog=compiled_train_prog)

    if args.vdl_dir:
        from visualdl import LogWriter
        vdl_writer = LogWriter(args.vdl_dir)
    else:
        vdl_writer = None

    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train', vdl_writer)

        # 2. validate with validate dataset
        if config.validate and epoch_id % config.valid_interval == 0:
            if config.get('use_ema'):
                logger.info(logger.coloring("EMA validate start..."))
                with ema.apply(exe):
                    top1_acc = program.run(valid_dataloader, exe,
                                           compiled_valid_prog, valid_fetchs,
                                           epoch_id, 'valid')
                logger.info(logger.coloring("EMA validate over!"))

            top1_acc = program.run(valid_dataloader, exe, compiled_valid_prog,
                                   valid_fetchs, epoch_id, 'valid')
            if top1_acc > best_top1_acc:
                best_top1_acc = top1_acc
                message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                    best_top1_acc, epoch_id)
                logger.info("{:s}".format(logger.coloring(message, "RED")))
                if epoch_id % config.save_interval == 0:

                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(train_prog, model_path,
                               "best_model_in_epoch_" + str(epoch_id))

        # 3. save the persistable model
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.ARCHITECTURE["name"])
            save_model(train_prog, model_path, epoch_id)
Example #16
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))

from ppcls.utils import config
from ppcls.engine.engine import Engine

if __name__ == "__main__":
    args = config.parse_args()
    config = config.get_config(args.config,
                               overrides=args.override,
                               show=False)
    engine = Engine(config, mode="infer")
    engine.infer()
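
Example #16 is the modern PaddleClas entry point: the per-mode `main` functions above are folded into a single `Engine` driven by a `mode` argument. Assuming the interface is symmetric across modes (only `mode="infer"` appears in this listing), training and evaluation would look like:

# hedged: inferred from the mode="infer" usage above
engine = Engine(config, mode="train")
engine.train()

engine = Engine(config, mode="eval")
engine.eval()
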
Example #17
def main(args):
    config = get_config(args.config, overrides=args.override, show=True)
    # quantization-aware training requires validation to be enabled
    if not config.validate and args.use_quant:
        logger.error("=====>Quantization-aware training requires validation!")
        sys.exit(1)
    if args.use_quant:
        config.epochs = config.epochs + 5
        gpu_count = get_gpu_count()
        if gpu_count != 1:
            logger.error(
                "=====>Training a quant model must use only one GPU. "
                "Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]`."
            )
            sys.exit(1)

    # decide whether to use the GPU
    use_gpu = config.get("use_gpu", True)
    places = fluid.cuda_places() if use_gpu else fluid.cpu_places()

    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    best_top1_acc = 0.0

    # build the training dataloader and model outputs
    if not config.get('use_ema'):
        train_dataloader, train_fetchs, out, softmax_out = program.build(
            config,
            train_prog,
            startup_prog,
            is_train=True,
            is_distributed=False)
    else:
        train_dataloader, train_fetchs, ema, out, softmax_out = program.build(
            config,
            train_prog,
            startup_prog,
            is_train=True,
            is_distributed=False)
    # build the validation dataloader and model outputs
    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs, _, _ = program.build(
            config,
            valid_prog,
            startup_prog,
            is_train=False,
            is_distributed=False)
        # clone the validation program to prune computation irrelevant to evaluation
        valid_prog = valid_prog.clone(for_test=True)

    # create the executor
    exe = fluid.Executor(places[0])
    exe.run(startup_prog)

    # load the model, either a pretrained model or a checkpoint
    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, places)

    compiled_train_prog = program.compile(config, train_prog,
                                          train_fetchs['loss'][0].name)

    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, places)
        compiled_valid_prog = program.compile(config,
                                              valid_prog,
                                              share_prog=compiled_train_prog)

    vdl_writer = LogWriter(args.vdl_dir)

    for epoch_id in range(config.epochs - 5):
        # train for one epoch
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train', config, vdl_writer)

        # run validation once
        if config.validate and epoch_id % config.valid_interval == 0:
            if config.get('use_ema'):
                logger.info(logger.coloring("EMA validate start..."))
                with ema.apply(exe):
                    _ = program.run(valid_dataloader, exe, compiled_valid_prog,
                                    valid_fetchs, epoch_id, 'valid', config)
                logger.info(logger.coloring("EMA validate over!"))

            top1_acc = program.run(valid_dataloader, exe, compiled_valid_prog,
                                   valid_fetchs, epoch_id, 'valid', config)

            if vdl_writer:
                logger.scaler('valid_avg', top1_acc, epoch_id, vdl_writer)

            if top1_acc > best_top1_acc:
                best_top1_acc = top1_acc
                message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                    best_top1_acc, epoch_id)
                logger.info("{:s}".format(logger.coloring(message, "RED")))
                if epoch_id % config.save_interval == 0:
                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(train_prog, model_path, "best_model")

        # save the model
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.ARCHITECTURE["name"])
            if epoch_id >= 3 and os.path.exists(
                    os.path.join(model_path, str(epoch_id - 3))):
                shutil.rmtree(os.path.join(model_path, str(epoch_id - 3)),
                              ignore_errors=True)
            save_model(train_prog, model_path, epoch_id)

    # quantization-aware training
    if args.use_quant and config.validate:
        # build the quantization-aware training program
        quant_program = slim.quant.quant_aware(train_prog,
                                               exe.place,
                                               for_test=False)
        # build the program for evaluating the quantized model
        val_quant_program = slim.quant.quant_aware(valid_prog,
                                                   exe.place,
                                                   for_test=True)

        fetch_list = [f[0] for f in train_fetchs.values()]
        metric_list = [f[1] for f in train_fetchs.values()]
        for _ in range(5):  # the 5 extra epochs reserved above for quantization
            for idx, batch in enumerate(train_dataloader()):
                metrics = exe.run(program=quant_program,
                                  feed=batch,
                                  fetch_list=fetch_list)
                for i, m in enumerate(metrics):
                    metric_list[i].update(np.mean(m), len(batch[0]))
                fetchs_str = ''.join([str(m.value) + ' ' for m in metric_list])

                if idx % 10 == 0:
                    logger.info("quant train : " + fetchs_str)

        fetch_list = [f[0] for f in valid_fetchs.values()]
        metric_list = [f[1] for f in valid_fetchs.values()]
        for idx, batch in enumerate(valid_dataloader()):
            metrics = exe.run(program=val_quant_program,
                              feed=batch,
                              fetch_list=fetch_list)
            for i, m in enumerate(metrics):
                metric_list[i].update(np.mean(m), len(batch[0]))
            fetchs_str = ''.join([str(m.value) + ' ' for m in metric_list])

            if idx % 10 == 0:
                logger.info("quant valid: " + fetchs_str)

        # save the quantized model for inference
        float_prog, int8_prog = slim.quant.convert(val_quant_program,
                                                   exe.place,
                                                   save_int8=True)
        fluid.io.save_inference_model(dirname=args.output_path,
                                      feeded_var_names=['feed_image'],
                                      target_vars=[softmax_out],
                                      executor=exe,
                                      main_program=float_prog,
                                      model_filename='__model__',
                                      params_filename='__params__')
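
To sanity-check the exported float program, it can be loaded back with the legacy fluid inference API. A minimal sketch, reusing `args.output_path` and the file names from the `save_inference_model` call above; the input name matches the `feed_image` var saved there:

import numpy as np
import paddle.fluid as fluid

exe = fluid.Executor(fluid.CPUPlace())
inference_program, feed_names, fetch_targets = fluid.io.load_inference_model(
    dirname=args.output_path,
    executor=exe,
    model_filename='__model__',
    params_filename='__params__')
result = exe.run(inference_program,
                 feed={feed_names[0]: np.random.rand(1, 3, 224, 224).astype('float32')},
                 fetch_list=fetch_targets)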