Example #1
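Single-pass evaluation: builds the network, restores weights with init_model, and runs one validation epoch, returning top-1 accuracy. get_config, program, Reader, init_model, and logger are repo-internal helpers (PaddleClas-style tooling) assumed to be in scope in all of the examples below.
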
def main(args, return_dict=None):
    # avoid the shared-mutable-default pitfall; callers can still pass a
    # dict (e.g. a multiprocessing manager dict) to collect results
    if return_dict is None:
        return_dict = {}
    config = get_config(args.config, overrides=args.override, show=True)
    config.mode = "valid"
    # assign place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    if config["use_data_parallel"]:
        net = paddle.DataParallel(net)

    init_model(config, net, optimizer=None)
    valid_dataloader = Reader(config, 'valid', places=place)()
    net.eval()
    with paddle.no_grad():
        top1_acc = program.run(valid_dataloader, config, net, None, None, 0,
                               'valid')
    return_dict["top1_acc"] = top1_acc
    return top1_acc
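
A minimal sketch of how such an entry point is typically invoked; the flag names below are assumptions inferred from args.config and args.override, not part of the original script.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser("eval")
    # hypothetical flags, mirroring args.config / args.override above
    parser.add_argument("-c", "--config", type=str, required=True)
    parser.add_argument("-o", "--override", action="append", default=[])
    main(parser.parse_args())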
Example #2
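The same evaluation entry point, extended with a multilabel branch: instead of top-1 accuracy it collects per-batch sigmoid scores and reports mean average precision. F and np are the usual paddle.nn.functional and numpy aliases.
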
def main(args, return_dict=None):
    # avoid the shared-mutable-default pitfall; callers can still pass a
    # dict (e.g. a multiprocessing manager dict) to collect results
    if return_dict is None:
        return_dict = {}
    config = get_config(args.config, overrides=args.override, show=True)
    config.mode = "valid"
    # assign place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')
    multilabel = config.get("multilabel", False)

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)

    init_model(config, net, optimizer=None)
    valid_dataloader = Reader(config, 'valid', places=place)()
    net.eval()
    with paddle.no_grad():
        if not multilabel:
            top1_acc = program.run(valid_dataloader, config, net, None, None,
                                   0, 'valid')
            return_dict["top1_acc"] = top1_acc

            return top1_acc
        else:
            all_outs = []
            targets = []
            use_distillation = config.get("use_distillation", False)
            for batch in valid_dataloader():
                feeds = program.create_feeds(batch, False, config.classes_num,
                                             multilabel)
                out = net(feeds["image"])
                # under distillation the head returns (teacher, student);
                # take the student logits before applying the sigmoid
                if use_distillation:
                    out = out[1]
                out = F.sigmoid(out)

                all_outs.extend(list(out.numpy()))
                targets.extend(list(feeds["label"].numpy()))
            all_outs = np.array(all_outs)
            targets = np.array(targets)

            mAP = mean_average_precision(all_outs, targets)

            return_dict["mean average precision"] = mAP

            return mAP
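
mean_average_precision is a repo-internal helper; purely for illustration, a rough numpy sketch of macro-averaged AP over labels (the repo's actual implementation may differ):

import numpy as np

def mean_average_precision_sketch(scores, targets):
    # scores, targets: (num_samples, num_labels); targets holds 0/1 entries
    aps = []
    for j in range(targets.shape[1]):
        order = np.argsort(-scores[:, j])      # rank samples by score, descending
        hits = targets[order, j]
        if hits.sum() == 0:
            continue                           # skip labels with no positives
        precision = np.cumsum(hits) / (np.arange(len(hits)) + 1)
        aps.append((precision * hits).sum() / hits.sum())
    return float(np.mean(aps))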
Example #3
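A checkpoint-conversion snippet rather than an evaluation: it restores weights via init_model and immediately re-exports them with save_model under a fixed path and the 'ppcls' prefix.
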
def main(args, return_dict=None):  # return_dict is unused here; mutable default removed
    config = get_config(args.config, overrides=args.override, show=True)
    config.mode = "valid"
    # assign place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    if config["use_data_parallel"]:
        net = paddle.DataParallel(net)

    init_model(config, net, optimizer=None)
    save_model(net,
               None,
               model_path='./pretrained/10w/',
               epoch_id=0,
               prefix='ppcls')
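Note the hard-coded model_path ('./pretrained/10w/') and epoch_id=0: this reads as a one-off utility for re-exporting restored weights rather than a general entry point, so in the surrounding repo those values would normally come from the config.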
Example #4
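The full training loop: seeds Paddle, builds the model and optimizer, optionally wraps the net in DataParallel, then alternates training epochs with interval-gated validation and checkpointing.
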
def main(args):
    paddle.seed(12345)

    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    optimizer, lr_scheduler = program.create_optimizer(
        config, parameter_list=net.parameters())

    if config["use_data_parallel"]:
        net = paddle.DataParallel(net)

    # load model from checkpoint or pretrained model
    init_model(config, net, optimizer)

    train_dataloader = Reader(config, 'train', places=place)()

    if config.validate:
        valid_dataloader = Reader(config, 'valid', places=place)()

    last_epoch_id = config.get("last_epoch", -1)
    best_top1_acc = 0.0  # best top1 acc record
    best_top1_epoch = last_epoch_id
    for epoch_id in range(last_epoch_id + 1, config.epochs):
        net.train()
        # 1. train with train dataset
        program.run(train_dataloader, config, net, optimizer, lr_scheduler,
                    epoch_id, 'train')

        # 2. validate with validate dataset
        if config.validate and epoch_id % config.valid_interval == 0:
            net.eval()
            with paddle.no_grad():
                top1_acc = program.run(valid_dataloader, config, net, None,
                                       None, epoch_id, 'valid')
            if top1_acc > best_top1_acc:
                best_top1_acc = top1_acc
                best_top1_epoch = epoch_id
                # caveat: the best checkpoint is only written when the best
                # epoch aligns with save_interval; Example #5 saves it
                # unconditionally
                if epoch_id % config.save_interval == 0:
                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(net, optimizer, model_path, "best_model")
            message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                best_top1_acc, best_top1_epoch)
            logger.info("{:s}".format(logger.coloring(message, "RED")))

        # 3. save the persistable model
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.ARCHITECTURE["name"])
            save_model(net, optimizer, model_path, epoch_id)
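
Example #4 relies on paddle.distributed.get_world_size(), so multi-card runs are expected to be started under a distributed launcher. As a hedged aside, workers can also be spawned programmatically with paddle.distributed.spawn; nprocs=2 and parse_args below are illustrative assumptions, not part of the original script.

import paddle.distributed as dist

if __name__ == "__main__":
    args = parse_args()  # hypothetical argparse helper, as sketched earlier
    # each spawned worker sees world_size == nprocs, which enables the
    # use_data_parallel branch above
    dist.spawn(main, args=(args, ), nprocs=2)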
Example #5
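A refinement of Example #4: it trains through a dp_net wrapper while keeping the unwrapped net for eval and checkpointing, supports find_unused_parameters, saves the best model unconditionally, and adds optional VisualDL logging guarded by try/finally.
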
def main(args):
    paddle.seed(12345)

    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    optimizer, lr_scheduler = program.create_optimizer(
        config, parameter_list=net.parameters())

    dp_net = net
    if config["use_data_parallel"]:
        find_unused_parameters = config.get("find_unused_parameters", False)
        dp_net = paddle.DataParallel(
            net, find_unused_parameters=find_unused_parameters)

    # load model from checkpoint or pretrained model
    init_model(config, net, optimizer)

    train_dataloader = Reader(config, 'train', places=place)()

    if config.validate:
        valid_dataloader = Reader(config, 'valid', places=place)()

    last_epoch_id = config.get("last_epoch", -1)
    best_top1_acc = 0.0  # best top1 acc record
    best_top1_epoch = last_epoch_id

    vdl_writer_path = config.get("vdl_dir", None)
    vdl_writer = None
    if vdl_writer_path:
        # import lazily so visualdl is only required when logging is enabled
        from visualdl import LogWriter
        vdl_writer = LogWriter(vdl_writer_path)
    # Ensure that the vdl log file can be closed normally
    try:
        for epoch_id in range(last_epoch_id + 1, config.epochs):
            net.train()
            # 1. train with train dataset
            program.run(train_dataloader, config, dp_net, optimizer,
                        lr_scheduler, epoch_id, 'train', vdl_writer)

            # 2. validate with validate dataset
            if config.validate and epoch_id % config.valid_interval == 0:
                net.eval()
                with paddle.no_grad():
                    top1_acc = program.run(valid_dataloader, config, net, None,
                                           None, epoch_id, 'valid', vdl_writer)
                if top1_acc > best_top1_acc:
                    best_top1_acc = top1_acc
                    best_top1_epoch = epoch_id
                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(net, optimizer, model_path, "best_model")
                message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                    best_top1_acc, best_top1_epoch)
                logger.info(message)

            # 3. save the persistable model
            if epoch_id % config.save_interval == 0:
                model_path = os.path.join(config.model_save_dir,
                                          config.ARCHITECTURE["name"])
                save_model(net, optimizer, model_path, epoch_id)
    except Exception as e:
        logger.error(e)
        raise  # re-raise so failures are not silently swallowed
    finally:
        if vdl_writer:
            vdl_writer.close()
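
program.run presumably receives vdl_writer to emit training scalars. For reference, standalone VisualDL LogWriter usage looks like this (the logdir, tag, and values are illustrative):

from visualdl import LogWriter

with LogWriter(logdir="./vdl_log") as writer:
    for step in range(100):
        # one scalar point per step, viewable in the VisualDL web UI
        writer.add_scalar(tag="train/avg_loss", step=step, value=1.0 / (step + 1))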