Example #1
    # the snippet opens mid-statement; `optim.Adam` is assumed from the sibling examples below
    optimizer = optim.Adam([{
        'params': model.parameters()
    }, {
        'params': classifier.parameters()
    }],
                           lr=lr,
                           weight_decay=args.weight_decay)
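    # NOTE: both parameter groups above share the same lr / weight_decay; a
    # per-group override, e.g. {'params': classifier.parameters(), 'lr': lr * 10}
    # (hypothetical factor), would take precedence for that group only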

    # default value
    it = -1  # initial value for `LambdaLR` and `BNMomentumScheduler`
    best_prec1 = 0
    best_auc1 = 0
    best_loss = 1e10  # the checkpoint branches below assign `best_loss`
    start_epoch = 1

    # load status from checkpoint
    if args.model_checkpoint is not None:
        checkpoint_status = pt_utils.load_checkpoint(
            model, optimizer, filename=args.model_checkpoint.split(".")[0])
        if checkpoint_status is not None:
            it, start_epoch, best_loss = checkpoint_status
    if args.cls_checkpoint is not None:
        checkpoint_status = pt_utils.load_checkpoint(
            classifier, optimizer, filename=args.cls_checkpoint.split(".")[0])
        if checkpoint_status is not None:
            it, start_epoch, best_loss = checkpoint_status

    # override the learning rate restored with the optimizer state
    for param_group in optimizer.param_groups:
        param_group['lr'] = 1e-4
    it = max(it, 0)  # initial value for `trainer.train`

    if not osp.isdir("checkpoints"):
        os.makedirs("checkpoints")
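Both `load_checkpoint` calls above unpack `(it, start_epoch, best_loss)` on success and tolerate `None` on a miss, which pins down the loader's contract. A minimal sketch of a compatible loader, assuming a `{filename}.pth.tar` naming scheme and the state-dict keys shown below (both assumptions, not pt_utils's documented API):

    import os
    import torch

    def load_checkpoint(model=None, optimizer=None, filename="checkpoint"):
        # assumed contract: (it, epoch, best_metric) on success, None if missing
        path = "{}.pth.tar".format(filename)  # the ".pth.tar" suffix is an assumption
        if not os.path.isfile(path):
            return None
        ckpt = torch.load(path)
        if model is not None and ckpt.get("model_state") is not None:
            model.load_state_dict(ckpt["model_state"])
        if optimizer is not None and ckpt.get("optimizer_state") is not None:
            optimizer.load_state_dict(ckpt["optimizer_state"])
        return ckpt.get("it", -1), ckpt.get("epoch", 1), ckpt.get("best_prec", 0.0)
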
Example #2

    ## rewrite the training process
    lr_lbmd = lambda it: max(
        args.lr_decay**(int(it * args.batch_size / args.decay_step)),
        lr_clip / args.lr,
    )
    bn_lbmd = lambda it: max(
        args.bn_momentum * args.bnm_decay**
        (int(it * args.batch_size / args.decay_step)),
        bnm_clip,
    )

    # default value
    it = -1  # initial value for `LambdaLR` and `BNMomentumScheduler`
    best_loss = 1e10
    start_epoch = 1

    # load status from checkpoint
    if args.checkpoint is not None:
        checkpoint_status = pt_utils.load_checkpoint(
            model, optimizer, filename=args.checkpoint.split(".")[0])
        if checkpoint_status is not None:
            it, start_epoch, best_loss = checkpoint_status

    lr_scheduler = lr_sched.LambdaLR(optimizer,
                                     lr_lambda=lr_lbmd,
                                     last_epoch=it)
    bnm_scheduler = pt_utils.BNMomentumScheduler(model,
                                                 bn_lambda=bn_lbmd,
                                                 last_epoch=it)
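    # both lambdas are functions of the iteration counter `it`, so passing
    # `last_epoch=it` resumes the decay curves exactly where the checkpoint stopped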
    model = nn.DataParallel(model)

    it = max(it, 0)  # initial value for `trainer.train`

    model_fn = model_fn_decorator(nn.CrossEntropyLoss())
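`BNMomentumScheduler` mirrors `LambdaLR`, but drives BatchNorm momentum instead of the learning rate. A minimal sketch of such a scheduler, inferred from the call sites above (the real pt_utils class may differ):

    import torch.nn as nn

    class BNMomentumScheduler:
        def __init__(self, model, bn_lambda, last_epoch=-1):
            self.model = model
            self.bn_lambda = bn_lambda
            self.last_epoch = last_epoch
            self.step(last_epoch + 1)  # apply the initial momentum, as LambdaLR does for lr

        def step(self, epoch=None):
            if epoch is None:
                epoch = self.last_epoch + 1
            self.last_epoch = epoch
            momentum = self.bn_lambda(epoch)
            for m in self.model.modules():
                if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
                    m.momentum = momentum
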
Example #3

    model = Pointnet(num_classes=13, input_channels=6, use_xyz=True)
    model.cuda()
    optimizer = optim.Adam(
        model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    lr_lbmd = lambda it: max(
        args.lr_decay**(int(it * args.batch_size / args.decay_step)),
        lr_clip / args.lr,
    )
    bnm_lmbd = lambda it: max(
        args.bn_momentum * args.bn_decay**(int(it * args.batch_size / args.decay_step)),
        bnm_clip,
    )

    if args.checkpoint is None:
        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
        bnm_scheduler = pt_utils.BNMomentumScheduler(model, bnm_lmbd)
        start_epoch = 1
        best_prec = 0
        best_loss = 1e10
    else:
        start_epoch, best_loss = pt_utils.load_checkpoint(
            model, optimizer, filename=args.checkpoint.split(".")[0])

        lr_scheduler = lr_sched.LambdaLR(
            optimizer, lr_lbmd, last_epoch=start_epoch)
        bnm_scheduler = pt_utils.BNMomentumScheduler(
            model, bnm_lmbd, last_epoch=start_epoch)

    model_fn = model_fn_decorator(nn.CrossEntropyLoss())

    if args.visdom:
        viz = pt_utils.VisdomViz(port=args.visdom_port)
    else:
        viz = pt_utils.CmdLineViz()

    viz.text(pprint.pformat(vars(args)))
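All three examples use the same step-decay rule: the multiplier drops by a constant factor each time `int(it * batch_size / decay_step)` ticks up, i.e. once every `decay_step / batch_size` iterations, and is clipped from below. A quick check with hypothetical numbers:

    lr_decay, batch_size, decay_step = 0.5, 32, 3200  # hypothetical values
    min_mult = 1e-3                                   # stands in for lr_clip / args.lr

    lr_lbmd = lambda it: max(lr_decay**(int(it * batch_size / decay_step)), min_mult)

    for it in (0, 99, 100, 200, 1000):
        print(it, lr_lbmd(it))
    # -> 1.0, 1.0, 0.5, 0.25, 0.001: the multiplier halves every
    #    decay_step / batch_size = 100 iterations; 0.5**10 falls below
    #    min_mult, so the last value is clipped to 0.001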