Code example #1
File: run.py  Project: zengyu714/landmark-recognition
def finetune_stage():
    """3-stage finetune
        Step 1: freeze parameters of backbone to extract features
        Step 2: train last three dense `Linear-Linear-Softmax` layers
        Step 3: train bottom 9 layers, starting from `conv4_x`
    """

    modelname = args.modelname
    nickname = args.nickname

    # Visualization
    vis = Visdom(env=nickname)

    # Landmark object
    landmark = Landmark(
        modelname,
        nickname,
        load_dataset,
        vis,
        pretrained=True,  # use pretrained model
        use_stage=True,
        device=device,
        lr=args.lr,
        epochs=args.tot_epochs,
        step_size=args.step_size,
        batch_size=args.batch_size,
        input_size=args.input_size,
        optim_params=args.optim_params)

    print_basic_params(landmark)

    try:
        landmark.resume(f"./checkpoints/{landmark.nickname}_best.ckpt")
    except FileNotFoundError:
        pass  # no checkpoint found yet; start training from scratch

    landmark.model = landmark.model.to(device)  # move the model parameters to CPU/GPU

    # Stages 1-2: backbone frozen, train the dense head for the first `stage_epoch` epochs
    for e in range(landmark.cur_epoch, args.stage_epoch):
        landmark.cur_epoch = e + 1
        for loader_index in range(len(landmark.loader_train_sets)):
            landmark.train(loader_index)
            landmark.save(loader_index)
        landmark.val()
        landmark.scheduler.step()

    unfreeze_resnet50_bottom(landmark)  # Stage 3: unfreeze the bottom layers (from `conv4_x`) and keep training
    for e in range(max(args.stage_epoch, landmark.cur_epoch),
                   landmark.tot_epochs):
        landmark.cur_epoch = e + 1
        for loader_index in range(len(landmark.loader_train_sets)):
            landmark.train(loader_index)
            landmark.save(loader_index)
        landmark.val()
        landmark.scheduler.step()
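
The helper `unfreeze_resnet50_bottom` is called above but not defined in this excerpt. Below is a minimal sketch of what it could look like, assuming a torchvision-style ResNet-50 in which `conv4_x` corresponds to the module named `layer3`; the function body and module names are assumptions for illustration, not the project's actual implementation.

def unfreeze_resnet50_bottom(landmark):
    # Sketch only: make parameters trainable from conv4_x (torchvision: layer3)
    # onward, including layer4 and the classifier head; keep earlier layers frozen.
    trainable_prefixes = ("layer3", "layer4", "fc")
    for name, param in landmark.model.named_parameters():
        param.requires_grad = name.startswith(trainable_prefixes)

After unfreezing, the optimizer usually needs its parameter groups rebuilt or extended so the newly trainable layers actually receive updates; how the `Landmark` class handles this is not visible in the excerpt.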
Code example #2
File: run.py  Project: zengyu714/landmark-recognition
def run(pretrain=True):
    """
       If pretrain is True, finetune all parameters of the pretrained model.
       Otherwise, train a brand-new model from scratch.
    """

    modelname = args.modelname
    nickname = args.nickname

    # Visualization
    vis = Visdom(env=nickname)

    # Landmark object
    landmark = Landmark(
        modelname,
        nickname,
        load_dataset,
        vis,
        pretrained=pretrain,  # finetune a pretrained model or train from scratch
        use_stage=False,
        device=device,
        lr=args.lr,
        epochs=args.tot_epochs,
        step_size=args.step_size,
        batch_size=args.batch_size,
        input_size=args.input_size,
        optim_params=args.optim_params)

    print_basic_params(landmark)

    try:
        landmark.resume(f"./checkpoints/{landmark.nickname}_best.ckpt")
        # landmark.resume(f"./checkpoints/{landmark.nickname}_newest.ckpt")
    except FileNotFoundError:
        pass

    landmark.model = landmark.model.to(device)  # move the model parameters to CPU/GPU
    for e in range(landmark.cur_epoch, landmark.tot_epochs):
        landmark.cur_epoch = e + 1
        for loader_index in range(len(landmark.loader_train_sets)):
            landmark.train(loader_index)
            landmark.save(loader_index)
        landmark.val()
        if landmark.scheduler:
            landmark.scheduler.step()
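
The excerpt does not show how `run` and `finetune_stage` are selected at startup. A hypothetical dispatcher is sketched below; the flag names `args.use_stage` and `args.pretrained` are assumptions for illustration and may not match the project's real command-line interface.

if __name__ == "__main__":
    # Hypothetical entry point: pick staged finetuning or plain end-to-end training.
    if args.use_stage:
        finetune_stage()  # freeze backbone, train the head, then unfreeze bottom layers
    else:
        run(pretrain=args.pretrained)  # finetune a pretrained model or train from scratch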