Example #1
0
def modifier(args, epoch, model):
    """Switch the model's training phase at fixed epoch boundaries.

    Epoch 0: dense-weight training — prune rate 0, subnet scores frozen,
    weights unfrozen.
    Epoch 6: subnet-score training — prune rate from ``args.prune_rate``,
    scores unfrozen, weights frozen — and a checkpoint is saved.
    Any other epoch is a no-op.

    Args:
        args: namespace providing ``prune_rate``, ``arch`` and
            ``ckpt_base_dir`` (presumably a ``pathlib.Path``, given the
            ``/`` join — confirm against the caller).
        epoch: current epoch number.
        model: the model being trained.
    """
    if epoch == 0:
        # Phase 1: learn the dense weights only.
        set_model_prune_rate(model, prune_rate=0.0)
        freeze_model_subnet(model)
        unfreeze_model_weights(model)

    elif epoch == 6:
        # Phase 2: freeze weights, learn the pruning scores.
        set_model_prune_rate(model, prune_rate=args.prune_rate)
        unfreeze_model_subnet(model)
        freeze_model_weights(model)

        save_checkpoint(
            {
                "epoch": epoch,
                "arch": args.arch,
                "state_dict": model.state_dict(),
                "best_acc1": 0.0,
                "best_acc5": 0.0,
                "best_train_acc1": 0.0,
                "best_train_acc5": 0.0,
                "curr_acc1": "Not evaluated",
            },
            False,
            # Was an f-string with no placeholders (lint F541); kept as a
            # plain literal so the on-disk filename is unchanged.
            filename=args.ckpt_base_dir / "epoch_2.state",
            save=True,
        )
Example #2
0
def get_model(args):
    """Instantiate the architecture named by ``args.arch`` and prepare it
    for sparse training.

    For any conv type other than the dense/continuous variants, the
    configured prune rate is applied (and must be non-negative). Weights
    are frozen when only the subnet scores are to be trained.

    Args:
        args: namespace providing ``arch``, ``conv_type``, ``prune_rate``,
            ``freeze_weights`` and ``first_layer_dense``.

    Returns:
        The constructed (and possibly sparsified / weight-frozen) model.

    Raises:
        ValueError: if sparsity applies but ``args.prune_rate`` is negative.
    """
    if args.first_layer_dense:
        args.first_layer_type = "DenseConv"

    print("=> Creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch]()

    # applying sparsity to the network
    dense_like = ("DenseConv", "SampleSubnetConv", "ContinuousSparseConv")
    if args.conv_type not in dense_like:
        if args.prune_rate < 0:
            raise ValueError("Need to set a positive prune rate")

        set_model_prune_rate(model, prune_rate=args.prune_rate)
        # Count surviving weight parameters (score tensors excluded).
        estimate = sum(
            int(p.numel() * (1 - args.prune_rate))
            for n, p in model.named_parameters()
            if not n.endswith("scores")
        )
        print(f"=> Rough estimate model params {estimate}")

    # freezing the weights if we are only doing subnet training
    if args.freeze_weights:
        freeze_model_weights(model)

    return model
Example #3
0
def resume_finetuning(args, model):
    """Load a pretrained checkpoint (if requested) and switch the model
    into finetuning mode: weights unfrozen, subnet scores frozen.

    Args:
        args: namespace providing ``resume``, ``multigpu``, ``prune_rate``,
            ``epochs``, ``start_epoch`` and ``name`` (mutated here).
        model: model to load the checkpoint into.

    Returns:
        The optimizer from ``get_optimizer`` built for the GPU-placed model.

    Raises:
        FileNotFoundError: if ``args.resume`` is set but no file exists at
            that path.
    """
    if args.resume:
        if os.path.isfile(args.resume):
            print(
                "############################ WELCOME to the FINETUNING MODE!! ############################"
            )
            # Load the pretrained model
            print(f"=> Loading checkpoint '{args.resume}'")
            checkpoint = torch.load(args.resume,
                                    map_location=f"cuda:{args.multigpu[0]}")
            model.load_state_dict(checkpoint["state_dict"])
        else:
            print(f"=> No checkpoint found at '{args.resume}'")
            # BUG FIX: a bare `raise` with no active exception would itself
            # fail with RuntimeError("No active exception to re-raise");
            # raise a meaningful, catchable exception instead.
            raise FileNotFoundError(args.resume)

    # Unfreeze the weights & freeze the scores.
    # (The previous comment here stated the opposite of what the code —
    # and the debug print below — actually do.)
    unfreeze_model_weights(model)
    freeze_model_subnet(model)

    # Set the pruning rate
    set_model_prune_rate(
        model,
        prune_rate=args.prune_rate)  # Don't need to specify the prune_rate

    # Reset the Optimizer
    print(
        "Under the pruning setting, the weights/scores' gradient(s) are as follows:"
    )
    print("You should see that weights are UNFROZEN and scores are FROZEN")
    print(
        "############################ DEBUG PRINT ############################"
    )

    #get_optimizer(args,model)
    print(
        f"=> Rough estimate model params {sum(int(p.numel() * (1-args.prune_rate)) for n, p in model.named_parameters() if not n.endswith('scores'))}"
    )
    print(
        "############################ FINETUNING: EPOCHS = {} ############################"
        .format(args.epochs))

    args.start_epoch = 0  # this quantity was modified at line76 & line260, we use the DEFAULT epochs = 20
    # Reset the exp-name
    args.name = args.name + "_finetuning"  # Change the exp-name so that the new model can be saved to a new directory
    model = set_gpu(args, model)

    return get_optimizer(args, model)