Example #1
0
def modifier(args, epoch, model):
    """Switch training phases at fixed epoch boundaries.

    Epoch 0: dense weight training — prune rate 0, subnet scores frozen,
    weights trainable.
    Epoch 6: subnet search — apply the configured prune rate, unfreeze
    the scores, freeze the weights, and checkpoint the model state.
    """
    if epoch == 0:
        # Phase 1: train weights only; no pruning yet.
        set_model_prune_rate(model, prune_rate=0.0)
        freeze_model_subnet(model)
        unfreeze_model_weights(model)

    elif epoch == 6:
        # Phase 2: search for the subnet; weights stay fixed from here on.
        set_model_prune_rate(model, prune_rate=args.prune_rate)
        unfreeze_model_subnet(model)
        freeze_model_weights(model)

        # Snapshot at the phase boundary. Accuracy fields are placeholders
        # because no evaluation has run at this point.
        save_checkpoint(
            {
                "epoch": epoch,
                "arch": args.arch,
                "state_dict": model.state_dict(),
                "best_acc1": 0.0,
                "best_acc5": 0.0,
                "best_train_acc1": 0.0,
                "best_train_acc5": 0.0,
                "curr_acc1": "Not evaluated",
            },
            False,
            # Fix: the filename was hard-coded to "epoch_2.state" (an
            # f-string with no placeholder) even though this branch fires
            # at epoch 6 — name the file after the actual epoch.
            filename=args.ckpt_base_dir / f"epoch_{epoch}.state",
            save=True,
        )
Example #2
0
def get_model(args):
    """Instantiate the architecture named by ``args.arch`` and configure
    its sparsity.

    Applies the prune rate to every prunable conv type, optionally keeps
    the first layer dense, and freezes the weights when only the subnet
    scores are to be trained. Returns the configured model.
    """
    # A dense first layer overrides whatever conv type was configured.
    if args.first_layer_dense:
        args.first_layer_type = "DenseConv"

    print("=> Creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch]()

    # These conv types manage sparsity themselves; everything else gets
    # an explicit prune rate.
    self_sparse = ("DenseConv", "SampleSubnetConv", "ContinuousSparseConv")
    if args.conv_type not in self_sparse:
        if args.prune_rate < 0:
            raise ValueError("Need to set a positive prune rate")

        set_model_prune_rate(model, prune_rate=args.prune_rate)
        # Rough surviving-parameter count; score tensors are excluded.
        remaining = sum(
            int(p.numel() * (1 - args.prune_rate))
            for n, p in model.named_parameters()
            if not n.endswith("scores")
        )
        print(f"=> Rough estimate model params {remaining}")

    # Subnet-only training: the weights never receive gradients.
    if args.freeze_weights:
        freeze_model_weights(model)

    return model
Example #3
0
def resume_pruning(args, model):
    """Load a pretrained checkpoint and switch the model into pruning mode.

    Freezes the weights, unfreezes the subnet scores, applies the prune
    rate from the current config, resets the starting epoch, and renames
    the experiment so results land in a fresh directory. If no checkpoint
    exists at ``args.resume`` the model is left untouched.

    Returns a freshly built optimizer over the (possibly reconfigured)
    model.
    """
    # Guard clause: nothing to resume from — keep the model as-is.
    if not os.path.isfile(args.resume):
        print(f"=> No checkpoint found at '{args.resume}'")
        return get_optimizer(args, model)

    print(
        "############################ WELCOME to the PRUNING MODE!! ############################"
    )
    # Pull weights & scores onto the first configured GPU; the prune rate
    # in effect at load time is the one from the config file (i.e. 0).
    print(f"=> Loading checkpoint '{args.resume}'")
    ckpt = torch.load(args.resume, map_location=f"cuda:{args.multigpu[0]}")
    model.load_state_dict(ckpt["state_dict"])

    # Pruning mode: weights fixed, only the scores train.
    freeze_model_weights(model)
    unfreeze_model_subnet(model)

    # Apply the prune rate requested for this pruning run.
    set_model_prune_rate(model, prune_rate=args.prune_rate)

    print(
        "Under the pruning setting, the weights/scores' gradient(s) are as follows:"
    )
    print("You should see that weights are FROZEN and scores are UNFROZEN")
    print(
        "############################ DEBUG PRINT ##########################"
    )

    # Rough surviving-parameter count; score tensors are excluded.
    param_estimate = sum(
        int(p.numel() * (1 - args.prune_rate))
        for n, p in model.named_parameters()
        if not n.endswith("scores")
    )
    print(f"=> Rough estimate model params {param_estimate}")

    # Restart the schedule from scratch and save under a new run name so
    # the pruned model does not overwrite the pretraining run.
    args.start_epoch = 0
    args.name = args.name + "_pruning"
    model = set_gpu(args, model)
    return get_optimizer(args, model)