import warnings

import torch


def new_fresh_model(args, DEVICE):
    """
    Instantiate a fresh model.
    Useful to restart training with the certainty that the model is in the
    exact same state as at t=0.
    :param args: object. Parsed input arguments.
    :param DEVICE: torch device.
    :return: model: the instantiated model.
    """
    ALLOW_MULTIGPUS = check_if_allow_multgpu_mode()
    NBRGPUS = torch.cuda.device_count()
    # ========================= Instantiate models =============================
    model = instantiate_models(args)

    # Check if we are using a user-specified pre-trained model other than
    # our pre-defined pre-trained models. This can be used to EVALUATE a
    # trained model. You need to set args.max_epochs to -1 so no training is
    # performed. This is a hack to avoid creating another function to deal
    # with LATER-evaluation after this code is done. This script is intended
    # for training; we evaluate at the end. However, if you missed something
    # during the training/evaluation (for example, plotting something over
    # the predicted images), you do not need to re-train the model. You can:
    # 1. specify the path to the pre-trained model; 2. set max_epochs to -1;
    # 3. set strict to True. By doing this, we load the pre-trained model
    # and skip the training loop, fast-forwarding to the evaluation.
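    #
    # A minimal sketch of that configuration (hypothetical values; the keys
    # follow the args.model accesses in this function):
    #
    #   args.model['path_pre_trained'] = '/path/to/checkpoint.pt'
    #   args.model['strict'] = True
    #   args.max_epochs = -1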

    if args.model['path_pre_trained'] not in [None, 'None']:
        warnings.warn("You have asked to load a specific pre-trained "
                      "model from {} .... [OK]".format(
            args.model['path_pre_trained']))

        model = load_pre_pretrained_model(
            model=model,
            path_file=args.model['path_pre_trained'],
            strict=args.model['strict'],
            freeze_classifier=args.freeze_classifier
        )

    # Check if there are multiple GPUS.
    if ALLOW_MULTIGPUS:
        model = MyDataParallel(model)
        if args.batch_size < NBRGPUS:
            warnings.warn("You asked for MULTIGPU mode. However, "
                          "your batch size {} is smaller than the number of "
                          "available GPUs {}. This is fine in practice. "
                          "However, some GPUs will be idle. "
                          "This is just a warning .... "
                          "[OK]".format(args.batch_size, NBRGPUS))
    model.to(DEVICE)
    # freeze the classifier if needed
    if args.freeze_classifier:
        warnings.warn("You asked to freeze the classifier."
                      "We are going to do it right now.")
        model.freeze_cl()
        assert model.assert_cl_is_frozen(), "The classifier is not frozen."
    # ==========================================================================

    return model
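
# A minimal usage sketch (hypothetical driver code; it assumes the
# parse_input and get_device helpers used further below):
#
#   args, args_dict, input_args = parse_input()
#   DEVICE = get_device(args)
#   model = new_fresh_model(args, DEVICE)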
# Example #2

import os
import datetime as dt

import torch
import torch.nn as nn

import reproducibility
import constants

ACTIVATE_SYNC_BN = True
# Override ACTIVATE_SYNC_BN using variable environment in Bash:
# $ export ACTIVATE_SYNC_BN="True"   ----> Activate
# $ export ACTIVATE_SYNC_BN="False"   ----> Deactivate

if "ACTIVATE_SYNC_BN" in os.environ.keys():
    ACTIVATE_SYNC_BN = (os.environ['ACTIVATE_SYNC_BN'] == "True")

announce_msg("ACTIVATE_SYNC_BN was set to {}".format(ACTIVATE_SYNC_BN))

if check_if_allow_multgpu_mode() and ACTIVATE_SYNC_BN:  # Activate Synch-BN.
    from deeplearning.syncbn import nn as NN_Sync_BN
    BatchNorm2d = NN_Sync_BN.BatchNorm2d
    announce_msg("Synchronized BN has been activated. \n"
                 "MultiGPU mode has been activated. "
                 "{} GPUs".format(torch.cuda.device_count()))
else:
    BatchNorm2d = nn.BatchNorm2d
    multigpu = "activated" if check_if_allow_multgpu_mode() else "deactivated"
    announce_msg("Synchronized BN has been deactivated.\n"
                 "MultiGPU mode has been {}. "
                 "{} GPUs".format(multigpu, torch.cuda.device_count()))
# "Oxford-flowers-102"
DEBUG_MODE = False
# dataset to go fast. If True, we select only few samples for training
# , validation, and test.

FRQ = 2.  # number of times to plot the train figures.

# =============================================
# Parse the inputs and deal with the yaml file.
# init. reproducibility.
# =============================================
args, args_dict, input_args = parse_input()

NBRGPUS = torch.cuda.device_count()

ALLOW_MULTIGPUS = check_if_allow_multgpu_mode()


if __name__ == "__main__":
    init_time = dt.datetime.now()

    # ==================================================
    # Device, criteria, folders, output logs, callbacks.
    # ==================================================

    DEVICE = get_device(args)
    CPUDEVICE = get_cpu_device()

    CRITERION = instantiate_loss(args).to(DEVICE)

    # plot freq.
# Example #4

def do_camelyon16():
    ds = constants.CAM16
    announce_msg("Processing dataset: {}".format(ds))
    args = {
        "baseurl": get_rootpath_2_dataset(Dict2Obj({'dataset': ds})),
        "dataset": ds,
        "fold_folder": "folds/{}".format(ds),
        "img_extension": "jpg",
        "path_encoding": "folds/{}/encoding-origine.yaml".format(ds)
    }
    # Convert masks into binary masks: already done.
    # create_bin_mask_Oxford_flowers_102(Dict2Obj(args))
    reproducibility.init_seed()
    al_split_camelyon16(Dict2Obj(args))
    # get_stats(Dict2Obj(args), split=0, fold=0, subset='train')


if __name__ == "__main__":
    check_if_allow_multgpu_mode()

    # ==========================================================================
    #                          ACTIVE LEARNING
    # ==========================================================================
    do_glas()
    do_Caltech_UCSD_Birds_200_2011()
    # do_Oxford_flowers_102()
    # do_camelyon16()
    # ==========================================================================
    #                       END: ACTIVE LEARNING
    # ==========================================================================