import copy

import torch
from torch import nn

from mmcv.parallel import MMDataParallel
from spacecutter import callbacks
from spacecutter.models import OrdinalLogisticModel

# call_obj, weights_init, batch_processor, and Runner are project-local
# helpers, and balance_classes / class_weights_dict are module-level
# settings; all are assumed to be defined elsewhere in this package.

def test_clip_ensures_sorted_cutpoints():
    predictor = nn.Linear(5, 1)
    model = OrdinalLogisticModel(predictor, 4, init_cutpoints='ordered')
    # model.link.cutpoints = [-1.5, -0.5, 0.5]

    # Gradients must be disabled so that the cutpoints, which are leaf
    # parameters, can be modified in place below.
    for p in model.parameters():
        p.requires_grad = False
    ascension = callbacks.AscensionCallback()

    # Make cutpoints not in sorted order
    model.link.cutpoints += torch.FloatTensor([0, 5, 0])
    # model.link.cutpoints = [-1.5, 4.5, 0.5]

    # Apply the clipper
    model.apply(ascension.clip)

    assert torch.allclose(model.link.cutpoints.data,
                          torch.FloatTensor([-1.5, 0.5, 0.5]))
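

# Illustrative sketch (not part of the original module): during training,
# AscensionCallback.clip is meant to be applied after every optimizer step so
# that gradient updates cannot push the cutpoints out of ascending order,
# which is the same invariant the test above asserts.
def _example_ordinal_train_step(model, loader, criterion, optimizer):
    ascension = callbacks.AscensionCallback()
    for X, y in loader:
        optimizer.zero_grad()
        loss = criterion(model(X), y)
        loss.backward()
        optimizer.step()
        model.apply(ascension.clip)  # re-sort cutpoints in place
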
def train_model(
    work_dir,
    model_cfg,
    loss_cfg,
    datasets,
    optimizer_cfg,
    batch_size,
    total_epochs,
    training_hooks,
    workflow=[('train', 1)],
    gpus=1,
    log_level=0,
    workers=4,
    resume_from=None,
    load_from=None,
    things_to_log=None,
):

    data_loaders = [
        torch.utils.data.DataLoader(dataset=call_obj(**d),
                                    batch_size=batch_size,
                                    shuffle=True,
                                    num_workers=workers,
                                    drop_last=False) for d in datasets
    ]
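
    # Each entry in `datasets` is a config dict consumed by call_obj; by the
    # same pattern as loss_cfg below, presumably a 'type' key naming the class
    # plus its constructor kwargs. The field names here are illustrative:
    #     {'type': 'datasets.SomeDataset', 'data_dir': '...'}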

    # Module-level settings, declared global so this function can update them.
    global balance_classes
    global class_weights_dict

    # When class balancing is enabled, record the training set's class
    # distribution (presumably used downstream to weight the loss).
    if balance_classes:
        dataset_train = call_obj(**datasets[0])
        class_weights_dict = dataset_train.data_source.class_dist

    model_cfg_local = copy.deepcopy(model_cfg)
    loss_cfg_local = copy.deepcopy(loss_cfg)
    training_hooks_local = copy.deepcopy(training_hooks)
    optimizer_cfg_local = copy.deepcopy(optimizer_cfg)

    # build the model (it is moved onto the GPUs below)
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg_local]
        model = torch.nn.Sequential(*model)

    else:
        model = call_obj(**model_cfg_local)

    if loss_cfg_local['type'] == 'spacecutter.losses.CumulativeLinkLoss':
        model = OrdinalLogisticModel(model, model_cfg_local['num_class'])
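        # Wrapping with OrdinalLogisticModel maps the backbone's scalar output
        # through learnable, ascending cutpoints to num_class ordered-class
        # probabilities (spacecutter's cumulative link model).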

    model.apply(weights_init)
    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    torch.cuda.set_device(0)
    loss = call_obj(**loss_cfg_local)

    # build runner
    optimizer = call_obj(params=model.parameters(), **optimizer_cfg_local)
    runner = Runner(model,
                    batch_processor,
                    optimizer,
                    work_dir,
                    log_level,
                    things_to_log=things_to_log)
    runner.register_training_hooks(**training_hooks_local)

    if resume_from:
        runner.resume(resume_from)
    elif load_from:
        runner.load_checkpoint(load_from)

    # Normalize workflow entries to tuples, e.g. [('train', 5), ('val', 1)].
    workflow = [tuple(w) for w in workflow]
    runner.run(data_loaders, workflow, total_epochs, loss=loss)
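

# Example invocation (illustrative only; the config values below are
# assumptions, except the loss type string, which appears in this file):
#
#     train_model(
#         work_dir='./work_dir',
#         model_cfg={'type': 'models.MyBackbone', 'num_class': 4},
#         loss_cfg={'type': 'spacecutter.losses.CumulativeLinkLoss'},
#         datasets=[{'type': 'datasets.MyDataset'}],
#         optimizer_cfg={'type': 'torch.optim.Adam', 'lr': 1e-3},
#         batch_size=32,
#         total_epochs=50,
#         training_hooks={},
#     )
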
def pretrain_model(
        work_dir,
        model_cfg,
        loss_cfg,
        datasets,
        optimizer_cfg,
        batch_size,
        total_epochs,
        training_hooks,
        workflow=[('train', 1)],
        gpus=1,
        log_level=0,
        workers=4,
        resume_from=None,
        load_from=None,
        things_to_log=None,
        early_stopping=False,
        force_run_all_epochs=True,
        es_patience=10,
        es_start_up=50,
):
    print("Starting STAGE 1: Pretraining...")

    data_loaders = [
        torch.utils.data.DataLoader(dataset=call_obj(**d),
                                    batch_size=batch_size,
                                    shuffle=True,
                                    num_workers=workers,
                                    drop_last=False) for d in datasets
    ]

    # Module-level settings, declared global so this function can update them.
    global balance_classes
    global class_weights_dict

    # When class balancing is enabled, record the training set's class
    # distribution (presumably used downstream to weight the loss).
    if balance_classes:
        dataset_train = call_obj(**datasets[0])
        class_weights_dict = dataset_train.data_source.class_dist

    model_cfg_local = copy.deepcopy(model_cfg)
    loss_cfg_local = copy.deepcopy(loss_cfg)
    training_hooks_local = copy.deepcopy(training_hooks)
    optimizer_cfg_local = copy.deepcopy(optimizer_cfg)

    # build the model (it is moved onto the GPUs below)
    if isinstance(model_cfg, list):
        model = [call_obj(**c) for c in model_cfg_local]
        model = torch.nn.Sequential(*model)

    else:
        model = call_obj(**model_cfg_local)

    if loss_cfg_local['type'] == 'spacecutter.losses.CumulativeLinkLoss':
        model = OrdinalLogisticModel(model, model_cfg_local['num_class'])

    # Step 1: initialize the model with random weights.
    model.apply(weights_init)
    model = MMDataParallel(model, device_ids=range(gpus)).cuda()
    torch.cuda.set_device(0)
    loss = call_obj(**loss_cfg_local)

    # build runner
    optimizer = call_obj(params=model.parameters(), **optimizer_cfg_local)
    runner = Runner(model,
                    batch_processor,
                    optimizer,
                    work_dir,
                    log_level,
                    things_to_log=things_to_log,
                    early_stopping=early_stopping,
                    force_run_all_epochs=force_run_all_epochs,
                    es_patience=es_patience,
                    es_start_up=es_start_up)
    runner.register_training_hooks(**training_hooks_local)

    if resume_from:
        runner.resume(resume_from)
    elif load_from:
        runner.load_checkpoint(load_from)

    # Normalize workflow entries to tuples, e.g. [('train', 5), ('val', 1)].
    workflow = [tuple(w) for w in workflow]
    runner.run(data_loaders, workflow, total_epochs, loss=loss)
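

# Example invocation (illustrative; all values are assumptions). The
# early-stopping arguments are forwarded to the project's custom Runner:
# judging by their names, es_patience and es_start_up presumably set the
# patience and warm-up period (in epochs), while force_run_all_epochs
# disables early exit.
#
#     pretrain_model(
#         work_dir='./work_dir/pretrain',
#         model_cfg=model_cfg,
#         loss_cfg=loss_cfg,
#         datasets=datasets,
#         optimizer_cfg=optimizer_cfg,
#         batch_size=32,
#         total_epochs=200,
#         training_hooks=training_hooks,
#         early_stopping=True,
#         force_run_all_epochs=False,
#         es_patience=10,
#         es_start_up=50,
#     )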