# Example 1
def test_loss_DANN(model_name, pretrained):
    """Loss and model should work together without exceptions."""
    dann_config.MODEL_BACKBONE = model_name
    dann_config.BACKBONE_PRETRAINED = pretrained
    # This combination is not supported, so there is nothing to test.
    if model_name == 'vanilla_dann' and pretrained:
        return
    model = DANNModel()
    # Exercise the loss on a mixed batch, an all-unknown batch and an
    # all-known batch, in that order.
    for proportion in (0.5, 1.00, 0.0):
        batch = random_batch(unknown_proportion=proportion)
        loss_DANN(model, batch, 7, 130)
# Example 2
    def __init__(self, metric):
        """Keep a reference to the wrapped metric object."""
        self.metric = metric

    def reset(self):
        """Delegate the reset to the wrapped metric."""
        self.metric.reset()

    def __call__(self, *args, **kwargs):
        """Evaluate the wrapped metric, printing the inputs and the result.

        Also returns the metric value so this debug wrapper stays
        transparent to callers (the original discarded it and returned
        None; the dead ``pass`` statement is removed as well).
        """
        print(f"Call metric with args:\n{args}\n{kwargs}")
        result = self.metric(*args, **kwargs)
        print("metric: ", result)
        return result


if __name__ == '__main__':
    # Source-domain (amazon) and target-domain (dslr) Office-31 generators.
    train_gen_s, val_gen_s, test_gen_s = create_data_generators(
        "office-31", 'amazon', batch_size=16, infinite_train=True)

    train_gen_t, val_gen_t, test_gen_t = create_data_generators(
        "office-31", 'dslr', batch_size=16, infinite_train=True)

    model = DANNModel()
    acc = AccuracyScoreFromLogits()
    # NOTE(review): the original also built an unused
    # ``DebugMetric(acc)`` wrapper; it was never passed to the trainer,
    # so it has been removed.

    # steps_per_epoch=1 — presumably a quick smoke-test configuration;
    # confirm before using for real training.
    tr = Trainer(model, only_loss)
    tr.fit(train_gen_s,
           train_gen_t,
           n_epochs=5,
           validation_data=[val_gen_s, val_gen_t],
           metrics=[acc],
           steps_per_epoch=1,
           callback=simple_callback)
# Example 3
        batch_size=dann_config.BATCH_SIZE,
        infinite_train=True,
        image_size=dann_config.IMAGE_SIZE,
        num_workers=dann_config.NUM_WORKERS,
        device=device)

    train_gen_t, val_gen_t, test_gen_t = create_data_generators(
        dann_config.DATASET,
        dann_config.TARGET_DOMAIN,
        batch_size=dann_config.BATCH_SIZE,
        infinite_train=True,
        image_size=dann_config.IMAGE_SIZE,
        num_workers=dann_config.NUM_WORKERS,
        device=device)

    model = DANNModel().to(device)
    acc = AccuracyScoreFromLogits()
    mmm = DebugMetric(acc)

    scheduler = LRSchedulerSGD()
    tr = Trainer(model, loss_DANN)
    tr.fit(train_gen_s,
           train_gen_t,
           n_epochs=dann_config.N_EPOCHS,
           validation_data=[val_gen_s, val_gen_t],
           metrics=[acc],
           steps_per_epoch=dann_config.STEPS_PER_EPOCH,
           val_freq=dann_config.VAL_FREQ,
           opt='sgd',
           opt_kwargs={
               'lr': 0.01,
# Example 4
    # --checkpoint: filesystem path to the saved model weights to evaluate.
    parser.add_argument('--checkpoint',
                        type=str,
                        required=True,
                        help='path to model checkpoint')
    args = parser.parse_args()

    # Prefer GPU when available; everything below is placed on this device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Build a single finite generator over the target domain.
    # split_ratios=[1, 0, 0] presumably routes every sample into the first
    # (test) split — confirm against create_data_generators.
    test_gen_t, _, _ = create_data_generators(
        dann_config.DATASET,
        dann_config.TARGET_DOMAIN,
        batch_size=dann_config.BATCH_SIZE,
        infinite_train=False,
        image_size=dann_config.IMAGE_SIZE,
        split_ratios=[1, 0, 0],
        num_workers=dann_config.NUM_WORKERS,
        device=device)
    model = DANNModel().to(device)
    model.load_state_dict(torch.load(args.checkpoint))
    model.eval()  # switch to inference mode before scoring

    # Score the checkpoint on the target domain; no loss is needed for
    # scoring, hence Trainer(model, None).
    acc = AccuracyScoreFromLogits()
    tr = Trainer(model, None)
    scores = tr.score(test_gen_t, [acc])
    scores_string = '   '.join(
        ['{}: {:.5f}\t'.format(k, float(v)) for k, v in scores.items()])
    print(
        f"scores on dataset \"{dann_config.DATASET}\", domain \"{dann_config.TARGET_DOMAIN}\":"
    )
    print(scores_string)