Example #1
def init_model(args):
    import os
    print(os.getcwd())  # debug: confirm the working directory so the relative data paths resolve
    source_train_loader = get_loader(name_dataset=args.source,
                                     batch_size=args.batch_size,
                                     train=True,
                                     path="../../data/OfficeCaltech/images/")
    target_train_loader = get_loader(name_dataset=args.target,
                                     batch_size=args.batch_size,
                                     train=True,
                                     path="../../data/OfficeCaltech/images/")

    source_evaluate_loader = get_loader(
        name_dataset=args.source,
        batch_size=args.batch_size,
        train=False,
        path="../../data/OfficeCaltech/images/")
    target_evaluate_loader = get_loader(
        name_dataset=args.target,
        batch_size=args.batch_size,
        train=False,
        path="../../data/OfficeCaltech/images/")

    n_classes = len(source_train_loader.dataset.classes)

    # ~ Paper : "We initialized the other layers with the parameters pre-trained on ImageNet"
    # check https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    model = alexnet(pretrained=True)
    # ~ Paper : The dimension of last fully connected layer (fc8) was set to the number of categories (31)
    model.classifier[6] = nn.Linear(4096, n_classes)
    # ~ Paper : and initialized with N(0, 0.005)
    torch.nn.init.normal_(model.classifier[6].weight, mean=0, std=5e-3)

    # Initialize bias to small constant number (http://cs231n.github.io/neural-networks-2/#init)
    model.classifier[6].bias.data.fill_(0.01)

    model = model.to(device=args.device)

    # ~ Paper : "The learning rate of fc8 is set to 10 times the other layers as it was training from scratch."
    optimizer = torch.optim.SGD(
        [
            {'params': model.features.parameters()},
            {'params': model.classifier[:6].parameters()},
            # fc8 -> 7th element (index 6) in the Sequential block
            {'params': model.classifier[6].parameters(), 'lr': 10 * args.lr}
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.decay)  # if not specified, the default lr is used

    tracker = Tracker()

    for i in range(args.epochs):
        train(model, optimizer, source_train_loader, target_train_loader,
              tracker, args, i)
        evaluate(model, source_evaluate_loader, 'source', tracker, args, i)
        evaluate(model, target_evaluate_loader, 'target', tracker, args, i)

    # Save logged classification loss, coral loss, source accuracy, target accuracy
    torch.save(tracker.to_dict(), args.da_loss + "_log.pth")
    print("Final Evaluation\r")
    return evaluate(model, target_evaluate_loader, 'target', tracker, args, i)
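
All three examples rely on a project-specific get_loader helper that is not shown (Example #2's variant takes a data_dir argument instead of path, but the idea is the same). A minimal sketch of what it might look like, assuming the Office-Caltech images are laid out as path/<domain>/<class>/*.jpg so torchvision's ImageFolder applies; the transform choices and directory layout are assumptions, not part of the original code:

import torch
from torchvision import datasets, transforms

def get_loader(name_dataset, batch_size, train, path="../../data/OfficeCaltech/images/"):
    # Hypothetical loader: one ImageFolder per domain, AlexNet-sized crops
    # with standard ImageNet normalization (the original repo may differ).
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    dataset = datasets.ImageFolder(root=path + name_dataset, transform=transform)
    # Shuffle only during training; ImageFolder also exposes .classes,
    # which the examples use to size the final layer.
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=train)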
Example #2
def main():

    # Paper: In the training phase, we set the batch size to 128,
    # base learning rate to 10^-3, weight decay to 5×10^-4, and momentum to 0.9

    parser = argparse.ArgumentParser(description='Train - Evaluate DeepCORAL model')
    parser.add_argument('--disable_cuda', action='store_true',
                        help='Disable CUDA')
    parser.add_argument('--epochs', type=int, default=50,
                        help='Number of total epochs to run')
    parser.add_argument('--backbone_network', type=str, default='alexnet',
                        help='Backbone CNN')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='Batch size')
    parser.add_argument('--lr', type=float, default=1e-3,
                        help='Learning rate')
    parser.add_argument('--decay', type=float, default=5e-4,
                        help='Weight decay (L2 regularization)')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help="Optimizer's momentum")
    parser.add_argument('--lambda_coral', type=float, default=0.5,
                        help="Weight that trades off the adaptation with "
                             "classification accuracy on the source domain")
    parser.add_argument('--source', default='amazon',
                        help="Source Domain (dataset)")
    parser.add_argument('--target', default='webcam',
                        help="Target Domain (dataset)")

    args = parser.parse_args()
    args.device = None

    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
    else:
        args.device = torch.device('cpu')

    if args.backbone_network in ('alexnet', 'resnet50'):
        if args.source == 'ub':
            source_data_dir = '/home/alejandro/ub/journal_2019/split/domain_adaptation/ub/static/01/train'
        else:
            source_data_dir = None

        if args.target == 'thomaz':
            target_data_dir = '/home/alejandro/ub/journal_2019/split/domain_adaptation/thomaz/static/01/train'
        else:
            target_data_dir = None

        source_train_loader = get_loader(name_dataset=args.source, batch_size=args.batch_size, train=True,
                                         data_dir=source_data_dir)
        target_train_loader = get_loader(name_dataset=args.target, batch_size=args.batch_size, train=True,
                                         data_dir=target_data_dir)

        source_evaluate_loader = get_loader(name_dataset=args.source, batch_size=args.batch_size, train=False,
                                            data_dir=source_data_dir)
        target_evaluate_loader = get_loader(name_dataset=args.target, batch_size=args.batch_size, train=False,
                                            data_dir=target_data_dir)

        n_classes = len(source_train_loader.dataset.classes)
    else:
        source_train_dataset = FeaturesDataset(split_fpath='/home/alejandro/ub/journal_2019/split/domain_adaptation/ub/static/01/cached_fold-01_train.npz')
        target_train_dataset = FeaturesDataset(split_fpath='/home/alejandro/ub/journal_2019/split/domain_adaptation/thomaz/static/01/cached_fold-01_train.npz')

        source_train_loader = torch.utils.data.DataLoader(source_train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)
        target_train_loader = torch.utils.data.DataLoader(target_train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)

        source_evaluate_dataset = FeaturesDataset(split_fpath='/home/alejandro/ub/journal_2019/split/domain_adaptation/ub/static/01/cached_fold-01_train.npz')
        target_evaluate_dataset = FeaturesDataset(split_fpath='/home/alejandro/ub/journal_2019/split/domain_adaptation/thomaz/static/01/cached_fold-01_train.npz')
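        # Note: the "evaluate" datasets above reuse the same cached *train* split as training.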

        source_evaluate_loader = torch.utils.data.DataLoader(source_evaluate_dataset, batch_size=args.batch_size,
                                                             shuffle=False, num_workers=4)
        target_evaluate_loader = torch.utils.data.DataLoader(target_evaluate_dataset, batch_size=args.batch_size,
                                                             shuffle=False, num_workers=4)
        n_classes = 17

    # ~ Paper : "We initialized the other layers with the parameters pre-trained on ImageNet"
    # check https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    if args.backbone_network == 'alexnet':
        model = alexnet(pretrained=True)

        # ~ Paper : The dimension of last fully connected layer (fc8) was set to the number of categories (31)
        model.classifier[6] = nn.Linear(4096, n_classes)

        # ~ Paper : and initialized with N(0, 0.005)
        torch.nn.init.normal_(model.classifier[6].weight, mean=0, std=5e-3)

        # Initialize bias to small constant number (http://cs231n.github.io/neural-networks-2/#init)
        model.classifier[6].bias.data.fill_(0.01)

        model = model.to(device=args.device)

        # ~ Paper : "The learning rate of fc8 is set to 10 times the other layers as it was training from scratch."
        optimizer = torch.optim.SGD([
            {'params': model.features.parameters()},
            {'params': model.classifier[:6].parameters()},
            {'params': model.classifier[6].parameters(), 'lr': 10 * args.lr}
        ], lr=args.lr, momentum=args.momentum, weight_decay=args.decay)  # if not specified, the default lr is used

    elif args.backbone_network == 'resnet50':
        model = resnet50(pretrained=True)

        # ~ Paper : The dimension of last fully connected layer (fc8) was set to the number of categories (31)
        model.fc = nn.Linear(2048, n_classes)

        # ~ Paper : and initialized with N(0, 0.005)
        torch.nn.init.normal_(model.fc.weight, mean=0, std=5e-3)

        # Initialize bias to small constant number (http://cs231n.github.io/neural-networks-2/#init)
        model.fc.bias.data.fill_(0.01)

        model = model.to(device=args.device)

        # ~ Paper : "The learning rate of fc8 is set to 10 times the other layers as it was training from scratch."
        optimizer = torch.optim.SGD([
            {'params': model.layer4.parameters()},
            {'params': model.fc.parameters(), 'lr': 10 * args.lr}
        ], lr=args.lr, momentum=args.momentum, weight_decay=args.decay)  # if not specified, the default lr is used
    else:
        model = FrozenCNN()
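        # FrozenCNN is not shown here; it presumably wraps the pre-extracted (cached)
        # features loaded by FeaturesDataset, leaving only the linear head trainable.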

        # ~ Paper : The dimension of last fully connected layer (fc8) was set to the number of categories (31)
        model.classifier[0] = nn.Linear(2048, n_classes)

        # ~ Paper : and initialized with N(0, 0.005)
        torch.nn.init.normal_(model.classifier[0].weight, mean=0, std=5e-3)

        # Initialize bias to small constant number (http://cs231n.github.io/neural-networks-2/#init)
        model.classifier[0].bias.data.fill_(0.01)

        model = model.to(device=args.device)

        # ~ Paper : "The learning rate of fc8 is set to 10 times the other layers as it was training from scratch."
        optimizer = torch.optim.SGD([
            {'params': model.classifier[0].parameters(), 'lr': 10 * args.lr}
        ], lr=args.lr, momentum=args.momentum, weight_decay=args.decay)  # if not specified, the default lr is used

    tracker = Tracker()

    for i in range(args.epochs):
        train(model, optimizer, source_train_loader, target_train_loader, tracker, args, i)
        evaluate(model, source_evaluate_loader, 'source', tracker, args, i)
        evaluate(model, target_evaluate_loader, 'target', tracker, args, i)

    # Save logged classification loss, coral loss, source accuracy, target accuracy
    log_file = "{}_coral-loss:{}_{}-{}_log.pth".format(args.backbone_network, args.lambda_coral, args.source,
                                                       args.target)
    torch.save(tracker.to_dict(), log_file)
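
Both mains weight a "coral loss" via --lambda_coral, but the loss itself lives in the unshown train function. For reference, the Deep CORAL loss (Sun & Saenko, 2016) matches the second-order statistics of source and target features; a minimal PyTorch sketch, where the function name and batch conventions are assumptions:

import torch

def coral_loss(source, target):
    # Squared Frobenius distance between the feature covariances of the
    # source and target batches, scaled by 1/(4 d^2) as in Eq. (1) of the paper.
    d = source.size(1)  # feature dimension

    def covariance(x):
        x = x - x.mean(dim=0, keepdim=True)
        return (x.t() @ x) / (x.size(0) - 1)

    diff = covariance(source) - covariance(target)
    return (diff ** 2).sum() / (4 * d * d)

Inside train, the total objective would then be the weighted sum the --lambda_coral help string describes: classification_loss + args.lambda_coral * coral_loss(source_features, target_features).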
Example #3
def main():

    # Paper: In the training phase, we set the batch size to 128,
    # base learning rate to 10^-3, weight decay to 5×10^-4, and momentum to 0.9

    parser = argparse.ArgumentParser(
        description='Train - Evaluate DeepCORAL model')
    parser.add_argument('--disable_cuda',
                        action='store_true',
                        help='Disable CUDA')
    parser.add_argument('--epochs',
                        type=int,
                        default=50,
                        help='Number of total epochs to run')
    parser.add_argument('--batch_size',
                        type=int,
                        default=128,
                        help='Batch size')
    parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
    parser.add_argument('--decay',
                        type=float,
                        default=5e-4,
                        help='Weight decay (L2 regularization)')
    parser.add_argument('--momentum', type=float, default=0.9, help="Optimizer's momentum")
    parser.add_argument('--lambda_coral',
                        type=float,
                        default=0.5,
                        help="Weight that trades off the adaptation with "
                        "classification accuracy on the source domain")
    parser.add_argument('--source',
                        default='amazon',
                        help="Source Domain (dataset)")
    parser.add_argument('--target',
                        default='webcam',
                        help="Target Domain (dataset)")

    args = parser.parse_args()
    args.device = None

    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
    else:
        args.device = torch.device('cpu')

    source_train_loader = get_loader(name_dataset=args.source,
                                     batch_size=args.batch_size,
                                     train=True)
    target_train_loader = get_loader(name_dataset=args.target,
                                     batch_size=args.batch_size,
                                     train=True)

    source_evaluate_loader = get_loader(name_dataset=args.source,
                                        batch_size=args.batch_size,
                                        train=False)
    target_evaluate_loader = get_loader(name_dataset=args.target,
                                        batch_size=args.batch_size,
                                        train=False)

    n_classes = len(source_train_loader.dataset.classes)

    # ~ Paper : "We initialized the other layers with the parameters pre-trained on ImageNet"
    # check https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    model = alexnet(pretrained=True)
    # ~ Paper : The dimension of last fully connected layer (fc8) was set to the number of categories (31)
    model.classifier[6] = nn.Linear(4096, n_classes)
    # ~ Paper : and initialized with N(0, 0.005)
    torch.nn.init.normal_(model.classifier[6].weight, mean=0, std=5e-3)

    # Initialize bias to small constant number (http://cs231n.github.io/neural-networks-2/#init)
    model.classifier[6].bias.data.fill_(0.01)

    model = model.to(device=args.device)

    # ~ Paper : "The learning rate of fc8 is set to 10 times the other layers as it was training from scratch."
    optimizer = torch.optim.SGD(
        [
            {'params': model.features.parameters()},
            {'params': model.classifier[:6].parameters()},
            # fc8 -> 7th element (index 6) in the Sequential block
            {'params': model.classifier[6].parameters(), 'lr': 10 * args.lr}
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.decay)  # if not specified, the default lr is used

    tracker = Tracker()

    for i in range(args.epochs):
        train(model, optimizer, source_train_loader, target_train_loader,
              tracker, args, i)
        evaluate(model, source_evaluate_loader, 'source', tracker, args, i)
        evaluate(model, target_evaluate_loader, 'target', tracker, args, i)

    # Save logged classification loss, coral loss, source accuracy, target accuracy
    torch.save(tracker.to_dict(), "log.pth")