import os

import torch
import torch.nn as nn

# Project-local modules assumed importable from the repository root:
# dataset loaders, DAN/distiller models, and the ResNet backbones.
import DA_datasets
import DAN_model
import ResNet
import distiller


def main():
    batch_size = 32
    test_batch_size = 32

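    # Root directories of the three ImageCLEF-DA domains
    # (p: Pascal VOC 2012, c: Caltech-256, i: ImageNet ILSVRC 2012).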
    p = os.path.expanduser("./image-clef/p")
    c = os.path.expanduser("./image-clef/c")
    i = os.path.expanduser("./image-clef/i")

    epochs = 200
    lr = 0.0001
    device = torch.device("cuda")

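    # Source (P) and target (C) training loaders plus the target test loader;
    # the trailing 0 is presumably the number of data-loading workers.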
    train_loader_source = DA_datasets.imageclef_train_loader(p, batch_size, 0)
    train_loader_target = DA_datasets.imageclef_train_loader(c, batch_size, 0)
    testloader_1_target = DA_datasets.imageclef_test_loader(
        c, test_batch_size, 0)

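    # DAN classifier with a ResNet-50 backbone; the True flag presumably
    # requests ImageNet-pretrained backbone weights.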
    model_dan = DAN_model.DANNet_ResNet(ResNet.resnet50, True).to(device)
    #model_dan = DAN_model.DANNetVGG16(models.vgg16, True).to(device)

    optimizer = torch.optim.SGD(model_dan.parameters(),
                                momentum=0.9,
                                lr=lr,
                                weight_decay=5e-4)
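    # dan_train (defined elsewhere in the repository) is assumed to run the
    # DAN adaptation loop over the source and target loaders and to evaluate
    # on the target test loader each epoch.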
    dan_train(epochs,
              lr,
              model_dan,
              train_loader_source,
              device,
              train_loader_target,
              testloader_1_target,
              optimizer,
              scheduler=None,
              is_debug=False)


# Example #2
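# This example distills a DAN teacher (ResNet-50, restored from the checkpoint
# below) into a smaller ResNet-34 student on the I (ImageNet) target domain.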
def main():
    batch_size = 32
    test_batch_size = 32
    lr = 0.0005
    momentum = 0.9
    epochs = 200
    epoch_step = 30
    weight_decay = 5e-4
    teacher_pretrained_path = "./da_resnet50_p_i.pth"
    student_pretrained = False
    device = torch.device("cuda")

    p = os.path.expanduser("./image-clef/p")
    c = os.path.expanduser("./image-clef/c")
    i = os.path.expanduser("./image-clef/i")

    train_loader_source = DA_datasets.imageclef_train_loader(p, batch_size, 0)
    train_loader_target = DA_datasets.imageclef_train_loader(i, batch_size, 0)
    testloader_target = DA_datasets.imageclef_test_loader(
        i, test_batch_size, 0)

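    # Teacher: DAN with a ResNet-50 backbone (weights restored from the
    # checkpoint below); student: ResNet-34, optionally ImageNet-pretrained.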
    teacher_model = DAN_model.DANNet_ResNet(ResNet.resnet50, False)
    student_model = DAN_model.DANNet_ResNet(ResNet.resnet34,
                                            student_pretrained)
    teacher_model = teacher_model.to(device)
    student_model = student_model.to(device)
    if teacher_pretrained_path != "":
        teacher_model.load_state_dict(
            torch.load(teacher_pretrained_path)['student_model'])

    if torch.cuda.device_count() > 1:
        teacher_model = torch.nn.DataParallel(teacher_model).to(device)
        student_model = torch.nn.DataParallel(student_model).to(device)

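    # Distiller wraps teacher and student; the Connectors referenced below are
    # presumably adaptation layers used for intermediate-feature distillation.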
    distiller_model = distiller.Distiller(teacher_model,
                                          student_model).to(device)

    if torch.cuda.device_count() > 1:
        distiller_model = torch.nn.DataParallel(distiller_model).to(device)

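    # Optimize only the student network and the connector layers;
    # the teacher remains frozen.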
    optimizer = torch.optim.SGD(list(distiller_model.s_net.parameters()) +
                                list(distiller_model.Connectors.parameters()),
                                lr,
                                momentum=momentum,
                                weight_decay=weight_decay)

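    # od_kd_without_label (defined elsewhere) is assumed to distill the teacher
    # into the student on unlabeled target data, evaluating on the target test
    # loader each epoch.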
    od_kd_without_label(epochs, distiller_model, optimizer,
                        train_loader_target, testloader_target, device)


def train_normal():
    batch_size = 32
    test_batch_size = 32
    lr = 0.001
    momentum = 0.9
    epochs = 200
    weight_decay = 5e-4
    device = torch.device("cuda")

    p = os.path.expanduser("./image-clef/p")
    c = os.path.expanduser("./image-clef/c")
    i = os.path.expanduser("./image-clef/i")

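    # Plain supervised training on the P domain (no adaptation); the best
    # checkpoint is saved as teacher_model_p.pth for later use as a teacher.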
    train_dataset = DA_datasets.imageclef_train_loader(p, batch_size, 0)
    test_dataset = DA_datasets.imageclef_test_loader(p, test_batch_size, 0)
    model = DAN_model.DANNet_ResNet(ResNet.resnet50, True)
    model = model.to(device)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr,
                                momentum=momentum,
                                weight_decay=weight_decay)
    criterion = nn.CrossEntropyLoss()
    best_acc = 0.0
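    # Standard cross-entropy training loop; keep the checkpoint with the best
    # test accuracy seen so far.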
    for epoch in range(epochs):
        running_loss = 0.0
        for batch_idx, data in enumerate(train_dataset, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        acc = eval(model, device, test_dataset)
        if acc > best_acc:
            best_acc = acc
            torch.save(model, 'teacher_model_p.pth')
        print(f'epoch: {epoch}, loss: {running_loss / len(train_dataset):.4f}, acc: {acc}')
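

# The `eval` helper called in train_normal is not shown in this excerpt.
# Below is a minimal sketch of what such a top-1 accuracy routine might look
# like, assuming the model returns plain logits for a single input batch
# (the name shadows the built-in eval, matching the call site above).
def eval(model, device, test_loader):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            logits = model(inputs)
            preds = logits.argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    model.train()
    return correct / total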