Example #1
0
    # ===========================================================================
    # Knowledge-distillation training loop: fit `net` (the student) on CIFAR-10
    # against precomputed teacher `logits`, checkpointing the best model by
    # validation score.
    trainloader, validloader = get_train_valid_cifar10_dataloader(
        '../../data', batch_size)
    testloader = get_test_cifar10_dataloader('../../data', batch_size)
    # FIX: the np.int alias was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int performs the identical truncating conversion here.
    # NOTE(review): len(trainloader) is already the number of batches, so
    # dividing by batch_size again looks suspicious — confirm intent upstream.
    step_size = 2 * int(np.floor(len(trainloader) / batch_size))
    optimizer = optim.SGD(net.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=5e-4)
    # Cyclic LR between base_lr=1e-5 and max_lr=1e-2 (positional arguments),
    # stepped once per batch inside train_KD.
    scheduler = optim.lr_scheduler.CyclicLR(optimizer,
                                            1e-5,
                                            1e-2,
                                            step_size_up=step_size)
    for epoch in range(0, epochs):
        print("Epoch:", epoch)
        # train for one epoch with the distillation objective
        train_KD(trainloader, net, optimizer, scheduler, logits, device)

        # evaluate on validation set
        valid_score = evalation(validloader, net, device)
        print("Validation Score: ", valid_score)

        if valid_score > best_score:
            best_score = valid_score
            # NOTE(review): state_dict() returns references to live tensors;
            # the checkpoint saved below is correct, but the in-memory copy
            # will track subsequent training updates.
            best_model_state_dict = net.state_dict()
            saving_dict = {
                'epoch': epoch + 1,
                'state_dict': best_model_state_dict,
                'validation_score': valid_score
            }
            # Persist only when validation improves.
            torch.save(saving_dict, 'student_kd.chkpt.tar')
Example #2
0
        # Train one student network for this phase, checkpointing the best
        # weights by validation score, then reload those weights at the end.
        student.to(device)
        if device == 'cuda':
            # NOTE(review): bare .half() without a loss scaler can underflow
            # gradients in fp16 — confirm this is intentional.
            student.half()
        optimizer = get_optimizer(student)
        # Cyclic LR between base_lr=1e-5 and max_lr=1e-2 (positional args);
        # `step_size` is defined in the enclosing (unseen) scope.
        scheduler = optim.lr_scheduler.CyclicLR(optimizer,
                                                1e-5,
                                                1e-2,
                                                step_size_up=step_size)
        best_score = -np.inf
        for epoch in range(0, epochs):
            print("Epoch:", epoch)
            # train for one epoch
            train(trainloader, student, optimizer, scheduler, device)

            # evaluate on validation set
            valid_score = evalation(validloader, student, device)
            print("Validation Score: ", valid_score)

            if valid_score > best_score:
                best_score = valid_score
                # NOTE(review): state_dict() returns references, not a deep
                # copy — the tensors keep updating as training continues, so
                # the load_state_dict below may not restore the true best.
                best_model_state_dict = student.state_dict()
                saving_dict = {
                    'epoch': epoch + 1,
                    'state_dict': best_model_state_dict,
                    'validation_score': valid_score
                }
                # One checkpoint file per auxiliary phase.
                torch.save(saving_dict, 'aux{}_chkpt.tar'.format(phase_idx))

        # test
        # update teacher with the best parameters
        student.load_state_dict(best_model_state_dict)
Example #3
0
    # Baseline supervised training on the labeled subset, tracking the test
    # accuracy that corresponds to the best validation accuracy.
    # Cyclic LR oscillating between Config.eta_min and Config.eta_max
    # (positional: base_lr, max_lr).
    scheduler = optim.lr_scheduler.CyclicLR(optimizer,
                                            Config.eta_min,
                                            Config.eta_max,
                                            step_size_up=step_size)
    net.to(device)
    # Print out the configuration
    best_valid_acc = -np.inf
    for epoch in range(Config.n_epochs):
        # train the network
        train(lbl_trainloader,
              net,
              optimizer,
              scheduler=scheduler,
              device=device)
        # evaluate it
        valid_acc = evalation(validloader, net, device=device, tag="Valid")
        test_acc = evalation(testloader, net, device=device, tag="Test")

        if valid_acc > best_valid_acc:
            best_valid_acc = valid_acc
            # NOTE(review): state_dict() returns references to live tensors;
            # the checkpoint saved below is a faithful snapshot, but the
            # in-memory `best_teacher` will track later updates.
            best_teacher = net.state_dict()
            # Remember the test accuracy achieved at the best-validation epoch.
            corresp_test_acc = test_acc
            saving_dict = {
                'epoch': epoch,
                'state_dict': best_teacher,
                'best_valid_acc': best_valid_acc,
                'corresp_test_acc': corresp_test_acc
            }
            # Checkpoint path parameterized by the labeled-data fraction.
            torch.save(
                saving_dict,
                Config.baseline_tar_fmt.format(plbl=Config.part_labeled))
Example #4
0
    # Train the teacher network on CIFAR-10 with SGD + cyclic LR,
    # checkpointing the best weights by validation score.
    # L2 regularization strength applied via SGD weight decay.
    l2_reg_weight = 5e-4
    optimizer = optim.SGD(teacher.parameters(),
        lr=0.05, momentum=0.9, weight_decay=l2_reg_weight)
    # ======================================================
    trainloader, validloader = get_train_valid_cifar10_dataloader('../../data', batch_size)
    # set learning rate scheduler
    # FIX: the np.int alias was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int performs the identical truncating conversion here.
    # NOTE(review): len(trainloader) is already the number of batches, so
    # dividing by batch_size again looks suspicious — confirm intent upstream.
    step_size = 2*int(np.floor(len(trainloader)/batch_size))
    # Cyclic LR between base_lr=1e-5 and max_lr=1e-2 (positional arguments).
    scheduler = optim.lr_scheduler.CyclicLR(optimizer, 1e-5, 1e-2, step_size_up=step_size)
    best_score = -np.inf
    for epoch in range(0, nepochs):
        print("Epoch:", epoch)
        # train for one epoch
        train(trainloader, teacher, optimizer, scheduler, device)

        # evaluate on validation set
        valid_score = evalation(validloader, teacher, device)
        print("Validation Score: ", valid_score)
        if valid_score > best_score:
            # update the score
            best_score = valid_score
            # NOTE(review): state_dict() returns references to live tensors;
            # the file saved below is a faithful snapshot, but `best_states`
            # in memory will track subsequent training updates.
            best_states = teacher.state_dict()
            saving_dict = {
                'epoch': epoch+1,
                'state_dict': best_states,
                'validation_score': valid_score
            }
            # save when the performance is better
            torch.save(saving_dict, teacher_model_file)
    # ======================================================================
    # Evaluation on test
    # ======================================================================