Example #1

# Shared imports assumed by all three examples below; FlexibleCustomDataloader,
# losses, and log are project-specific helpers from the surrounding repository.
import os
import time
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def train_classifier(networks, optimizers, dataloader, epoch=None, **options):
    for net in networks.values():
        net.train()
    netC = networks['classifier_kplusone']
    optimizerC = optimizers['classifier_kplusone']
    batch_size = options['batch_size']
    image_size = options['image_size']

    dataset_filename = options.get('aux_dataset')
    if not dataset_filename or not os.path.exists(dataset_filename):
        raise ValueError("Aux Dataset not available")
    print("Using aux_dataset {}".format(dataset_filename))
    aux_dataloader = FlexibleCustomDataloader(dataset_filename, batch_size=batch_size, image_size=image_size)

    for i, (images, class_labels) in enumerate(dataloader):
        images = Variable(images).cuda()
        labels = Variable(class_labels).cuda()

        ############################
        # Classifier Update
        ############################
        netC.zero_grad()

        # Classify real examples into the correct K classes
        classifier_logits = netC(images)
        augmented_logits = F.pad(classifier_logits, (0, 1))
        # Labels here are integer class indices; if they were one-hot,
        # the commented line would recover the indices instead.
        # _, labels_idx = labels.max(dim=1)
        labels_idx = labels
        errC = F.nll_loss(F.log_softmax(augmented_logits, dim=1), labels_idx)
        errC.backward()
        # log.collect('Classifier Loss', errC)

        # Classify aux_dataset examples as open set
        aux_images, aux_labels = aux_dataloader.get_batch()
        classifier_logits = netC(Variable(aux_images).cuda())  # match the real batch's device
        augmented_logits = F.pad(classifier_logits, (0, 1))
        log_soft_open = F.log_softmax(augmented_logits, dim=1)[:, -1]
        errOpenSet = -log_soft_open.mean()
        errOpenSet.backward()
        # log.collect('Open Set Loss', errOpenSet)

        optimizerC.step()
        ############################

        # Keep track of accuracy on positive-labeled examples for monitoring
        # log.collect_prediction('Classifier Accuracy', netC(images), labels)

        # log.print_every()
    # Report the losses from the final batch of the epoch
    results = {
        'errC': errC.item(),
        'errOpenSet': errOpenSet.item(),
    }

    return results
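The core pattern in this example is the "augmented logits" trick: padding the K class logits with a fixed zero column creates an implicit (K+1)-th open-set class without adding any parameters. Below is a minimal self-contained sketch of that trick; the names and shapes (K, logits, batch of 4) are illustrative, not taken from the code above.

import torch
import torch.nn.functional as F

K = 10                              # number of known classes
logits = torch.randn(4, K)          # classifier outputs for a batch of 4

# Appending a constant zero logit yields an implicit (K+1)-th column
augmented_logits = F.pad(logits, (0, 1))           # shape (4, K + 1)
log_probs = F.log_softmax(augmented_logits, dim=1)

# Known examples: ordinary NLL against the true class index
labels = torch.tensor([0, 3, 7, 1])
err_known = F.nll_loss(log_probs, labels)

# Open-set (aux) examples: push probability mass onto the last column
err_open = -log_probs[:, -1].mean()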
Example #2
def train_classifier(networks, optimizers, dataloader, epoch=None, **options):
    for net in networks.values():
        net.train()
    netD = networks['discriminator']
    optimizerD = optimizers['discriminator']
    result_dir = options['result_dir']
    batch_size = options['batch_size']
    image_size = options['image_size']
    latent_size = options['latent_size']

    # Hack: use a ground-truth dataset to test
    # dataset_filename = '/mnt/data/svhn-59.dataset'
    dataset_filename = os.path.join(result_dir, 'aux_dataset.dataset')
    aux_dataloader = FlexibleCustomDataloader(dataset_filename, batch_size=batch_size, image_size=image_size)

    start_time = time.time()
    correct = 0
    total = 0

    for i, (images, class_labels) in enumerate(dataloader):
        images = Variable(images).cuda()
        labels = Variable(class_labels).cuda()

        ############################
        # Discriminator Updates
        ###########################
        netD.zero_grad()

        # Classify real examples into the correct K classes
        real_logits = netD(images)
        positive_labels = (labels == 1).type(torch.cuda.FloatTensor)
        augmented_logits = F.pad(real_logits, pad=(0,1))
        augmented_labels = F.pad(positive_labels, pad=(0,1))
        log_likelihood = F.log_softmax(augmented_logits, dim=1) * augmented_labels
        errC = -0.5 * log_likelihood.mean()

        # Classify the user-labeled (active learning) examples
        aux_images, aux_labels = aux_dataloader.get_batch()
        aux_images = Variable(aux_images).cuda()
        aux_labels = Variable(aux_labels).cuda()
        aux_logits = netD(aux_images)
        augmented_logits = F.pad(aux_logits, pad=(0,1))
        augmented_labels = F.pad(aux_labels, pad=(0, 1))
        augmented_positive_labels = (augmented_labels == 1).type(torch.FloatTensor).cuda()
        is_positive = (aux_labels.max(dim=1)[0] == 1).type(torch.FloatTensor).cuda()
        is_negative = 1 - is_positive
        fake_log_likelihood = F.log_softmax(augmented_logits, dim=1)[:,-1] * is_negative
        #real_log_likelihood = augmented_logits[:,-1].abs() * is_positive
        real_log_likelihood = (F.log_softmax(augmented_logits, dim=1) * augmented_positive_labels).sum(dim=1)
        errC -= fake_log_likelihood.mean() 
        errC -= 0.5 * real_log_likelihood.mean()

        errC.backward()
        optimizerD.step()
        ############################

        # Keep track of accuracy on positive-labeled examples for monitoring
        _, pred_idx = real_logits.max(1)
        _, label_idx = labels.max(1)
        correct += (pred_idx == label_idx).sum().item()
        total += len(labels)

        if i % 100 == 0:
            bps = (i+1) / (time.time() - start_time)
            ed = 0  # errD is not computed in this loop
            eg = 0  # errG is not computed in this loop
            ec = errC.item()
            acc = correct / max(total, 1)
            msg = '[{}][{}/{}] D:{:.3f} G:{:.3f} C:{:.3f} Acc. {:.3f} {:.3f} batch/sec'
            msg = msg.format(
                  epoch, i+1, len(dataloader),
                  ed, eg, ec, acc, bps)
            print(msg)
            print("Accuracy {}/{}".format(correct, total))
    return True
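This variant computes the class log-likelihood by masking log_softmax with one-hot labels rather than calling nll_loss, which also accommodates padded label rows where no column is hot. A minimal sketch of that masking, assuming one-hot labels (the names here are illustrative):

import torch
import torch.nn.functional as F

K = 10
logits = torch.randn(4, K)
one_hot = F.one_hot(torch.tensor([0, 3, 7, 1]), num_classes=K).float()

# Pad both logits and labels so the (K+1)-th column exists but is never hot
augmented_logits = F.pad(logits, (0, 1))
augmented_labels = F.pad(one_hot, (0, 1))

# The mask keeps only the true class's log-probability in each row; summing
# per row and averaging recovers cross-entropy (the code above calls .mean()
# over all elements instead, which only rescales the loss)
log_likelihood = F.log_softmax(augmented_logits, dim=1) * augmented_labels
loss = -log_likelihood.sum(dim=1).mean()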
Example #3
def train_classifier(networks, optimizers, dataloader, epoch=None, **options):
    for net in networks.values():
        net.train()
    netC = networks['classifier_kplusone']
    optimizerC = optimizers['classifier_kplusone']
    batch_size = options['batch_size']
    image_size = options['image_size']

    dataset_filename = options.get('aux_dataset')
    if not dataset_filename or not os.path.exists(dataset_filename):
        raise ValueError("Aux Dataset not available")
    print("Using aux_dataset {}".format(dataset_filename))
    aux_dataloader = FlexibleCustomDataloader(dataset_filename,
                                              batch_size=batch_size,
                                              image_size=image_size)

    # Project-specific module exposing the alternative losses used below
    loss_class = losses.losses()

    for i, (images, class_labels) in enumerate(dataloader):
        images = Variable(images).cuda()
        # MNIST only: pad 28x28 inputs to 32x32; remove for other datasets
        # images = T.Pad(2).forward(images)
        labels = Variable(class_labels).cuda()

        ############################
        # Classifier Update
        ############################
        netC.zero_grad()

        # Classify real examples into the correct K classes
        classifier_logits = netC(images)
        _, labels_idx = labels.max(dim=1)  # labels are one-hot; recover indices
        # errC = loss_class.kliep_loss(classifier_logits, labels_idx)
        errC = loss_class.power_loss_05(classifier_logits, labels_idx)
        errC.backward()

        log.collect('Classifier Loss', errC)

        # Classify aux_dataset examples as open set
        aux_images, aux_labels = aux_dataloader.get_batch()
        classifier_logits = netC(Variable(aux_images).cuda())
        augmented_logits = F.pad(classifier_logits, (0, 1))
        # Assign every aux example the index of the appended (K+1)-th column
        target_label = Variable(torch.LongTensor(
            classifier_logits.shape[0])).cuda()
        target_label[:] = classifier_logits.shape[1]
        # densityratio_loss = loss_class.kliep_loss(augmented_logits, target_label)
        densityratio_loss = loss_class.power_loss_05(augmented_logits,
                                                     target_label)
        densityratio_loss.backward()

        log.collect('Open Set Loss', densityratio_loss)

        optimizerC.step()
        ############################

        # Keep track of accuracy on positive-labeled examples for monitoring
        log.collect_prediction('Classifier Accuracy', netC(images), labels)

        log.print_every()

    return True
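Here the open-set objective is expressed by giving every aux example the index of the appended column as its target and handing that to the project's loss. A minimal sketch of the same labeling pattern, with F.cross_entropy standing in for the project-specific power_loss_05 (whose definition is not shown in this snippet):

import torch
import torch.nn.functional as F

K = 10
aux_logits = torch.randn(4, K)                # logits for aux (open set) images
augmented_logits = F.pad(aux_logits, (0, 1))  # append the (K+1)-th column

# Every aux example is labeled with index K, i.e. the appended open-set class
target = torch.full((aux_logits.shape[0],), K, dtype=torch.long)
loss = F.cross_entropy(augmented_logits, target)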