Example #1
0
def set_model(args, n_data):
    """Build the backbone, NCE contrast module and the two NCE criteria.

    Args:
        args: parsed CLI arguments; must provide ``model``, ``feat_dim``,
            ``nce_k``, ``nce_t``, ``nce_m`` and, for alexnet, ``view``.
        n_data: number of training samples (size of the NCE memory bank).

    Returns:
        Tuple ``(model, contrast, criterion_ab, criterion_l)``; all four
        are moved to the GPU when CUDA is available.

    Raises:
        NotImplementedError: if ``args.view`` is unknown for alexnet.
        ValueError: if ``args.model`` is not supported.
    """
    if args.model == 'alexnet':
        if args.view == 'Lab':
            model = alexnet(in_channel=(1, 2), feat_dim=args.feat_dim)
        elif args.view == 'Rot':
            model = alexnet(in_channel=(3, 3), feat_dim=args.feat_dim)
        elif args.view == 'LabRot':
            model = alexnet(in_channel=(1, 2), feat_dim=args.feat_dim)
        else:
            # BUG FIX: ``NotImplemented`` is a sentinel constant, not an
            # exception class — calling it raised TypeError instead of the
            # intended error. Raise NotImplementedError instead.
            raise NotImplementedError('view not implemented {}'.format(args.view))
    elif args.model.startswith('resnet'):
        model = ResNetV2(args.model)
    else:
        raise ValueError('model not supported yet {}'.format(args.model))

    contrast = NCEAverage(args.feat_dim, n_data, args.nce_k, args.nce_t,
                          args.nce_m)
    criterion_l = NCECriterion(n_data)
    criterion_ab = NCECriterion(n_data)

    if torch.cuda.is_available():
        # Wrap in DataParallel only when more than one GPU is present.
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model).cuda()
        else:
            model = model.cuda()
        contrast = contrast.cuda()
        criterion_ab = criterion_ab.cuda()
        criterion_l = criterion_l.cuda()
        cudnn.benchmark = True

    return model, contrast, criterion_ab, criterion_l
def set_model(args, n_data):
    """Create the alexnet backbone plus the NCE contrast and criteria.

    Only ``args.model == 'alexnet'`` is supported; anything else raises
    ValueError. All modules are moved to the GPU when CUDA is available.

    Returns:
        Tuple ``(model, contrast, criterion_ab, criterion_l)``.
    """
    # Guard clause: reject unsupported models up front.
    if args.model != 'alexnet':
        raise ValueError('model not supported yet {}'.format(args.model))
    model = alexnet(args.feat_dim)

    contrast = NCEAverage(args.feat_dim, n_data, args.nce_k, args.nce_t,
                          args.nce_m)
    criterion_l = NCECriterion(n_data)
    criterion_ab = NCECriterion(n_data)

    if torch.cuda.is_available():
        model, contrast = model.cuda(), contrast.cuda()
        criterion_ab, criterion_l = criterion_ab.cuda(), criterion_l.cuda()
        cudnn.benchmark = True

    return model, contrast, criterion_ab, criterion_l
Example #3
0
def set_model(args, n_data):
    """Instantiate a CMC backbone plus the NCE average and loss criteria.

    Supports ``alexnet`` and any ``resnet*`` variant; other model names
    raise ValueError. ``args.softmax`` selects NCESoftmaxLoss over
    NCECriterion. Modules are placed on the GPU when one is available.

    Returns:
        Tuple ``(model, contrast, criterion_ab, criterion_l)``.
    """
    if args.model == 'alexnet':
        model = MyAlexNetCMC(args.feat_dim)
    elif args.model.startswith('resnet'):
        model = MyResNetsCMC(args.model)
    else:
        raise ValueError('model not supported yet {}'.format(args.model))

    contrast = NCEAverage(args.feat_dim, n_data, args.nce_k, args.nce_t, args.nce_m, args.softmax)
    # Same criterion type for both views, chosen by the softmax flag.
    if args.softmax:
        criterion_l = NCESoftmaxLoss()
        criterion_ab = NCESoftmaxLoss()
    else:
        criterion_l = NCECriterion(n_data)
        criterion_ab = NCECriterion(n_data)

    if torch.cuda.is_available():
        model = model.cuda()
        contrast = contrast.cuda()
        criterion_ab = criterion_ab.cuda()
        criterion_l = criterion_l.cuda()
        cudnn.benchmark = True

    return model, contrast, criterion_ab, criterion_l
        self.embeddings = x
        # treat the last batch specially
        if i == self.loader_length - 1:
            self.prepare_indices(batch_size, bs)

        pos_logits = self.compute_data_prob()
        neg_logits = self.compute_noise_prob()
        outs, probs = self.nce_core(pos_logits, neg_logits)
        # pdb.set_trace()

        with torch.no_grad():
            self.update_new_data_memory(idxs)

        return outs, probs


if __name__ == '__main__':
    # Smoke test: run ClusterAverage and NCEAverage side by side on the
    # same random embeddings and drop into the debugger for inspection.
    cluster_avg = ClusterAverage(128, 9000, 239, 4, 0.07, 3).cuda()
    print(cluster_avg.neg_indices)
    nce_avg = NCEAverage(128, 9000, 239, 4, 0.07, 3).cuda()

    # Seed the cluster memory bank from the NCE module's bank.
    cluster_avg.init_memoryBank(nce_avg)

    feats = l2_normalize(torch.randn(12, 128).cuda())
    sample_idxs = torch.arange(3).cuda()
    outs_c, probs_c = cluster_avg(feats, sample_idxs, 0)
    outs_n, probs_n, _ = nce_avg(feats, sample_idxs, 0)
    pdb.set_trace()
Example #5
0
        assert bsz == pos_indices.size(0), 'FATAL ERROR!'
        # batchSize = bsz // self.clips_num

        x_other = x[pos_indices]  # (bsz, clips_num-1, x.size(1))
        x_other = torch.mean(x_other, dim=1)  # same shape as x

        # input_log_softmax = torch.nn.functional.log_softmax(x, dim=1)
        input_log = torch.log(x)
        # target_softmax = torch.nn.functional.softmax(x_other, dim=1)

        return torch.nn.functional.kl_div(input_log, x_other, reduction='batchmean')



if __name__ == '__main__':
    # Quick sanity run: push random normalized embeddings through
    # NCEAverage and report both the NCE loss and the KL loss.
    nce_avg = NCEAverage(128, 9000, 1000, 4, 0.07, 3).cuda()
    nce_loss_fn = NCECriterion(9000).cuda()
    kl_loss_fn = KLCriterion(4).cuda()

    feats = l2_normalize(torch.randn(12, 128).cuda())
    sample_idxs = torch.arange(3).cuda()

    outs, probs = nce_avg(feats, sample_idxs, 0)
    print(outs[:, 0].mean())
    print(probs)
    loss = nce_loss_fn(outs).item()
    print(loss)
    kl_loss = kl_loss_fn(outs, nce_avg.pos_indices).item()
    print(kl_loss)