Example no. 1
def predict(args, model, use_cuda, test_gallery_loader, test_query_loader):
    if use_cuda:
        model.cuda()
    model.eval()
    with torch.no_grad():  # do not need to calculate information for gradient during eval
        '''gather gallery features'''
        gallery_feats = []
        gallery_names = []
        for idx, (imgs, names) in enumerate(test_gallery_loader):
            if use_cuda:
                imgs = imgs.cuda()
            feat, _ = model(imgs)
            gallery_feats.append(feat)
            gallery_names.append(names)

        gallery_feats = torch.cat(gallery_feats, dim=0)
        gallery_names = torch.cat(gallery_names, dim=0).view(-1)
        '''gather features for each batch of queries'''
        query_feats = []
        for idx, (imgs, names) in enumerate(test_query_loader):
            if use_cuda:
                imgs = imgs.cuda()
            feat, _ = model(imgs)
            query_feats.append(feat)

        query_feats = torch.cat(query_feats, dim=0)
    dist = utils.get_pairwise_distance(query_feats, gallery_feats, metric=args.metric)
    top_idx = dist.argmin(dim=1).cpu()  # index of the closest gallery entry for each query
    pred = gallery_names[top_idx].numpy()
    save_csv(args, preds=pred)
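The helper utils.get_pairwise_distance is not shown here; the call above only requires that it return a (num_queries, num_gallery) distance matrix for the configured metric. A minimal sketch under that assumption (the metric names supported below are illustrative):

import torch
import torch.nn.functional as F

def get_pairwise_distance(query_feats, gallery_feats, metric='euclidean'):
    # returns a (num_queries, num_gallery) matrix of pairwise distances
    if metric == 'euclidean':
        return torch.cdist(query_feats, gallery_feats, p=2)
    if metric == 'cosine':
        q = F.normalize(query_feats, dim=1)
        g = F.normalize(gallery_feats, dim=1)
        return 1 - q @ g.t()  # cosine distance
    raise ValueError('unknown metric: {}'.format(metric))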
Example no. 2
def eval_epoch(model, validation_data):
    ''' Epoch operation in evaluation phase '''

    model.eval()
    AP.reset()
    mAP.reset()
    Loss_meter.reset()

    with torch.no_grad():  # no gradients are needed during evaluation
        for batch_idx, (data, target) in enumerate(validation_data):
            data = data.cuda()
            target = target.cuda()

            # forward
            logits = model(data)

            # cross_entropy expects raw logits, so the loss is computed before softmax
            loss = F.cross_entropy(logits, target)
            Loss_meter.add(loss.item())

            # softmax probabilities and one-hot targets for the AP meters
            pred = F.softmax(logits, dim=1)
            one_hot = torch.zeros_like(pred).scatter(1, target.view(-1, 1), 1)

            AP.add(pred, one_hot)
            mAP.add(pred, one_hot)

    return Loss_meter.value()[0], mAP.value()
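The module-level AP, mAP, and Loss_meter objects are not defined in this snippet; their reset/add/value interface matches the torchnet meters, so a plausible setup (an assumption, not shown in the source) is:

from torchnet import meter

AP = meter.APMeter()                     # per-class average precision
mAP = meter.mAPMeter()                   # mean average precision over all classes
Loss_meter = meter.AverageValueMeter()   # running mean (and std) of the loss

AverageValueMeter.value() returns (mean, std), which matches the Loss_meter.value()[0] returned by both epoch functions.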
Example no. 3
    def finetune(self, epoch=0, model=None):
        """Finetune model."""
        self.logger.info("Finetune epoch: {}".format(epoch))
        if model is None:
            model = self.model
        model.train()

        train_loss = 0
        score = 0
        # number of batches the loader yields per epoch
        n_batches = len(self.finetuneloader)

        for batch_idx, (inputs, targets, _) in enumerate(self.finetuneloader):
            inputs, targets = wrap_cuda(inputs), wrap_cuda(targets)
            self.optimizer.zero_grad()
            outputs = model(inputs)
            batch_size = outputs.shape[0]
            loss = F.binary_cross_entropy(outputs, targets,
                                          reduction='none').sum() / batch_size
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            score += compute_f_score(outputs, targets).item()
            self.logger.info(
                STATUS_MSG.format(batch_idx + 1, n_batches,
                                  train_loss / (batch_idx + 1),
                                  score / (batch_idx + 1)))
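wrap_cuda and compute_f_score come from elsewhere in the project. wrap_cuda presumably just moves a tensor to the GPU when one is available; a minimal sketch under that assumption:

import torch

def wrap_cuda(tensor):
    # move the tensor to the GPU only if CUDA is available, otherwise leave it on the CPU
    return tensor.cuda() if torch.cuda.is_available() else tensor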
Example no. 4
def plot_bayesian(path):
    statedict_noise_path = glob.glob(os.path.join(path, '*.accum.pt'))[0]
    withnoise_histo = torch.load(statedict_noise_path)

    bayes_probas_nm = []

    for i in np.arange(-1, -20, -1):
        model = lib.model.MnistModel()
        state_dict = make_statedict(i, withnoise_histo)
        model.load_state_dict(state_dict)
        model.cuda()
        model.eval()

        # one row per notMNIST sample, one column per class
        epoch_probas = np.zeros((len(notmnist_loader.dataset), 10))

        with torch.no_grad():
            for idx, data in enumerate(notmnist_loader):
                data = data.cuda()
                proba = model(data).cpu().numpy()
                start = idx * notmnist_loader.batch_size
                # the last batch may be smaller than batch_size
                epoch_probas[start:start + proba.shape[0], :] = proba

        bayes_probas_nm.append(epoch_probas)

    bayes_probas_nm = np.stack(bayes_probas_nm)
    bayes_averaged_probas_nm = np.mean(bayes_probas_nm, axis=0)
    bayes_max_probas_nm = np.exp(bayes_averaged_probas_nm.max(axis=1))
    sns.kdeplot(data=bayes_max_probas_nm)
    plt.legend(labels=[os.path.dirname(path)])
    plt.show()
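make_statedict(i, withnoise_histo) is not shown. Judging from the non-Bayesian variant below, which copies one history entry into an OrderedDict, it presumably does the same for the i-th snapshot; a sketch under that assumption:

from collections import OrderedDict

def make_statedict(index, history):
    # turn the index-th snapshot of the accumulated parameter history into an
    # OrderedDict that model.load_state_dict can consume
    return OrderedDict((k, v) for k, v in history[index].items())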
Example no. 5
def plot_non_bayesian():
    statedict_nonoise_path = glob.glob(
        os.path.join(model_dir, 'SGD', '*.accum.pt'))[0]
    nonoise_histo = torch.load(statedict_nonoise_path)

    # copy the last snapshot of the parameter history into a loadable state dict
    usable_statedict = OrderedDict(nonoise_histo[-1].items())
    model = lib.model.MnistModel()
    model.load_state_dict(usable_statedict)
    model = model.cuda()

    model.eval()

    # Plot MNIST
    # probas = []
    # acc = []

    # for data, target in test_loader:
    #     data = data.cuda()
    #     target = target.cuda()
    #     output = model(data)
    #     prediction = output.data.max(1)[1]
    #     proba = output.data.max(1)[0]
    #     probas.append(proba.cpu().numpy())
    #     acc.append(prediction.eq(target.data).cpu().numpy())

    # probas = np.hstack(probas)
    # acc = np.hstack(acc)

    # correct_probas = np.exp(probas[acc == 1])
    # incorrect_probas = np.exp(probas[acc == 0])

    # plt.hist(correct_probas, bins=20, density=True, alpha = .8, label='correct')
    # plt.hist(incorrect_probas, bins=20, density=True, alpha=.8, label='incorrect')
    # plt.xlabel('confidence in prediction')
    # plt.ylabel('normalized counts')
    # plt.legend()
    # plt.show()

    # Plot Not MNIST
    notmnist_probas = []
    with torch.no_grad():
        for data in notmnist_loader:
            data = data.cuda()
            output = model(data)
            # keep the log-probability of the predicted class for each sample
            notmnist_probas.append(output.max(1)[0].cpu().numpy())

    notmnist_probas = np.hstack(notmnist_probas)
    notmnist_probas = np.exp(notmnist_probas)

    # plt.hist(notmnist_probas, bins=20, density=True, alpha = .8)
    # plt.xlabel('confidence in prediction')
    # plt.ylabel('normalized count')
    # plt.show()

    sns.kdeplot(data=notmnist_probas)
    plt.legend(labels=['SGD'])
    plt.show()
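Both plotting routines exponentiate the network outputs before treating them as probabilities, so lib.model.MnistModel is assumed to end in a log_softmax. A purely illustrative sketch of such a model (the layer sizes are made up, not taken from the source):

import torch.nn as nn
import torch.nn.functional as F

class MnistModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, 256)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)                  # flatten the 28x28 image
        x = F.relu(self.fc1(x))
        return F.log_softmax(self.fc2(x), dim=1)   # log-probabilities, matching the np.exp calls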
Example no. 6
def train_epoch(model, training_data, optimizer):
    ''' Epoch operation in training phase'''
    AP.reset()
    mAP.reset()
    Loss_meter.reset()
    model.train()

    for batch_idx, (data, target) in enumerate(training_data):

        data = data.cuda()
        target = target.cuda()

        # forward
        optimizer.zero_grad()
        logits = model(data)

        # cross_entropy expects raw logits, so the loss is computed before softmax
        loss = F.cross_entropy(logits, target)
        Loss_meter.add(loss.item())  # .item() converts the loss to a Python float

        # backward
        loss.backward()
        # optimize
        optimizer.step()

        # softmax probabilities and one-hot targets for the AP meters
        pred = F.softmax(logits, dim=1)
        one_hot = torch.zeros_like(pred).scatter(1, target.view(-1, 1), 1)

        AP.add(pred.detach(), one_hot)
        mAP.add(pred.detach(), one_hot)

    return Loss_meter.value()[0], mAP.value()
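Both epoch loops build the one-hot targets with zeros_like(...).scatter(...). On PyTorch 1.1 or later the built-in helper is an equivalent, shorter alternative:

import torch.nn.functional as F

# equivalent to torch.zeros_like(pred).scatter(1, target.view(-1, 1), 1)
one_hot = F.one_hot(target, num_classes=pred.shape[1]).float()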
Example no. 7
    def get_model(self, vocab_size):
        """Instantiate the model class named in the config with its configured arguments."""
        model_cls = getattr(lib.model, self.config["model"])
        return model_cls(vocab_size, **self.config["model_args"])
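For illustration, a hypothetical config (the class name and arguments below are invented, not taken from the source) would be resolved like this:

config = {
    "model": "TextClassifier",           # name of a class defined in lib.model
    "model_args": {"hidden_size": 256},  # keyword arguments forwarded to its constructor
}
# get_model(vocab_size) is then equivalent to:
#     lib.model.TextClassifier(vocab_size, hidden_size=256)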
Example no. 8
model.load_state_dict(torch.load(path))
model.eval()

deltas = []
# sample 10 random rows of experts; the first l1 entries of each serve as the fixed state part
d = np.random.randint(0, experts.shape[0], 10)
inx = 0
for a in d:
    print('inx %d' % inx)
    inx += 1
    state = experts[a][:l1]
    delta = 0
    print(a)
    # accumulate the squared difference of model outputs over random pairs of expert tails
    b = np.random.randint(0, experts.shape[0], 500)
    for i in b:
        c = np.random.randint(0, experts.shape[0], 500)
        for j in c:
            in1 = np.hstack([state, experts[i][l1:]])
            in2 = np.hstack([state, experts[j][l1:]])
            in1 = torch.from_numpy(in1).to(device)
            in2 = torch.from_numpy(in2).to(device)
            with torch.no_grad():
                de = (model(in1) - model(in2)) ** 2
            delta += de.cpu().numpy()
    print(delta)
    deltas.append(delta)
print(deltas)
print(np.mean(deltas))
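A small optimization sketch: in1 depends only on the outer index i, so its forward pass can be hoisted out of the innermost loop (one forward pass for it per i instead of one per (i, j) pair):

for i in b:
    in1 = torch.from_numpy(np.hstack([state, experts[i][l1:]])).to(device)
    with torch.no_grad():
        out1 = model(in1)                 # computed once per i
    c = np.random.randint(0, experts.shape[0], 500)
    for j in c:
        in2 = torch.from_numpy(np.hstack([state, experts[j][l1:]])).to(device)
        with torch.no_grad():
            delta += ((out1 - model(in2)) ** 2).cpu().numpy()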

Example no. 9
                                                thinning=100)
# print('burn_in: ', burn_in)

state_accum = []

for epoch in range(1, epochs + 1):
    t0 = time.time()

    print('current_lr: ', current_lr)
    model.train()
    for data, target in train_loader:
        step += 1
        data = data.cuda()
        target = target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        if precond:
            precond.step()
        if block_size > 0 and block_decay > 0 and lr_param:
            optimizer.step(lr=current_lr)
        else:
            optimizer.step()

        prediction = output.data.max(1)[1]  # index of the class with the highest log-probability
        accuracy = np.mean(prediction.eq(target.data).cpu().numpy()) * 100
        # print('step: ', step, ', accuracy: ', accuracy, ', loss: ', loss.item())

        statedict = model.state_dict().copy()
        for k, v in statedict.items():