fp_inputs_pkl.close()

# Target ys
# num_target_classes x num_perturb x num_class
fp_target = 0.254*np.ones((args.num_class, args.num_dx, args.num_class))

for j in range(args.num_dx):
    for i in range(args.num_class):
        fp_target[i, j, i] = -0.7

fp_target = 1.5*fp_target
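
# After the 1.5x scaling, each fingerprint target is +0.381 (1.5 * 0.254) for
# the off-target classes and -1.05 (1.5 * -0.7) for the class being
# fingerprinted, i.e. each perturbation is expected to lower the normalized
# logit of its own class.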
fp_target_pkl = open(os.path.join(args.log_dir, "fp_outputs.pkl"), "wb")
pickle.dump(fp_target, fp_target_pkl)
fp_target_pkl.close()

fp_target = util.np2var(fp_target, args.cuda)

fp = Fingerprints()
fp.dxs = fp_dx
fp.dys = fp_target
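
# A minimal sketch of the Fingerprints container and the util.np2var helper
# assumed above (their real definitions live elsewhere in this project):
#
# class Fingerprints(object):
#     """Holds perturbations (dxs) and their target responses (dys)."""
#     dxs = None  # list of num_dx arrays, each 1 x C x W x H
#     dys = None  # tensor of shape num_class x num_dx x num_class
#
# def np2var(x, cuda=False):
#     v = torch.autograd.Variable(torch.from_numpy(x).float())
#     return v.cuda() if cuda else v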

from model import CW2_Net as Net
#from res_model import ResNet as Net
from models import *

print("Train using model", Net)

model = Net()
if args.cuda:
    model.cuda()
Example #2
def test(epoch, args, model, data_loader, fp_dx, fp_target, test_length=None):
    model.eval()
    test_loss = 0
    correct = 0
    correct_fp = 0
    fingerprint_accuracy = []

    loss_y = 0
    loss_dy = 0
    num_same_argmax = 0

    for e,(data, target) in enumerate(data_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()

        data, target = Variable(data), Variable(target)

        with torch.no_grad():
            data_np = util.var2np(data, args.cuda)
            real_bs = data_np.shape[0]

            logits = model(data)
            output = F.log_softmax(logits, dim=1)
            # Normalize logits to unit L2 norm so the fingerprint targets are
            # compared against directions rather than raw magnitudes.
            logits_norm = logits * torch.norm(logits, 2, 1, keepdim=True).reciprocal().expand(real_bs, args.num_class)


            fp_target_var = torch.index_select(fp_target, 0, target)

            # votes_dict = defaultdict(lambda: {i:0 for i in range(args.num_class)})
            loss_n = torch.nn.MSELoss()
            for i in range(args.num_dx):
                dx = fp_dx[i]
                fp_target_var_i = fp_target_var[:,i,:]

                logits_p = model(data + util.np2var(dx, args.cuda))
                output_p = F.log_softmax(logits_p, dim=1)
                logits_p_norm = logits_p * torch.norm(logits_p, 2, 1, keepdim=True).reciprocal().expand(real_bs, args.num_class)

                diff = logits_p_norm - logits_norm
                diff_class = diff.data.max(1, keepdim=True)[1]
                #diff = diff * torch.norm(diff, 2, 1, keepdim=True).reciprocal().expand(real_bs, args.num_class)

                fp_target_class = fp_target_var_i.data.max(1, keepdim=True)[1]
                loss_y += loss_n(logits_p_norm, fp_target_var_i)
                loss_dy += 10.0*loss_n(diff, fp_target_var_i)
                num_same_argmax += torch.sum(
                        diff_class == fp_target_class).item()

                # for sample_num in range(real_bs):
                #     fingerprint_class = np.argmax(util.var2np(diff, args.cuda)[sample_num,:])
                #     fingerprinted_class = np.argmax(fp_target_var[i,sample_num,:])
                #     fingerprint_accuracy.append((fingerprint_class,fingerprinted_class))

                #     votes_dict[sample_num][fingerprint_class] += 1
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()

            # print(e, "pred:", pred, "label:", target, correct)
            # pred_fp = torch.from_numpy(get_majority(votes_dict).astype(int))
            # correct_fp += pred_fp.eq(target.data.view_as(pred_fp)).cpu().sum()
    # Aggregate metrics once over the full test set (outside the batch loop).
    if test_length is None:
        test_length = len(data_loader.dataset)
    test_loss /= test_length

    loss_y /= test_length
    loss_dy /= test_length
    argmax_acc = num_same_argmax*1.0 / (test_length * args.num_dx)

    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, test_length,
        100. * correct / test_length))

    # print('\nTest set: Average loss: {:.4f}, Fingerprint Accuracy: {}/{} ({:.0f}%)\n'.format(
    #     test_loss, correct_fp, len(data_loader.dataset),
    #     100. * correct_fp / len(data_loader.dataset)))

    print('Fingerprints (on test): L(fp, y) loss: {:.4f}, L(fp, dy) loss: {:.4f}, argmax y = argmax f(x+dx) Accuracy: {}/{} ({:.0f}%)'.format(
        loss_y.cpu().item(), loss_dy.cpu().item(), num_same_argmax, test_length * args.num_dx,
        100. * argmax_acc))

    result = {"epoch": epoch,
              "test-loss": test_loss,
              "test-correct": correct,
              "test-N": test_length,
              "test-acc": correct/test_length,
              "fingerprint-loss (y)": loss_y.item(),
              "fingerprint-loss (dy)": loss_dy.item(),
              "fingerprint-loss (argmax)": argmax_acc,
              "args": args
              }
    path = os.path.join(args.log_dir, "train", "log-ep-{}.pkl".format(epoch))
    print("Saving log in", path)
    with open(path, "wb") as f:
        pickle.dump(result, f)
    return test_loss
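
# Illustrative call of test() (test_loader and epoch come from the surrounding
# script and are assumptions here, not defined in this snippet):
# avg_test_loss = test(epoch, args, model, test_loader, fp_dx, fp_target)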
Example #3
def train(epoch, args, model, optimizer, data_loader, fp_dx, fp_target, ds_name=None):

    fingerprint_accuracy = []

    loss_n = torch.nn.MSELoss()

    for batch_idx, (x, y) in enumerate(data_loader):
        model.train()
        if args.cuda:
            x, y = x.cuda(), y.cuda()
        x, y = Variable(x), Variable(y)

        optimizer.zero_grad()
        real_bs = y.size(0)
        loss_func = nn.CrossEntropyLoss()

        # Batch x args.num_dx x output_size
        fp_target_var = torch.index_select(fp_target, 0, y)

        ## Add loss for (y+dy,model(x+dx)) for each sample, each dx
        data_np = util.var2np(x, args.cuda)
        x_net = x
        for i in range(args.num_dx):
            dx = util.np2var(fp_dx[i], args.cuda)
            x_net = torch.cat((x_net, x + dx))

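        # One forward pass over the clean batch plus all num_dx perturbed
        # copies; slices of length real_bs below recover each piece.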
        logits_net = model(x_net)
        output_net = F.log_softmax(logits_net, dim=1)

        yhat = output_net[0:real_bs]
        logits = logits_net[0:real_bs]
        logits_norm = logits * torch.norm(logits, 2, 1, keepdim=True).reciprocal().expand(real_bs, args.num_class)
        loss_fingerprint_y = 0
        loss_fingerprint_dy = 0
        loss_vanilla = loss_func(yhat, y)

        for i in range(args.num_dx):
            dx = fp_dx[i]
            fp_target_var_i = fp_target_var[:,i,:]
            logits_p = logits_net[(i+1)*real_bs:(i+2)*real_bs]
            logits_p_norm = logits_p * torch.norm(logits_p, 2, 1, keepdim=True).reciprocal().expand(real_bs, args.num_class)
            diff_logits_p = logits_p_norm - logits_norm + 0.00001
            #diff_logits_p = diff_logits_p * torch.norm(diff_logits_p, 2, 1, keepdim=True).reciprocal().expand(real_bs, args.num_class)
            loss_fingerprint_y += loss_n(logits_p_norm, fp_target_var_i)
            loss_fingerprint_dy += loss_n(diff_logits_p, fp_target_var_i)
            
        if(ds_name == "cifar"):
            if(epoch>=0):
                loss = loss_vanilla + (1.0+50.0/args.num_dx)*loss_fingerprint_dy # + loss_fingerprint_y
            else:
                loss = loss_vanilla
        else:
            if(epoch>=0):
                loss = loss_vanilla + 1.0*loss_fingerprint_dy # + loss_fingerprint_y
            else:
                loss = loss_vanilla
        loss.backward()
        optimizer.step()

        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss vanilla: {:.3f} fp-y: {:.3f} fp-dy: {:.3f} Total Loss: {:.3f}'.format(
                epoch, batch_idx * len(x), len(data_loader.dataset),
                100. * batch_idx / len(data_loader),
                loss_vanilla.item(),
                loss_fingerprint_y.item(),
                loss_fingerprint_dy.item(),
                loss.item()))
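
# A hypothetical driver tying train() and test() together; optimizer,
# train_loader and test_loader are assumptions, not part of this snippet:
#
# optimizer = optim.Adam(model.parameters(), lr=args.lr)
# for epoch in range(args.epochs):
#     train(epoch, args, model, optimizer, train_loader, fp_dx, fp_target, ds_name="cifar")
#     test(epoch, args, model, test_loader, fp_dx, fp_target)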
Example #4
def model_with_fingerprint(model, x, fp, args):
    # x : B x C x W x H with B = 1
    # Check y' = f(x+dx) for all dx

    x = util.t2var(x, args.cuda)

    assert x.size()[0] == 1 # batch

    # Get perturbations for predicted class

    logits = model(x)
    log_yhat = F.log_softmax(logits, dim=1)

    yhat = F.softmax(logits, dim=1)
    y_class = yhat.data.max(1, keepdim=True)[1]
    y_class = util.t2np(y_class, args.cuda)[0,0]

    # fixed_dxs : num_perturb x C x W x H
    fixed_dxs = util.np2var(np.concatenate(fp.dxs, axis=0), cuda=args.cuda)

    # compute x + dx : broadcast! num_perturb x C x W x H
    xp = x + fixed_dxs

    # if args.debug: print("xp", xp.size(), "x", x.size(), "fixed_dxs", fixed_dxs.size())

    logits_p = model(xp)
    log_yhat_p = F.log_softmax(logits_p, dim=1)
    yhat_p = F.softmax(logits_p, dim=1)

    if args.debug:
      print("logits_p", logits_p, "log_yhat_p", log_yhat_p)
      print("yhat_p", yhat_p)

    # compute f(x + dx) : num_perturb x num_class

    # print("get fixed_dys : num_target_class x num_perturb x num_class: for each target class, a set of perturbations and desired outputs (num_class).")
    fixed_dys = util.np2var(fp.dys, cuda=args.cuda)

    logits_norm = logits * torch.norm(logits, 2, 1, keepdim=True).reciprocal().expand(1, args.num_class)
    logits_p_norm = logits_p * torch.norm(logits_p, 2, 1, keepdim=True).reciprocal().expand(args.num_dx, args.num_class)

    if args.debug:
      print("logits_norm", logits_norm)
      print("logits_p_norm", logits_p_norm.size(), torch.norm(logits_p_norm, 2, 1))

    diff_logits_p = logits_p_norm - logits_norm
    #diff_logits_p = diff_logits_p * torch.norm(diff_logits_p, 2, 1, keepdim=True).reciprocal().expand(args.num_dx, args.num_class)


    diff = fixed_dys - diff_logits_p
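    # Broadcasting: fixed_dys is num_class x num_dx x num_class while
    # diff_logits_p is num_dx x num_class, so diff measures, for every
    # candidate class, how far the observed response is from that class's
    # fingerprint targets.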

    if args.debug:
      print("diff_logits_p", diff_logits_p)
      print("fixed_dys", fixed_dys)
      print("diff", diff)


    diff_norm = torch.norm(diff, 2, dim=2)

    if args.debug: print("diff_norm (over dim 2 of diff)", diff_norm)

    diff_norm = torch.mean(diff_norm, dim=1)

    if args.debug: print("diff_norm after mean", diff_norm)

    y_class_with_fp = diff_norm.data.min(0, keepdim=True)[1]
    y_class_with_fp = util.t2np(y_class_with_fp, args.cuda)[0]

    if args.debug:
      print("y_class_with_fp", y_class_with_fp, diff_norm.data.min(0, keepdim=True))

    ex = Example(x, yhat, y_class)
    ex.dxs = fixed_dxs
    ex.yhat_p = yhat_p
    ex.diff = diff
    ex.diff_norm = diff_norm
    ex.y_class_with_fp = y_class_with_fp

    return ex
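
# A hypothetical detection wrapper around model_with_fingerprint(); the
# threshold tau and the function name are illustrative assumptions, not the
# original API. An input is flagged when even the best-matching class
# fingerprint is far from the observed logit differences.
def detect_with_fingerprint(model, x, fp, args, tau=1.0):
    ex = model_with_fingerprint(model, x, fp, args)
    best_match = ex.diff_norm.data.min().item()  # distance to the closest class fingerprint
    is_suspicious = best_match > tau  # large mismatch => likely adversarial
    return ex.y_class_with_fp, is_suspicious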