Example #1
    def get_condition(self, z_inp, z_out, x_batches, y_batches):
        # Domain-knowledge rules: for selected target classes, one logit must
        # dominate another by self.margin (e.g. for 'car' images the 'truck'
        # logit must exceed the 'dog' logit by the margin).
        z_out = transform_network_output(z_out, self.network_output)[0]
        targets = y_batches[0]

        # (target class, dominating class, dominated class)
        triples = [('car', 'truck', 'dog'),
                   ('deer', 'horse', 'ship'),
                   ('plane', 'ship', 'frog'),
                   ('dog', 'cat', 'truck'),
                   ('cat', 'dog', 'car')]
        rules = [
            dl2.Implication(
                dl2.BoolConst(targets == I[t]),
                dl2.GEQ(z_out[:, I[a]], z_out[:, I[b]] + self.margin))
            for t, a, b in triples
        ]
        return dl2.And(rules)
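A hedged sketch of how such a condition is typically consumed, mirroring the constraint.loss(args) pattern that appears in the training loops below; combined_loss and its weight argument are illustrative, not part of the repo:

import torch.nn.functional as F

def combined_loss(constraint, logits, targets, args, weight=0.2):
    # Hedged sketch: `weight` plays the role of args.constraint_weight in
    # Example #5; `constraint` is the dl2.And(...) returned above.
    ce = F.cross_entropy(logits, targets)
    dl2_loss = constraint.loss(args).mean()  # differentiable DL2 penalty
    return ce + weight * dl2_loss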
Example #2
    def __box_to_constraint(self, box, point):
        # Constrain each 2-D point (row of `point`) to lie inside the
        # axis-aligned bounding box spanned by the corners in `box`.
        x_coords = [x for x, y in box]
        y_coords = [y for x, y in box]
        min_x = min(x_coords)
        max_x = max(x_coords)
        min_y = min(y_coords)
        max_y = max(y_coords)

        conditions = dl2.And([
            dl2.GEQ(point[:, 0], min_x),
            dl2.LEQ(point[:, 0], max_x),
            dl2.GEQ(point[:, 1], min_y),
            dl2.LEQ(point[:, 1], max_y)
        ])

        return conditions
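A hedged usage sketch (illustrative, not from the repo), assuming `point` is an (N, 2) tensor of predicted coordinates; since the method is name-mangled, the call is written as it would appear inside the same class:

        # Hedged sketch: count predicted points that satisfy the box constraint.
        box = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]  # illustrative corners
        cond = self.__box_to_constraint(box, point)
        n_inside = cond.satisfy(args).sum()  # satisfy(args) as in Example #6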
Example #3
def train(epoch):
    tot_err, tot_dl2_loss = 0, 0
    random.shuffle(train_graphs)
    for i, g in enumerate(train_graphs):

        model.train()
        with torch.no_grad():
            idx = torch.LongTensor([g.x, g.y])
            v = torch.FloatTensor(np.ones(len(g.x)))
            adj = torch.sparse.FloatTensor(idx, v,
                                           torch.Size([g.n, g.n])).to_dense()
            if args.cuda:
                adj = adj.cuda()

        optimizer.zero_grad()
        out = model(adj)
        dist = torch.FloatTensor([g.p[0, i] for i in range(g.n)])
        if args.cuda:
            dist = dist.cuda()

        err = torch.mean((dist - out) * (dist - out))
        tot_err += err.detach()
        if not args.baseline:
            # Shortest-path constraints on the predicted distances `out`:
            # out[0] == 0, all out[a] >= 0, and for every node a > 0,
            # out[a] <= out[b] + 1 for all neighbours b, with equality
            # holding for at least one neighbour.
            conjunction = []
            for a in range(1, g.n):
                disjunction = []
                for b in range(g.n):
                    if adj[a, b]:
                        disjunction.append(dl2.EQ(out[a], out[b] + 1))
                        conjunction.append(dl2.LEQ(out[a], out[b] + 1))
                conjunction.append(dl2.Or(disjunction))
            conjunction.append(dl2.EQ(out[0], 0))
            for a in range(g.n):
                conjunction.append(dl2.GEQ(out[a], 0))
            constraint = dl2.And(conjunction)
            dl2_loss = constraint.loss(args)
            dl2_loss.backward()
            tot_dl2_loss += dl2_loss.detach()
        else:
            err.backward()

        optimizer.step()
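For intuition, the conjunction above is exactly the recurrence characterising BFS distances. A hedged, repo-independent sketch that checks the same conditions on a concrete distance vector:

# Hedged sketch: verify the shortest-path recurrence encoded above on a
# plain adjacency matrix (nested 0/1 lists) and a distance vector d.
def dist_ok(adj, d):
    n = len(adj)
    if d[0] != 0 or any(x < 0 for x in d):
        return False
    for a in range(1, n):
        nbrs = [b for b in range(n) if adj[a][b]]
        if any(d[a] > d[b] + 1 for b in nbrs):
            return False
        if not any(d[a] == d[b] + 1 for b in nbrs):
            return False
    return True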
Example #4
    def get_condition(self, z_inp, z_out, x_batches, y_batches):
        # If two inputs carry different labels yet lie within self.eps of
        # each other, bound their distance-weighted cross-entropy by
        # self.p_limit.
        n_batch = x_batches[0].size()[0]
        z_logits = F.log_softmax(z_out[0], dim=1)

        d1 = torch.norm(z_inp[0] - x_batches[0], dim=1)
        d2 = torch.norm(z_inp[0] - x_batches[1], dim=1)

        w1 = d1 / (d1 + d2)
        w2 = d2 / (d1 + d2)

        pred_logits_1 = z_logits[np.arange(n_batch), y_batches[0]]
        pred_logits_2 = z_logits[np.arange(n_batch), y_batches[1]]

        pre = dl2.And([
            dl2.BoolConst(y_batches[0] != y_batches[1]),
            dl2.LEQ(
                torch.norm((x_batches[0] - x_batches[1]).view((n_batch, -1)),
                           dim=1), self.eps)
        ])
        ce = -(w1 * pred_logits_1 + w2 * pred_logits_2)

        return dl2.Implication(pre, dl2.LT(ce, self.p_limit))
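A hedged sketch of how such a pairwise condition is evaluated, using the loss(args) / satisfy(args) interface seen elsewhere in these examples; constraint_obj and the batch variables are placeholders normally supplied by the DL2 training harness:

# Hedged sketch: evaluate the pairwise condition on two mini-batches.
cond = constraint_obj.get_condition(z_inp, z_out, x_batches, y_batches)
pair_loss = cond.loss(args).mean()  # differentiable penalty to minimise
pair_sat = cond.satisfy(args)       # per-pair boolean satisfaction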
Example #5
def train(epoch):
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    # Note: the optimizer is re-created on every call, so Adam's running
    # statistics are reset at each epoch.
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    softmax = torch.nn.Softmax(dim=1)

    print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, args.lr))
    if args.skip_labled:
        tl = [None] * 200000
    else:
        tl = trainloader_lab
    for batch_idx, (lab, ulab) in enumerate(zip(tl, trainloader_unlab)):
        inputs_u, targets_u = ulab
        inputs_u, targets_u = Variable(inputs_u), Variable(targets_u)
        n_u = inputs_u.size()[0]
        if use_cuda:
            inputs_u, targets_u = inputs_u.cuda(), targets_u.cuda() # GPU settings

        if lab is None:
            n = 0
            all_outputs = net(inputs_u)
        else:
            inputs, targets = lab
            inputs, targets = Variable(inputs), Variable(targets)
            n = inputs.size()[0]
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda() # GPU settings
            all_outputs = net(torch.cat([inputs, inputs_u], dim=0))

        optimizer.zero_grad()
        outputs_u = all_outputs[n:]
        logits_u = F.log_softmax(outputs_u, dim=1)
        probs_u = softmax(outputs_u)

        outputs = all_outputs[:n]
        if args.skip_labled:
            ce_loss = 0
        else:
            ce_loss = criterion(outputs, targets)  # Loss
        
        constraint_loss = 0
        if args.constraint == 'DL2':
            eps = args.c_eps * args.decrease_eps_weight**epoch
            # Group constraint on the unlabelled batch: for each of the 20
            # class groups g[i] (CIFAR-100 superclasses), the group's total
            # probability mass should be either 1 or 0.
            dl2_one_group = []
            for i in range(20):
                gsum = 0
                for j in g[i]:
                    gsum += probs_u[:, j]
                dl2_one_group.append(dl2.Or([dl2.EQ(gsum, 1.0), dl2.EQ(gsum, 0.0)]))
            dl2_one_group = dl2.And(dl2_one_group)
            dl2_loss = dl2_one_group.loss(args).mean()
            constraint_loss = dl2_loss
            constraint_loss = dl2_loss
            loss = ce_loss + (args.constraint_weight * args.increase_constraint_weight**epoch) * dl2_loss
        else:
            loss = ce_loss
        loss.backward()  # Backward Propagation
        optimizer.step() # Optimizer update
        
        train_loss += loss.item()
        if args.skip_labled:

            total = 1
            correct = 0
        else:
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
        
        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tCE Loss: %.4f, Constraint Loss: %.4f Acc@1: %.3f%%'
                %(epoch, num_epochs, batch_idx+1,
                    (len(train_lab_idx)//batch_size)+1, loss, constraint_loss, 100.*float(correct)/total))
        sys.stdout.flush()
    return 100.*float(correct)/total
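A hedged sketch of a driver loop for the function above; num_epochs is assumed to be defined, as in the progress line the function prints:

# Hedged sketch: run training and report the labelled accuracy per epoch.
for epoch in range(1, num_epochs + 1):
    acc = train(epoch)
    print('\nEpoch %d: Acc@1 %.3f%%' % (epoch, acc))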
Example #6
    softmax = torch.nn.Softmax(dim=1)
    for batch_idx, (inputs, targets) in enumerate(testloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs)
        
        probs = softmax(outputs)
        eps = 0.05
        # Test-time version of the group constraint: each group's probability
        # mass must lie within eps of either 1 or 0.
        dl2_one_group = []
        for i in range(20):
            gsum = 0
            for j in g[i]:
                gsum += probs[:, j]
            dl2_one_group.append(dl2.Or([dl2.GT(gsum, 1.0 - eps), dl2.LT(gsum, eps)]))
        constraint = dl2.And(dl2_one_group)
        constraint_correct += constraint.satisfy(args).sum()
        
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        conf_mat += confusion_matrix(targets.data.cpu().numpy(), predicted.cpu().numpy(), labels=np.arange(100))

        n_batch = predicted.size()[0]
        for i in range(n_batch):
            # Count predictions that land in the correct group even when the
            # fine-grained class is wrong.
            if group[predicted[i].item()] == group[targets[i].item()]:
                group_ok += 1

    # print('Confusion matrix:')
    # print(conf_mat)
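A hedged sketch of the summary one would print after the loop; the accumulators (correct, constraint_correct, group_ok, total) are the ones updated above:

    # Hedged sketch: final test-set metrics.
    print('Acc@1: %.3f%%' % (100. * float(correct) / total))
    print('Constraint satisfied: %.3f%%' % (100. * float(constraint_correct) / total))
    print('Group (superclass) accuracy: %.3f%%' % (100. * float(group_ok) / total))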