Example #1
import logging
import time

import crypten
import torch

# AverageMeter (a small running-average helper) is sketched after this example.


def train_linear_svm(features, labels, epochs=50, lr=0.5, print_time=False):
    # Initialize random weights
    w = features.new(torch.randn(1, features.size(0)))
    b = features.new(torch.randn(1))

    if print_time:
        pt_time = AverageMeter()
        end = time.time()

    for epoch in range(epochs):
        # Forward
        label_predictions = w.matmul(features).add(b).sign()

        # Compute accuracy
        correct = label_predictions.mul(labels)
        accuracy = correct.add(1).div(2).mean()
        if crypten.is_encrypted_tensor(accuracy):
            accuracy = accuracy.get_plain_text()

        # Print accuracy from rank 0 only, so it is logged just once
        if crypten.communicator.get().get_rank() == 0:
            logging.info("Epoch %d --- Training Accuracy %.2f%%" %
                         (epoch, accuracy.item() * 100))

        # Backward
        loss_grad = -labels * (1 - correct) * 0.5  # Hinge loss gradient
        b_grad = loss_grad.mean()
        w_grad = loss_grad.matmul(features.t()).div(loss_grad.size(1))

        # Update
        w -= w_grad * lr
        b -= b_grad * lr

        if print_time:
            iter_time = time.time() - end
            pt_time.add(iter_time)
            logging.info("    Time %.6f (%.6f)" % (iter_time, pt_time.value()))
            end = time.time()

    return w, b
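
All four examples record statistics through an AverageMeter helper that is not shown in this listing. Below is a minimal sketch that matches the add()/value() calls above; the original helper (e.g. the one shipped with CrypTen's examples) may differ in detail.

class AverageMeter:
    """Running average of added values; matches the add()/value() usage above."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def add(self, value, n=1):
        # value may be a float or a 0-dim tensor; n weights batch-sized updates
        self.sum += value * n
        self.count += n

    def value(self):
        return self.sum / max(self.count, 1)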
Example #2
import logging
import time

import crypten
import torch


def validate(val_loader, model, criterion, print_freq=10, flatten=False):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            # compute output
            if flatten:
                input = input.view(input.size(0), -1)
            if (isinstance(model, crypten.nn.Module)
                    and not crypten.is_encrypted_tensor(input)):
                input = crypten.cryptensor(input)

            output = model(input)

            if crypten.is_encrypted_tensor(output):
                output = output.get_plain_text()
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.add(loss.item(), input.size(0))
            top1.add(prec1[0], input.size(0))
            top5.add(prec5[0], input.size(0))

            # measure elapsed time
            current_batch_time = time.time() - end
            batch_time.add(current_batch_time)
            end = time.time()

            if (i + 1) % print_freq == 0:
                logging.info("\nTest: [{}/{}]\t"
                             "Time {:.3f} ({:.3f})\t"
                             "Loss {:.4f} ({:.4f})\t"
                             "Prec@1 {:.3f} ({:.3f})   \t"
                             "Prec@5 {:.3f} ({:.3f})".format(
                                 i + 1,
                                 len(val_loader),
                                 current_batch_time,
                                 batch_time.value(),
                                 loss.item(),
                                 losses.value(),
                                 prec1[0],
                                 top1.value(),
                                 prec5[0],
                                 top5.value(),
                             ))
            # Cap validation at roughly 100 batches to bound runtime
            if i > 100:
                break

        logging.info(" * Prec@1 {:.3f} Prec@5 {:.3f}".format(
            top1.value(), top5.value()))

    return top1.value()
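
validate() (and train() below) also call an accuracy() helper that this listing does not define. The sketch below follows the classic PyTorch ImageNet example; it assumes output holds logits of shape (batch, classes) and returns precision@k as 1-element tensors, so the prec1[0] indexing above works.

def accuracy(output, target, topk=(1,)):
    """Computes precision@k for the requested values of k (sketch)."""
    maxk = max(topk)
    batch_size = target.size(0)

    # Take the top-k predicted classes and compare against the targets
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res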
Example #3
import logging
import time


def train(train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          print_freq=10,
          flatten=False):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # compute output
        if flatten:
            input = input.view(input.size(0), -1)
        output = model(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.add(loss.item(), input.size(0))
        top1.add(prec1[0], input.size(0))
        top5.add(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        current_batch_time = time.time() - end
        batch_time.add(current_batch_time)
        end = time.time()

        if i % print_freq == 0:
            logging.info("Epoch: [{}][{}/{}]\t"
                         "Time {:.3f} ({:.3f})\t"
                         "Loss {:.4f} ({:.4f})\t"
                         "Prec@1 {:.3f} ({:.3f})\t"
                         "Prec@5 {:.3f} ({:.3f})".format(
                             epoch,
                             i,
                             len(train_loader),
                             current_batch_time,
                             batch_time.value(),
                             loss.item(),
                             losses.value(),
                             prec1[0],
                             top1.value(),
                             prec5[0],
                             top5.value(),
                         ))
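
train() and validate() slot together in the usual epoch loop. A hypothetical driver is sketched below; the random stand-in data, model, and hyperparameters are placeholders, not values fixed by the examples.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Random stand-in data shaped like 28x28 images with 10 classes
inputs = torch.randn(512, 1, 28, 28)
targets = torch.randint(0, 10, (512,))
train_loader = DataLoader(TensorDataset(inputs, targets), batch_size=64)
val_loader = DataLoader(TensorDataset(inputs, targets), batch_size=64)

model = torch.nn.Linear(784, 10)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for epoch in range(5):
    # flatten=True reshapes each batch to (batch, 784) inside train/validate
    train(train_loader, model, criterion, optimizer, epoch, flatten=True)
    prec1 = validate(val_loader, model, criterion, flatten=True)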
Example #4
import logging
import time

import crypten
import torch


def train_linear_svm(features, labels, epochs=50, lr=0.5, print_time=False):
    # Initialize random weights
    w = features.new(torch.randn(1, features.size(0)))
    b = features.new(torch.randn(1))

    if print_time:
        pt_time = AverageMeter()
        end = time.time()

    for epoch in range(epochs):
        # Forward: compute the raw scores w.x, then replace sign() with a
        # piecewise-linear "hard sigmoid" applied elementwise:
        #   x < -0.5 -> 0,  x > 0.5 -> 1,  otherwise -> x + 0.5
        label_predictions = w.matmul(features)
        for i in range(len(label_predictions[0])):
            if label_predictions[0][i] < -0.5:
                label_predictions[0][i] = 0.0
            elif label_predictions[0][i] > 0.5:
                label_predictions[0][i] = 1.0
            else:
                label_predictions[0][i] = label_predictions[0][i] + 0.5
            # f(w.x) + b
            label_predictions[0][i] = label_predictions[0][i].add(b)

        # Compute accuracy
        correct = label_predictions.mul(labels)
        accuracy = correct.add(1).div(2).mean()
        if crypten.is_encrypted_tensor(accuracy):
            accuracy = accuracy.get_plain_text()

        # Print accuracy from rank 0 only, so it is logged just once
        if crypten.communicator.get().get_rank() == 0:
            logging.info("Epoch %d --- Training Accuracy %.2f%%" %
                         (epoch, accuracy.item() * 100))

        # Backward
        loss_grad = -labels * (1 - correct) * 0.5  # Hinge loss gradient
        b_grad = loss_grad.mean()
        w_grad = loss_grad.matmul(features.t()).div(loss_grad.size(1))

        # Update
        w -= w_grad * lr
        b -= b_grad * lr

        if print_time:
            iter_time = time.time() - end
            pt_time.add(iter_time)
            logging.info("    Time %.6f (%.6f)" % (iter_time, pt_time.value()))
            end = time.time()

    return w, b
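
For plaintext tensors, the elementwise loop in this variant is exactly clamp(w.x + 0.5, 0, 1) followed by adding the bias, so it can be vectorized; note the Python if/elif only works on plaintext values, since comparisons on encrypted CrypTen tensors return encrypted results that cannot drive Python control flow. A sketch (the function name is mine):

def hard_sigmoid_predict(w, features, b):
    # clamp(x + 0.5, 0, 1) reproduces the three branches above:
    #   x < -0.5 -> 0,  x > 0.5 -> 1,  otherwise -> x + 0.5
    scores = w.matmul(features)
    return scores.add(0.5).clamp(0, 1).add(b)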