Example #1
def system_check():
    """Smoke test: build a model on the GPU and run one forward/backward pass."""
    import traceback

    try:
        import torch
        from olympus.models import Model
        from olympus.optimizers import Optimizer

        # Dummy batch of 32 RGB 64x64 images on the GPU
        batch = torch.randn((32, 3, 64, 64)).cuda()
        model = Model('resnet18', input_size=(3, 64, 64),
                      output_size=(10, )).cuda()

        model.init()

        optimizer = Optimizer('sgd', params=model.parameters())

        optimizer.init(**optimizer.defaults)

        optimizer.zero_grad()
        loss = model(batch).sum()

        optimizer.backward(loss)
        optimizer.step()

        return True
    except Exception:
        # `error` is assumed to be the project's logging helper
        error(traceback.format_exc())
        return False
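
For readers without the olympus wrappers, a rough plain-PyTorch equivalent of the same smoke test might look like the sketch below; torchvision's resnet18 and a fixed learning rate stand in for the Model and Optimizer wrappers, which is an assumption about what they configure by default.

import traceback

import torch
from torchvision.models import resnet18


def plain_torch_check():
    # Same idea without olympus: one forward/backward pass on the GPU
    try:
        batch = torch.randn((32, 3, 64, 64)).cuda()
        model = resnet18(num_classes=10).cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

        optimizer.zero_grad()
        loss = model(batch).sum()
        loss.backward()
        optimizer.step()
        return True
    except Exception:
        traceback.print_exc()
        return False
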
Example #2
model = model.to(device=device)
loss = 0

event_handler.start_train()

for e in range(epochs):
    losses = []
    event_handler.new_epoch(e + 1)

    for step, ((batch, ), target) in enumerate(loader.train()):
        event_handler.new_batch(step)

        optimizer.zero_grad()
        predict = model(batch.to(device=device))

        loss = F.cross_entropy(predict, target.to(device=device))
        losses.append(loss.detach())

        optimizer.backward(loss)
        optimizer.step()

        event_handler.end_batch(step)

    # Average loss over the epoch, converting to Python floats in one pass
    losses = [l.item() for l in losses]
    loss = sum(losses) / len(losses)
    event_handler.end_epoch(e + 1)

event_handler.end_train()
print(loss)
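
The loop above only relies on six hooks from event_handler. A minimal no-op stand-in that satisfies that interface (a sketch; the real olympus handler presumably does logging, checkpointing, and similar work) could be:

class NoOpEventHandler:
    """Stand-in exposing only the hooks the training loop calls."""

    def start_train(self):
        pass

    def new_epoch(self, epoch):
        pass

    def new_batch(self, step):
        pass

    def end_batch(self, step):
        pass

    def end_epoch(self, epoch):
        pass

    def end_train(self):
        pass
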
Example #3
    def generate(self,
                 image,
                 min_confidence,
                 lr=0.7,
                 max_iter=500,
                 round_trip=False):
        """Generate an adversarial example that should be misclassified as target_class

        Parameters
        ----------

        image: Union[Tensor, List[Image]]
            list of images to tamper with

        min_confidence: float
            Confidence we need to reach before stopping

        lr: float
            learning rate for the optimizer

        max_iter: int
            Maximal number of iteration

        round_trip: bool
            when enabled the tensor is periodically converted to image and back to tensor
        """
        self.model.eval()

        target_confidence = 0
        target = self.target_class
        original_image, batch = self.to_batch(image)

        for i in range(max_iter):
            # Stop once the target class is confident enough
            if target_confidence > min_confidence:
                break

            batch.requires_grad = True

            # Rebuilt every iteration so it always optimizes the current `batch`
            # tensor (its identity changes after a round trip below)
            optimizer = Optimizer('sgd',
                                  params=[batch],
                                  lr=lr,
                                  momentum=0,
                                  weight_decay=0)

            probabilities = F.softmax(self.model(batch), dim=1)

            class_predicted = torch.argmax(probabilities)
            prediction_confidence = probabilities[0, class_predicted]
            target_confidence = probabilities[0, target]

            self.stats.update(class_predicted, prediction_confidence,
                              target_confidence, probabilities)

            debug(
                f'{i:4d} Predicted {class_predicted} with {prediction_confidence:.4f}, '
                f'our target: {target} has {target_confidence.item():.4f}')

            self.model.zero_grad()
            # Minimizing -target_confidence is gradient ascent on the target class
            optimizer.backward(-1 * target_confidence)
            optimizer.step()

            if round_trip:
                batch = self.preprocessor(self.postprocessor(batch))

        noises = self.get_noise(batch, original_image)
        return batch, noises
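
Stripped of the olympus Optimizer wrapper and the bookkeeping, the core of generate is gradient ascent on the target-class probability with respect to the input pixels. A minimal sketch of one such step in plain PyTorch (a hypothetical helper for illustration, not part of the class above):

import torch
import torch.nn.functional as F


def ascent_step(model, batch, target, lr=0.7):
    # One gradient-ascent step on the target-class probability w.r.t. the input
    batch = batch.clone().detach().requires_grad_(True)
    probabilities = F.softmax(model(batch), dim=1)
    target_confidence = probabilities[0, target]
    target_confidence.backward()
    with torch.no_grad():
        # Move the pixels toward higher confidence in the target class
        batch = batch + lr * batch.grad
    return batch.detach(), target_confidence.item()
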