Example #1
def map_bb_outputs_to_pred_bbs(outputs, anchors, grids, log=False):
    if log:
        print("map_bb_outputs_to_pred_bbs")
        print("outputs :", outputs)
        print("anchors :", anchors)
        print("grids :", grids)

    # The first two values in the output represent a translation of the anchor box's center.
    # Grid size is the width and height of the receptive field
    # delta_center is bounded on the range (-grid_size, grid_size);
    # that is, the center remains within the original receptive field.
    delta_center = outputs[:, :2] * (util.to_gpu(grids[:, :2]))

    if log: print("delta_center :", delta_center)

    # The last two values in the output represent the width and height of the bounding box.
    # These values are interpreted as a percentage of the original anchor box's width and height.
    # percent_sizes is on the range (0, 2); we add 1 since outputs is on the range (-1, 1).
    percent_sizes = outputs[:, 2:] + 1

    if log: print("percent_sizes :", percent_sizes)

    actn_centers = delta_center + util.to_gpu(
        anchors)[:, :2]  # Calculate predicted center_x and center_y
    actn_wh = percent_sizes * util.to_gpu(
        anchors)[:, 2:]  # Calculate predicted width and height

    if log: print("returns :", torch.cat([actn_centers, actn_wh], dim=1))

    return torch.cat([actn_centers, actn_wh], dim=1)
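As a quick sanity check of the arithmetic above (all numbers below are invented purely for illustration): an anchor centred at (0.5, 0.5) with width and height 0.25, a grid cell of size 0.25 and raw outputs (0.2, -0.4, 0.1, -0.2) map to a box centred at (0.55, 0.40) with size (0.275, 0.20).

import torch

# Illustrative values only; this mirrors the arithmetic inside map_bb_outputs_to_pred_bbs.
anchor = torch.tensor([0.5, 0.5, 0.25, 0.25])  # cx, cy, w, h
grid = torch.tensor([0.25, 0.25])              # receptive-field width and height
output = torch.tensor([0.2, -0.4, 0.1, -0.2])  # raw activations in (-1, 1)

center = anchor[:2] + output[:2] * grid        # tensor([0.5500, 0.4000])
wh = anchor[2:] * (output[2:] + 1)             # tensor([0.2750, 0.2000])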
Example #2
def map_label_to_ground_truth(raw_label_bbs,
                              raw_label_classes,
                              anchors,
                              grids,
                              imsize,
                              log=False):
    label_bbs, label_classes = format_label(raw_label_bbs, raw_label_classes,
                                            imsize)

    if log:
        print("map_label_to_ground_truth")
        print("label_bbs: ", label_bbs)
        print("label_classes: ", label_classes)

    distances = jaccard(label_bbs, anchors)

    if log: print("distances: ", distances)

    prior_overlap, prior_idx = distances.max(1)  # best anchor for each ground-truth box

    #if log: print("prior_distances: ", prior_overlap); print("prior_idx: ", prior_idx)

    gt_overlap, gt_idx = distances.max(0)  # best ground-truth box for each anchor

    #if log: print("gt_distances: ", gt_overlap); print("gt_idx: ", gt_idx)

    gt_overlap[prior_idx] = 1.99  # force each ground truth's best anchor to be treated as a match

    # Assign each ground truth's best anchor back to that ground truth
    for i, o in enumerate(prior_idx):
        gt_idx[o] = i

    #if log: print("gt_distances: ", gt_overlap); print("gt_idx: ", gt_idx)

    gt_classes = label_classes[gt_idx]

    #if log: print("gt_classes: ", gt_classes)

    matches = gt_overlap >= 0.5  # anchors with IoU >= 0.5 are positive matches

    #if log: print("matches: ", matches)

    matching_idxs = torch.nonzero(matches)[:, 0]

    # For classification, keep positives plus clear background (IoU < .4); anchors in between are ignored
    cls_matches = torch.nonzero(matches + (gt_overlap < .4))[:, 0]

    if log: print("matching_idxs: ", matching_idxs)

    gt_classes[matches != 1] = 0  # unmatched anchors get the background class

    gt_bbs = label_bbs[gt_idx]

    if log:
        print("gt_classes: ", gt_classes[matching_idxs])
        print("gt_bbs: ", gt_bbs[matching_idxs])

    return util.to_gpu(gt_bbs), gt_classes, util.to_gpu(
        matching_idxs), cls_matches
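The jaccard call above is not shown on this page; it computes the IoU matrix between label boxes and anchors. A minimal sketch of what it presumably does is below, assuming boxes are stored as corner coordinates (x_min, y_min, x_max, y_max); the actual implementation in this codebase may convert formats first.

import torch

def intersection(boxes_a, boxes_b):
    # Pairwise intersection areas; boxes are assumed to be (x_min, y_min, x_max, y_max).
    top_left = torch.max(boxes_a[:, None, :2], boxes_b[None, :, :2])
    bottom_right = torch.min(boxes_a[:, None, 2:], boxes_b[None, :, 2:])
    wh = (bottom_right - top_left).clamp(min=0)
    return wh[:, :, 0] * wh[:, :, 1]

def jaccard(boxes_a, boxes_b):
    # IoU matrix of shape (len(boxes_a), len(boxes_b)).
    inter = intersection(boxes_a, boxes_b)
    area_a = ((boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1]))[:, None]
    area_b = ((boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1]))[None, :]
    return inter / (area_a + area_b - inter)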
Example #3
    def on_batch_end(self, session, schedule, cb_dict, loss, input, output,
                     label):
        label = Variable(util.to_gpu(label))

        self.train_accuracy.update(output, label)
        self.train_loss.update(loss)
        self.train_raw_loss.update(
            self.loss_fn(output[-1], label).data.cpu(), label.shape[0])
Example #4
def compute_embeddings(model, dataloader, max_num):
    outputs = []
    labels = []

    num = 0

    with EvalModel(model), torch.no_grad():
        for input, label in dataloader:
            output = model.forward(Variable(util.to_gpu(input)))
            if type(output) is list:
                output = output[0]
            outputs.append(output.data.cpu().view(output.size(0), -1))
            labels.append(label)
            num += label.shape[0]

            if num > max_num: break

    cat = torch.cat(outputs).numpy()
    labels = torch.cat(labels).numpy()

    return cat, labels
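A possible way to call this helper (model and val_loader are assumed to exist and are named here purely for illustration):

# Hypothetical usage: collect embeddings for at most 1000 validation samples.
embeddings, labels = compute_embeddings(model, val_loader, max_num=1000)
print(embeddings.shape, labels.shape)  # (N, feature_dim) NumPy array and (N,) label vector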
Example #5
def tensorboard_embeddings(model,
                           select,
                           dataloader,
                           targets,
                           images,
                           board='./runs'):
    old_select = model._to_select
    model._to_select = select
    writer = SummaryWriter(board)

    outputs = {name: [] for name in select}

    with EvalModel(model):
        for input, label in dataloader:
            output = model.forward(Variable(util.to_gpu(input)))
            for layer in output:
                outputs[layer[1]].append(layer[0].data.cpu().view(
                    layer[0].size(0), -1))

    for name, output in outputs.items():
        cat = torch.cat(output)
        writer.add_embedding(cat, tag=name, metadata=targets, label_img=images)

    model._to_select = old_select  # restore the layer selection saved above
Example #6
    def single_example_loss(self,
                            pred_classes,
                            bb_outputs,
                            label_classes,
                            label_bbs,
                            log=False):
        gt_bbs, gt_classes, matching_idxs, cls_matches = map_label_to_ground_truth(
            label_bbs, label_classes, self.anchors, self.grids, self.imsize)

        if log:
            print("gt_classes: ", gt_classes)
            print("pred_classes: ", pred_classes)

        pred_bbs = map_bb_outputs_to_pred_bbs(bb_outputs, self.anchors,
                                              self.grids)

        loc_loss = F.smooth_l1_loss(pred_bbs[matching_idxs].float(),
                                    gt_bbs[matching_idxs].float(),
                                    size_average=False)

        clas_loss = self.loss_f(pred_classes[util.to_gpu(cls_matches)],
                                gt_classes[cls_matches])

        return loc_loss, clas_loss / max(len(matching_idxs), 1)
Example #7
    def run(self, session, cb_dict):
        with EvalModel(session.model), torch.no_grad():
            for input, label, *_ in tqdm(self.val_data,
                                         desc="Validating",
                                         leave=False):
                label = Variable(util.to_gpu(label))
                output = session.forward(input)

                self.valid_loss.update(
                    session.criterion(output, label).data.cpu(),
                    input.shape[0])
                self.valid_raw_loss.update(
                    self.loss_fn(output[-1], label).data.cpu(), input.shape[0])

                self.valid_accuracy.update(output, label)

        valid_loss = self.valid_loss.raw_avg
        valid_raw_loss = self.valid_raw_loss.raw_avg
        valid_triplet_loss = valid_loss - valid_raw_loss

        cb_dict["Loss/Valid"] = valid_loss
        cb_dict["Raw/Valid"] = valid_raw_loss
        cb_dict["Trip/Valid"] = valid_triplet_loss
        cb_dict["Acc/Valid"] = self.valid_accuracy.metric()
Example #8
def fgsm_test(model, dataloader, epsilon):
    with EvalModel(model):
        # Accuracy counter
        correct = 0
        total = 0
        adv_examples = []

        # Loop over all examples in test set
        for data, target in tqdm(dataloader,
                                 desc=f"Epsilon={epsilon}",
                                 leave=False):
            total += len(target)

            # Send the data and label to the device
            data, target = util.to_gpu(data), util.to_gpu(target)

            # Set requires_grad attribute of tensor. Important for Attack
            data.requires_grad = True

            # Forward pass the data through the model
            output = model(data)
            if isinstance(output, list): output = output[-1]

            output = F.log_softmax(output, dim=1)

            _, init_pred = output.max(
                1)  # get the index of the max log-probability

            # If the initial prediction is wrong, don't bother attacking; just move on
            mask = init_pred == target

            output = output[mask]
            target = target[mask]

            if len(target) == 0: continue

            # Calculate the loss
            loss = F.nll_loss(output, target)

            # Zero all existing gradients
            model.zero_grad()

            # Calculate gradients of model in backward pass
            loss.backward()

            # Collect datagrad
            data_grad = data.grad.data

            # Call FGSM Attack
            perturbed_data = fgsm_attack(data[mask], epsilon, data_grad[mask])

            # Re-classify the perturbed image

            output = model(perturbed_data)
            if isinstance(output, list): output = output[-1]

            # Check for success
            _, final_pred = output.max(
                1)  # get the index of the max log-probability

            correct += torch.sum(final_pred == target)

        # Calculate final accuracy for this epsilon
        final_acc = correct / float(total)
        print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(
            epsilon, correct, total, final_acc))

        # Return the accuracy and an adversarial example
        return final_acc, adv_examples
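The fgsm_attack helper called above is defined elsewhere; a minimal sketch of the standard FGSM perturbation it presumably applies is below (clamping to [0, 1] is an assumption about the input scaling).

def fgsm_attack(image, epsilon, data_grad):
    # Move each pixel by epsilon in the direction of the sign of the loss gradient.
    perturbed_image = image + epsilon * data_grad.sign()
    # Keep the result in the valid input range (assumes inputs are scaled to [0, 1]).
    return torch.clamp(perturbed_image, 0, 1)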
Example #9
    def forward(self, outputs, label):
        target = one_hot_embedding(label, self.num_classes + 1)  # +1 for background
        target = util.to_gpu(Variable(target[:, 1:].contiguous()))  # Ignore background and send to GPU
        pred = outputs[:, 1:]  # Get the model's predictions (background excluded)
        weight = self.get_weight(pred, target)
        return F.binary_cross_entropy_with_logits(pred, target, weight, size_average=False)
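one_hot_embedding is also defined elsewhere; a common implementation it likely resembles is sketched below (the name and signature are taken from the call above, the body is an assumption).

def one_hot_embedding(labels, num_classes):
    # Index an identity matrix with the integer class ids to get (N, num_classes) one-hot rows.
    return torch.eye(num_classes)[labels]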