Example #1
def quick_matching(det_boxes, gt_boxes, det_cats, gt_cats):
    iou_mask = batch_iou(det_boxes, gt_boxes) >= 0.5
    det_cats = np.expand_dims(det_cats, axis=1)
    gt_cats = np.expand_dims(gt_cats, axis=0)
    cat_mask = (det_cats == gt_cats)
    matching = np.logical_and(iou_mask, cat_mask)
    return matching
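A minimal end-to-end sketch of how quick_matching might be called (not part of the original example). It assumes corner-format boxes [x1, y1, x2, y2] and supplies a stand-in batch_iou that returns an N_det x N_gt matrix of pairwise IoUs, which is the contract the example relies on:

import numpy as np

def batch_iou(det_boxes, gt_boxes, eps=1e-7):
    # pairwise IoU in corner format; result shape: (len(det_boxes), len(gt_boxes))
    a = det_boxes[:, None, :]
    b = gt_boxes[None, :, :]
    lt = np.maximum(a[..., :2], b[..., :2])
    rb = np.minimum(a[..., 2:], b[..., 2:])
    wh = np.clip(rb - lt, 0, None)
    inter = wh[..., 0] * wh[..., 1]
    area_a = (a[..., 2] - a[..., 0]) * (a[..., 3] - a[..., 1])
    area_b = (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])
    return inter / (area_a + area_b - inter + eps)

det_boxes = np.array([[10., 10., 50., 50.], [60., 60., 90., 90.]])
gt_boxes = np.array([[12., 12., 52., 52.]])
det_cats = np.array([1, 2])
gt_cats = np.array([1])
print(quick_matching(det_boxes, gt_boxes, det_cats, gt_cats))
# [[ True]
#  [False]]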
Example #2
def end2end_label_generate(labels, gta, mc):
    """Generate per-image anchor assignments and regression targets.

    labels: class labels for one image. Shape: object_num
    gta: ground truth boxes for one image. Shape: object_num x [cx, cy, w, h]
    mc: model configuration

    Returns:
        label_per_imgs: class labels. Shape: object_num
        delta_per_image: bounding box deltas. Shape: object_num x [dx, dy, dw, dh]
        aidx_per_image: index of the anchor responsible for each object.
            Shape: object_num
        gt_bbox: ground truth bounding boxes. Shape: object_num x [cx, cy, w, h]
    """
    RF = mc.receptive_field
    (W, H) = (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT)
    label_per_imgs = labels
    gt_bbox = gta
    aidx_per_image, delta_per_image = [], []
    aidx_set = set()
    for i in range(len(gt_bbox)):
        # IoU between the i-th ground truth box and every anchor
        overlaps = utils.batch_iou(mc.Anchor_box, gt_bbox[i])
        aidx = len(mc.Anchor_box)
        # pick the unassigned anchor with the highest positive overlap
        for ov_idx in np.argsort(overlaps)[::-1]:
            if overlaps[ov_idx] <= 0:
                break
            if ov_idx not in aidx_set:
                aidx_set.add(ov_idx)
                aidx = ov_idx
                break

        if aidx == len(mc.Anchor_box):
            # even the largest available overlap is 0, so choose the unassigned
            # anchor with the smallest squared distance
            dist = np.sum(np.square(gt_bbox[i] - mc.Anchor_box), axis=1)
            for dist_idx in np.argsort(dist):
                if dist_idx not in aidx_set:
                    aidx_set.add(dist_idx)
                    aidx = dist_idx
                    break

        # encode the ground truth box relative to its assigned anchor
        box_cx, box_cy, box_w, box_h = gt_bbox[i]
        delta = [0] * 4
        delta[0] = (box_cx - mc.Anchor_box[aidx][0]) / box_w
        delta[1] = (box_cy - mc.Anchor_box[aidx][1]) / box_h
        delta[2] = np.log(box_w / mc.Anchor_box[aidx][2])
        delta[3] = np.log(box_h / mc.Anchor_box[aidx][3])

        aidx_per_image.append(aidx)
        delta_per_image.append(delta)

    return label_per_imgs, delta_per_image, aidx_per_image, gt_bbox
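The deltas above encode each ground truth box relative to its assigned anchor. A sketch of the inverse transform implied by that encoding (illustrative only; the decoding used elsewhere in the repo may differ):

import numpy as np

def decode_delta(delta, anchor):
    # recover [cx, cy, w, h] from a delta produced by end2end_label_generate,
    # given the same anchor [cx, cy, w, h] it was encoded against
    a_cx, a_cy, a_w, a_h = anchor
    w = a_w * np.exp(delta[2])
    h = a_h * np.exp(delta[3])
    cx = a_cx + delta[0] * w
    cy = a_cy + delta[1] * h
    return [cx, cy, w, h]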
Example #3
def train(model,
          data_loaders,
          optimizer,
          num_epochs=1,
          log_every=100,
          cuda=True):
    if cuda:
        model.cuda()
    iter_ = 0
    epoch = 0
    best_params = None
    best_val_err = np.inf
    history = {
        'train_losses': [],
        'train_errs': [],
        'val_errs': [],
        'train_loss2': []
    }
    print('Training the model!')
    print('You can interrupt it at any time.')
    while epoch < num_epochs:
        model.train()
        # model.train_mode()
        epoch += 1
        for x, y in data_loaders['train']:
            if cuda:
                x = x.cuda()
                y = y.cuda()
            iter_ += 1

            optimizer.zero_grad()
            out = model.forward(x)
            loss = model.loss(out, y)
            loss.backward()
            optimizer.step()

            pred = model.predict(out)
            iou = torch.sum(batch_iou(y, pred))
            err_rate = 100 * (1.0 - iou / out.size(0))

            history['train_losses'].append(loss.item())
            history['train_errs'].append(err_rate.item())

            if iter_ % log_every == 0:
                print "Minibatch {0: >6}  | loss {1: >5.2f} | err rate {2: >5.2f}%" \
                      .format(iter_, loss.item(), err_rate.item())

        val_err_rate = compute_error_rate(model, data_loaders['test'], cuda)
        history['val_errs'].append((iter_, val_err_rate.item()))

        if val_err_rate < best_val_err:
            best_epoch = epoch
            best_val_err = val_err_rate
        m = "After epoch {0: >2} | valid err rate: {1: >5.2f}% | doing {2: >3} epochs" \
            .format(epoch, val_err_rate, num_epochs)
        print('{0}\n{1}\n{0}'.format('-' * len(m), m))
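The training loop assumes the model exposes loss and predict in addition to forward. A hypothetical wrapper illustrating that interface (a sketch, not the model used by the original code):

import torch.nn as nn
import torch.nn.functional as F

class BoxRegressor(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 64 * 64, 4))

    def forward(self, x):
        # predict one [x1, y1, x2, y2] box per image
        return self.net(x)

    def loss(self, out, y):
        # smooth L1 between predicted and target boxes
        return F.smooth_l1_loss(out, y)

    def predict(self, out):
        # the raw outputs are already box coordinates here
        return out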
Example #4
def compute_error_rate(model, data_loader, cuda=True):
    model.eval()
    iou_sum = 0
    num_examples = 0
    for x, y in data_loader:
        if cuda:
            x = x.cuda()
            y = y.cuda()
        with torch.no_grad():
            out = model.forward(x)
            pred = model.predict(out)
            iou_sum += torch.sum(batch_iou(y, pred))
            num_examples += x.size(0)
    return 100.0 * (1.0 - iou_sum / num_examples)
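Both PyTorch examples above call batch_iou(y, pred) on two equally sized batches and sum the result, i.e. they treat it as an element-wise IoU between corresponding boxes. A minimal sketch under that assumption (corner format [x1, y1, x2, y2] is assumed; this is not necessarily the batch_iou used by the original code):

import torch

def batch_iou_pairwise(a, b, eps=1e-7):
    # a, b: (N, 4) tensors of corresponding boxes in [x1, y1, x2, y2] format
    lt = torch.max(a[:, :2], b[:, :2])
    rb = torch.min(a[:, 2:], b[:, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    return inter / (area_a + area_b - inter + eps)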
Example #5
def calculate_array_matches(ground_truths, predictions, iou_threshold=0.5):
    ious = batch_iou(to_iou_coordinates(ground_truths),
                     to_iou_coordinates(predictions))
    return np.where(iou_threshold < ious, 1.0, 0.0).astype(np.float32)
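A short follow-up sketch showing how the 0/1 match matrix could be reduced to counts. The [num_ground_truths, num_predictions] shape is an assumption based on the argument order above:

import numpy as np

# dummy match matrix for illustration: 2 ground truths x 3 predictions
matches = np.array([[1., 0., 0.],
                    [0., 0., 0.]], dtype=np.float32)
matched_gts = int((matches.max(axis=1) > 0).sum())    # ground truths covered by some prediction
matched_preds = int((matches.max(axis=0) > 0).sum())  # predictions that hit some ground truth
print(matched_gts, matched_preds)  # 1 1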
Example #6
def compute_statistics_for_thresholding(all_boxes, all_classes, all_scores,
                                        all_gts, config):
    """Compute tps, fps, fns, and other stuff for computing APs
    
    
    Arguments:
        all_boxes {[type]} -- list of predicted boxes
        all_classes {[type]} -- list of predicted classes
        all_scores {[type]} --list of predicted scores  
        all_gts {[type]} -- list of all y_trues
        config {[type]} -- squeezedet config
    
    Returns:
        [type] -- boxes_per_img , boxes_per_gt, np.stack(all_tps), np.stack(all_fps), np.stack(all_fns), is_gt, all_score_thresholds
    """

    boxes_per_img = []
    boxes_per_gt = []

    all_tps = []
    all_fps = []

    all_fns = []
    all_score_thresholds = [[] for c in range(config.CLASSES)]
    is_gt = [[] for c in range(config.CLASSES)]

    # Here we compute the false positives, false negatives and true positives of
    # the network predictions. We cannot do everything in a single numpy array,
    # as each image has a different number of filtered detections.

    # iterate over all batches
    for i in range(len(all_boxes)):

        batch_gt = all_gts[i]

        batch_classes = all_classes[i]

        batch_scores = all_scores[i]

        # shape is batch_size * anchors * x
        box_input = batch_gt[:, :, 1:5]
        labels = batch_gt[:, :, 9:]

        #iterate images per batch for image level analysis
        for j in range(len(all_boxes[i])):

            # add number of detections
            boxes_per_img.append(len(all_boxes[i][j]))

            #get index of non zero boxes
            non_zero_idx = np.sum(box_input[j][:], axis=-1) > 0

            #get non zero gt boxes
            nonzero_gts = np.reshape(box_input[j][non_zero_idx], [-1, 4])

            # add number of gt boxes
            boxes_per_gt.append(len(nonzero_gts))

            #get labels
            labels_per_image = labels[j]

            # get the class index of each non-zero (one-hot encoded) label
            nonzero_labels = [
                class_idx
                for one_hot in labels_per_image[non_zero_idx, :].astype(int)
                for class_idx, flag in enumerate(one_hot) if flag == 1
            ]

            #for every class count the true positives, false positives and false negatives
            tp_per_image = np.zeros(config.CLASSES)
            fp_per_image = np.zeros(config.CLASSES)
            fn_per_image = np.zeros(config.CLASSES)

            #use this to check if predicted box has already been assigned to a different gt
            assigned_idx = np.zeros_like(batch_classes[j])

            # for every gt per image compute overlaps with detections
            for k in range(len(nonzero_gts)):

                try:
                    #get overlap between gt box and all predictions
                    ious = utils.batch_iou(np.stack(all_boxes[i][j]),
                                           nonzero_gts[k])

                    #use this to check for biggest score
                    current_score = -1
                    #index of best detection
                    current_idx = -1

                    #iterate all the ious
                    for iou_index, iou in enumerate(ious):

                        # check that the iou is above the threshold, the classes match,
                        # the detection has not been assigned before, and the score is
                        # higher than the current best score; if all conditions hold,
                        # mark this as the current best detection
                        if iou > config.IOU_THRESHOLD \
                        and batch_classes[j][iou_index] == nonzero_labels[k] \
                        and not assigned_idx[iou_index]\
                        and batch_scores[j][iou_index] > current_score:

                            #update current score
                            current_score = batch_scores[j][iou_index]
                            #update idx of best
                            current_idx = iou_index

                    #if nothing was assigned to this box add a false negative
                    if current_score < 0:
                        fn_per_image[nonzero_labels[k]] += 1

                        #for mAP calc set this to a gt
                        is_gt[nonzero_labels[k]].append(1)
                        #append 0 as the score, as we did not detect it
                        all_score_thresholds[nonzero_labels[k]].append(0)
                    else:
                        #otherwise add a true positive for the corresponding class
                        tp_per_image[nonzero_labels[k]] += 1
                        # set to ignore assigned box
                        assigned_idx[current_idx] = 1
                        #append it as a gt
                        is_gt[nonzero_labels[k]].append(1)
                        #save threshold
                        all_score_thresholds[nonzero_labels[k]].append(
                            current_score)

                except:

                    fn_per_image[nonzero_labels[k]] = len(nonzero_gts[k])

            #calculate false positives, that is boxes that have not been assigned to a gt
            for index, ai in enumerate(assigned_idx):
                #if box has not been assigned

                if ai == 0:

                    #add a false positive to the corresponding class
                    fp_per_image[batch_classes[j][index]] += 1
                    #add this as a non gt
                    is_gt[batch_classes[j][index]].append(0)
                    #append the predicted score to the predicted class
                    all_score_thresholds[batch_classes[j][index]].append(
                        batch_scores[j][index])

            all_tps.append(tp_per_image)
            all_fns.append(fn_per_image)
            all_fps.append(fp_per_image)

    return boxes_per_img, boxes_per_gt, np.stack(all_tps), np.stack(
        all_fps), np.stack(all_fns), is_gt, all_score_thresholds
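A sketch of how the stacked per-image counts returned above could be reduced to per-class precision and recall (illustrative only; the original repo's AP computation may differ):

import numpy as np

def precision_recall(all_tps, all_fps, all_fns, eps=1e-7):
    # all_tps, all_fps, all_fns: arrays of shape (num_images, num_classes)
    tp = all_tps.sum(axis=0)
    fp = all_fps.sum(axis=0)
    fn = all_fns.sum(axis=0)
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    return precision, recall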