Example #1
def noise_label(labels):
    # Build a float mask marking every slot of labels['reco'] that holds the
    # same item as the clicked position. The bare ops (range, shape,
    # concatenate, expand_dims, cast, gather_nd, equal) and dtypes are assumed
    # to be TensorFlow names imported at module level; labels['click_position']
    # is expected to have shape [batch, 1] and dtype int64.
    id = range(shape(labels['click_position'])[0])
    idx = concatenate(
        [expand_dims(cast(id, int64), axis=1), labels['click_position']],
        axis=1)
    clicked_item = gather_nd(labels['reco'], idx)
    return cast(equal(expand_dims(clicked_item, axis=1), labels['reco']),
                float32)
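
The mask returned by noise_label is 1 exactly where labels['reco'] holds the same item id as the clicked slot. Below is a minimal usage sketch, assuming TF 2.x eager execution; the alias block is only an assumption about how the bare ops above are bound in the original module.

# Assumed bindings for the bare ops used by noise_label (an assumption; the
# original module presumably imports or aliases these itself).
import tensorflow as tf
range, shape, concatenate = tf.range, tf.shape, tf.concat
expand_dims, cast, gather_nd, equal = tf.expand_dims, tf.cast, tf.gather_nd, tf.equal
int64, float32 = tf.int64, tf.float32

labels = {
    'click_position': tf.constant([[1], [2]], dtype=tf.int64),    # clicked slot per row
    'reco': tf.constant([[5, 7, 9], [9, 7, 5]], dtype=tf.int64),  # recommended item ids
}
print(noise_label(labels).numpy())
# [[0. 1. 0.]
#  [0. 0. 1.]]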
Example #2
 def test_ficken(self):
     # Sanity check for the gather_nd lookup used in noise_label: with clicked
     # positions [1, 2], the printed result should be [1 0]. Assumes a
     # tf.test.TestCase subclass and the same module-level TensorFlow imports.
     labels = {'click_position': [1, 2], 'reco': [[0, 1, 2], [2, 1, 0]]}
     id = range(shape(labels['click_position'])[0])
     idx = concatenate([
         expand_dims(cast(id, int64), axis=1),
         expand_dims(cast(labels['click_position'], int64), axis=1)
     ],
                       axis=1)
     clicked_item = gather_nd(labels['reco'], idx)
     with self.test_session():
         print(clicked_item.eval())
Example #3
def create_label(click_position, num_labels=10):
    # Return a [num_rows, num_labels] float matrix that is 1 everywhere except
    # at each row's clicked position (the complement of a one-hot matrix). The
    # bare ops (shape, range, concatenate, SparseTensor, to_dense, ...) are
    # assumed to be TensorFlow names imported at module level; click_position
    # is expected to have shape [num_rows, 1].
    num_rows = shape(click_position)[0]
    row_idx = expand_dims(range(num_rows), axis=1)
    idx = concatenate([row_idx, cast(click_position, int32)], axis=1)
    labels = SparseTensor(indices=cast(idx, int64),
                          values=ones([num_rows]),
                          dense_shape=[num_rows, num_labels])
    return ones([num_rows, num_labels]) - to_dense(labels)
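
create_label produces the complement of a one-hot matrix: each row is all ones except at its clicked position. A short sketch in the same spirit as the one under Example #1 (TF 2.x eager; the alias block is an assumption about the original module's imports).

# Assumed bindings for the bare ops used by create_label (an assumption).
import tensorflow as tf
shape, range, expand_dims, concatenate = tf.shape, tf.range, tf.expand_dims, tf.concat
cast, ones, SparseTensor, to_dense = tf.cast, tf.ones, tf.SparseTensor, tf.sparse.to_dense
int32, int64 = tf.int32, tf.int64

click_position = tf.constant([[1], [3]])
print(create_label(click_position, num_labels=5).numpy())
# [[1. 0. 1. 1. 1.]
#  [1. 1. 1. 0. 1.]]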
Example #4
def tf_iou(pred_xy: tf.Tensor, pred_wh: tf.Tensor, vaild_xy: tf.Tensor,
           vaild_wh: tf.Tensor) -> tf.Tensor:
    """ calc the iou form pred box with vaild box

    Parameters
    ----------
    pred_xy : tf.Tensor
        pred box shape = [out h, out w, anchor num, 2]

    pred_wh : tf.Tensor
        pred box shape = [out h, out w, anchor num, 2]

    vaild_xy : tf.Tensor
        vaild box shape = [? , 2]

    vaild_wh : tf.Tensor
        vaild box shape = [? , 2]

    Returns
    -------
    tf.Tensor
        iou value shape = [out h, out w, anchor num ,?]
    """
    b1_xy = tf.expand_dims(pred_xy, -2)
    b1_wh = tf.expand_dims(pred_wh, -2)
    b1_wh_half = b1_wh / 2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    b2_xy = tf.expand_dims(vaild_xy, 0)
    b2_wh = tf.expand_dims(vaild_wh, 0)
    b2_wh_half = b2_wh / 2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    intersect_mins = tf.maximum(b1_mins, b2_mins)
    intersect_maxes = tf.minimum(b1_maxes, b2_maxes)
    intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)

    return iou
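
The two expand_dims calls set up broadcasting so that every predicted anchor box is compared with every valid box in one shot. A minimal shape check, assuming TF 2.x eager execution and illustrative sizes:

# Shape check for tf_iou (illustrative sizes; assumes TF 2.x eager mode).
import tensorflow as tf

pred_xy = tf.random.uniform([13, 13, 3, 2])   # 13x13 grid, 3 anchors, (x, y) centers
pred_wh = tf.random.uniform([13, 13, 3, 2])   # matching (w, h)
vaild_xy = tf.random.uniform([5, 2])          # 5 ground-truth boxes, (x, y) centers
vaild_wh = tf.random.uniform([5, 2])          # matching (w, h)

print(tf_iou(pred_xy, pred_wh, vaild_xy, vaild_wh).shape)  # (13, 13, 3, 5)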
Example #5
def main(ckpt_weights, image_size, output_size, model_def, class_num,
         depth_multiplier, obj_thresh, iou_thresh, train_set, test_image):
    h = Helper(None, class_num, f'data/{train_set}_anchor.npy',
               np.reshape(np.array(image_size), (-1, 2)),
               np.reshape(np.array(output_size), (-1, 2)))
    network = eval(model_def)  # type: yolo_mobilev2
    yolo_model, yolo_model_warpper = network([image_size[0], image_size[1], 3],
                                             len(h.anchors[0]),
                                             class_num,
                                             alpha=depth_multiplier)

    yolo_model_warpper.load_weights(str(ckpt_weights))
    print(INFO, f' Load CKPT {str(ckpt_weights)}')
    orig_img = h._read_img(str(test_image))
    image_shape = orig_img.shape[0:2]
    img, _ = h._process_img(orig_img,
                            true_box=None,
                            is_training=False,
                            is_resize=True)
    """ load images """
    img = tf.expand_dims(img, 0)
    y_pred = yolo_model_warpper.predict(img)
    """ box list """
    _yxyx_box = []
    _yxyx_box_scores = []
    """ preprocess label """
    for l, pred_label in enumerate(y_pred):
        """ split the label """
        pred_xy = pred_label[..., 0:2]
        pred_wh = pred_label[..., 2:4]
        pred_confidence = pred_label[..., 4:5]
        pred_cls = pred_label[..., 5:]
        # box_scores = obj_score * class_score
        box_scores = tf.sigmoid(pred_cls) * tf.sigmoid(pred_confidence)
        # obj_mask = pred_confidence_score[..., 0] > obj_thresh
        """ reshape box  """
        # NOTE tf_xywh_to_all will auto use sigmoid function
        pred_xy_A, pred_wh_A = tf_xywh_to_all(pred_xy, pred_wh, l, h)
        boxes = correct_box(pred_xy_A, pred_wh_A, image_size, image_shape)
        boxes = tf.reshape(boxes, (-1, 4))
        box_scores = tf.reshape(box_scores, (-1, class_num))
        """ append box and scores to global list """
        _yxyx_box.append(boxes)
        _yxyx_box_scores.append(box_scores)

    yxyx_box = tf.concat(_yxyx_box, axis=0)
    yxyx_box_scores = tf.concat(_yxyx_box_scores, axis=0)

    mask = yxyx_box_scores >= obj_thresh
    """ do nms for every classes"""
    _boxes = []
    _scores = []
    _classes = []
    for c in range(class_num):
        class_boxes = tf.boolean_mask(yxyx_box, mask[:, c])
        class_box_scores = tf.boolean_mask(yxyx_box_scores[:, c], mask[:, c])
        select = tf.image.non_max_suppression(class_boxes,
                                              scores=class_box_scores,
                                              max_output_size=30,
                                              iou_threshold=iou_thresh)
        class_boxes = tf.gather(class_boxes, select)
        class_box_scores = tf.gather(class_box_scores, select)
        _boxes.append(class_boxes)
        _scores.append(class_box_scores)
        _classes.append(tf.ones_like(class_box_scores) * c)

    boxes = tf.concat(_boxes, axis=0)
    classes = tf.concat(_classes, axis=0)
    scores = tf.concat(_scores, axis=0)
    """ draw box  """
    font = ImageFont.truetype(font='asset/FiraMono-Medium.otf',
                              size=tf.cast(
                                  tf.floor(3e-2 * image_shape[0] + 0.5),
                                  tf.int32).numpy())

    thickness = (image_shape[0] + image_shape[1]) // 300
    """ show result """
    if len(classes) > 0:
        pil_img = Image.fromarray(orig_img)
        print(f'[top\tleft\tbottom\tright\tscore\tclass]')
        for i, c in enumerate(classes):
            box = boxes[i]
            score = scores[i]
            label = '{:2d} {:.2f}'.format(int(c.numpy()), score.numpy())
            draw = ImageDraw.Draw(pil_img)
            label_size = draw.textsize(label, font)
            top, left, bottom, right = box
            print(
                f'[{top:.1f}\t{left:.1f}\t{bottom:.1f}\t{right:.1f}\t{score:.2f}\t{int(c):2d}]'
            )
            top = max(0, tf.cast(tf.floor(top + 0.5), tf.int32))
            left = max(0, tf.cast(tf.floor(left + 0.5), tf.int32))
            bottom = min(image_shape[0],
                         tf.cast(tf.floor(bottom + 0.5), tf.int32))
            right = min(image_shape[1], tf.cast(tf.floor(right + 0.5),
                                                tf.int32))

            if top - label_size[1] >= 0:  # enough room to place the label above the box
                text_origin = tf.convert_to_tensor([left, top - label_size[1]])
            else:
                text_origin = tf.convert_to_tensor([left, top + 1])

            for j in range(thickness):
                draw.rectangle([left + j, top + j, right - j, bottom - j],
                               outline=h.colormap[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=h.colormap[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw
        pil_img.show()
    else:
        print(NOTE, ' no boxes detected')
Example #6
def create_diffs(positive_scores, scores):
    # Broadcast each row's positive (clicked) score against the full score row,
    # yielding positive-minus-score differences for every slot.
    return expand_dims(positive_scores, axis=1) - scores
Example #7
def lookup_positives(scores, click_position):
    # Gather the score at the clicked position of each row. The bare ops are
    # assumed to be TensorFlow names imported at module level; click_position
    # is expected to have shape [num_rows, 1].
    num_rows = shape(scores)[0]
    row_idx = expand_dims(range(num_rows), axis=1)
    idx = concatenate([row_idx, cast(click_position, int32)], axis=1)
    return gather_nd(scores, idx)
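
lookup_positives pairs naturally with create_diffs from Example #6: the first gathers each row's clicked score, the second broadcasts it against the whole score row to form pairwise differences. A small sketch, again assuming the bare ops are bound to their TensorFlow counterparts (TF 2.x eager):

# Assumed bindings for the bare ops used above (an assumption).
import tensorflow as tf
shape, range, expand_dims, concatenate = tf.shape, tf.range, tf.expand_dims, tf.concat
cast, gather_nd, int32 = tf.cast, tf.gather_nd, tf.int32

scores = tf.constant([[0.25, 1.0, 0.5],
                      [0.75, 0.25, 0.5]])
click_position = tf.constant([[1], [0]])

positives = lookup_positives(scores, click_position)  # [1.0, 0.75]
print(create_diffs(positives, scores).numpy())        # positive minus every score
# [[0.75 0.   0.5 ]
#  [0.   0.5  0.25]]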
Example #8
def predict_scores(features):
    # Project candidate and anchor features into a shared 10-dimensional space
    # (`dense` is assumed to be a module-level dense-layer helper), then score
    # each candidate by its dot product with the anchor embedding.
    candidate = dense(features['label'], 10)
    anchor = dense(features['anchor_label'], 10)
    scores = matmul(candidate, expand_dims(anchor, axis=1), transpose_b=True)
    return squeeze(scores)
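
The scoring step in predict_scores is a batched dot product: if the projected candidate has shape [batch, slate, 10] and the anchor has shape [batch, 10], expanding the anchor and using transpose_b yields one score per slate item. A sketch of only that step (the shapes and the `dense` helper's behaviour are assumptions; `dense` is bypassed with ready-made embeddings):

# Sketch of the matmul/squeeze step only. `dense` from the snippet above is
# assumed to be a module-level dense-layer wrapper and is not reproduced here;
# the shapes below are illustrative assumptions.
import tensorflow as tf

candidate = tf.random.uniform([4, 6, 10])  # assumed [batch, slate, embedding]
anchor = tf.random.uniform([4, 10])        # assumed [batch, embedding]

scores = tf.matmul(candidate, tf.expand_dims(anchor, axis=1), transpose_b=True)
print(tf.squeeze(scores).shape)            # (4, 6): one score per slate item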