Example #1
    def parser(lines):
        # `lines` is a batch of annotation strings (eager string tensors);
        # input_shape, random, anchors and num_classes come from the enclosing scope.
        image_data = []
        box_data = []
        for line in lines:
            # decode each annotation line and load one (possibly augmented) image/box pair
            image, box = get_random_data(line.numpy().decode(), input_shape, random=random)
            image_data.append(image)
            box_data.append(box)

        image_data = np.array(image_data)
        box_data = np.array(box_data)

        # build the per-scale ground-truth tensors and convert everything to float32
        y_true = [tf.convert_to_tensor(y, tf.float32) for y in preprocess_true_boxes(box_data, input_shape, anchors, num_classes)]
        image_data = tf.convert_to_tensor(image_data, tf.float32)
        return (image_data, *y_true)
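The parser works on eager string tensors (line.numpy().decode()), so the surrounding training script presumably maps it over a tf.data pipeline through tf.py_function. A minimal sketch of that wiring; the dataset construction, the number of output scales and the AUTOTUNE setting are assumptions, not code from the source:

# Hypothetical wiring; the real training script may batch and shuffle differently.
dataset = (tf.data.Dataset.from_tensor_slices(annotation_lines)
           .shuffle(1000)
           .batch(batch_size, drop_remainder=True)
           .map(lambda lines: tf.py_function(
                    parser, inp=[lines],
                    # one image batch plus one y_true tensor per scale (3 scales assumed)
                    Tout=[tf.float32] * 4),
                num_parallel_calls=tf.data.experimental.AUTOTUNE))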
Example #2
def compute_ECE(logit_model,
                x: ndarray,
                y: ndarray,
                num_bins: int,
                temperature_scaling: float = 1.0) -> float:
    # Expected Calibration Error of the model's (optionally temperature-scaled) logits on (x, y).
    prediction = logit_model.predict(x)
    prediction_scaled = prediction / temperature_scaling
    logits = convert_to_tensor(prediction_scaled, dtype=float32, name='logits')
    labels_true = convert_to_tensor(argmax(y, axis=1),
                                    dtype=int32,
                                    name='labels_true')
    ece = expected_calibration_error(num_bins=num_bins,
                                     logits=logits,
                                     labels_true=labels_true)
    return ece.numpy()
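compute_ECE relies on several names that must be imported at module level; a sketch of plausible imports plus a call against a trained Keras classifier (model, x_test and y_test_onehot are placeholder names, and the ECE helper is assumed to be TensorFlow Probability's):

# Assumed imports; the original file header is not shown.
from numpy import ndarray, argmax
from tensorflow import convert_to_tensor, float32, int32
from tensorflow_probability.python.stats import expected_calibration_error

ece = compute_ECE(model, x_test, y_test_onehot, num_bins=15,
                  temperature_scaling=1.5)
print('ECE:', ece)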
Example #3
File: test.py  Project: svija/K210-yolo3
def test_parser():
    """ 测试parser函数以支持eager tensor """
    annotation_path = 'train.txt'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    val_split = 0.1
    with open(annotation_path) as f:
        annotation_lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(annotation_lines)
    np.random.seed(None)
    num_val = int(len(annotation_lines) * val_split)
    num_train = len(annotation_lines) - num_val

    batch_size = 32
    input_shape = (416, 416)

    num = len(annotation_lines)
    if num == 0 or batch_size <= 0:
        raise ValueError('empty annotation list or non-positive batch size')

    lines = tf.convert_to_tensor(annotation_lines[:10], tf.string)
    """ start parser """
    image_data = []
    box_data = []
    for line in lines:
        image, box = get_random_data(line.numpy().decode(),
                                     input_shape,
                                     random=True)
        image_data.append(image)
        box_data.append(box)

    image_data = np.array(image_data)
    box_data = np.array(box_data)

    y_true = [
        tf.convert_to_tensor(y, tf.float32) for y in preprocess_true_boxes(
            box_data, input_shape, anchors, num_classes)
    ]
    image_data = tf.convert_to_tensor(image_data, tf.float32)
    return (image_data, *y_true)
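Because everything runs eagerly, the returned tensors can be sanity-checked directly; a short sketch (the shapes shown are what one would expect for a 416x416 input with three output scales, not values taken from the source):

image_data, *y_true = test_parser()
print(image_data.shape)      # e.g. (10, 416, 416, 3)
for y in y_true:
    print(y.shape)           # (10, grid_h, grid_w, anchors_per_scale, 5 + num_classes)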
Example #4
x = layers.GRU(128)(inputs)
# x = layers.Dropout(0.4)(x)
outputs = layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
print("model initialized")

# choose the first stock
data = stock_data[0]
# compile model and train and evaluate it
print("stock code: ", data.stock_code)
model.compile(
    loss='mean_squared_error',
    optimizer=tf.keras.optimizers.RMSprop(learning_rate=learning_rate),
)
model.fit(
    x=tf.convert_to_tensor(data.train_x),
    y=tf.convert_to_tensor(data.train_y),
    batch_size=batch_size,
    epochs=num_epochs,
    verbose=1,
    # validation_split=0.2,
    initial_epoch=0,
    steps_per_epoch=num_batches,
    # validation_freq=2,
    use_multiprocessing=True)
test_loss = model.evaluate(x=data.test_x,
                           y=data.test_y,
                           verbose=1,
                           use_multiprocessing=False)
print("test_loss:", test_loss)
model.summary()
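Inference on the held-out split is not shown above; a minimal follow-up sketch, again converting the NumPy arrays with tf.convert_to_tensor (data.test_x is assumed to already have the (samples, timesteps, features) layout the GRU expects):

preds = model.predict(tf.convert_to_tensor(data.test_x))
print("first predictions:", preds[:5].squeeze())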
Example #5
def main(ckpt_weights, image_size, output_size, model_def, class_num,
         depth_multiplier, obj_thresh, iou_thresh, train_set, test_image):
    h = Helper(None, class_num, f'data/{train_set}_anchor.npy',
               np.reshape(np.array(image_size), (-1, 2)),
               np.reshape(np.array(output_size), (-1, 2)))
    network = eval(model_def)  # type: yolo_mobilev2
    yolo_model, yolo_model_warpper = network([image_size[0], image_size[1], 3],
                                             len(h.anchors[0]),
                                             class_num,
                                             alpha=depth_multiplier)

    yolo_model_warpper.load_weights(str(ckpt_weights))
    print(INFO, f' Load CKPT {str(ckpt_weights)}')
    orig_img = h._read_img(str(test_image))
    image_shape = orig_img.shape[0:2]
    img, _ = h._process_img(orig_img,
                            true_box=None,
                            is_training=False,
                            is_resize=True)
    """ load images """
    img = tf.expand_dims(img, 0)
    y_pred = yolo_model_warpper.predict(img)
    """ box list """
    _yxyx_box = []
    _yxyx_box_scores = []
    """ preprocess label """
    for l, pred_label in enumerate(y_pred):
        """ split the label """
        pred_xy = pred_label[..., 0:2]
        pred_wh = pred_label[..., 2:4]
        pred_confidence = pred_label[..., 4:5]
        pred_cls = pred_label[..., 5:]
        # box_scores = obj_score * class_score
        box_scores = tf.sigmoid(pred_cls) * tf.sigmoid(pred_confidence)
        # obj_mask = pred_confidence_score[..., 0] > obj_thresh
        """ reshape box  """
        # NOTE tf_xywh_to_all will auto use sigmoid function
        pred_xy_A, pred_wh_A = tf_xywh_to_all(pred_xy, pred_wh, l, h)
        boxes = correct_box(pred_xy_A, pred_wh_A, image_size, image_shape)
        boxes = tf.reshape(boxes, (-1, 4))
        box_scores = tf.reshape(box_scores, (-1, class_num))
        """ append box and scores to global list """
        _yxyx_box.append(boxes)
        _yxyx_box_scores.append(box_scores)

    yxyx_box = tf.concat(_yxyx_box, axis=0)
    yxyx_box_scores = tf.concat(_yxyx_box_scores, axis=0)

    mask = yxyx_box_scores >= obj_thresh
    """ do nms for every classes"""
    _boxes = []
    _scores = []
    _classes = []
    for c in range(class_num):
        class_boxes = tf.boolean_mask(yxyx_box, mask[:, c])
        class_box_scores = tf.boolean_mask(yxyx_box_scores[:, c], mask[:, c])
        select = tf.image.non_max_suppression(class_boxes,
                                              scores=class_box_scores,
                                              max_output_size=30,
                                              iou_threshold=iou_thresh)
        class_boxes = tf.gather(class_boxes, select)
        class_box_scores = tf.gather(class_box_scores, select)
        _boxes.append(class_boxes)
        _scores.append(class_box_scores)
        _classes.append(tf.ones_like(class_box_scores) * c)

    boxes = tf.concat(_boxes, axis=0)
    classes = tf.concat(_classes, axis=0)
    scores = tf.concat(_scores, axis=0)
    """ draw box  """
    font = ImageFont.truetype(font='asset/FiraMono-Medium.otf',
                              size=tf.cast(
                                  tf.floor(3e-2 * image_shape[0] + 0.5),
                                  tf.int32).numpy())

    thickness = (image_shape[0] + image_shape[1]) // 300
    """ show result """
    if len(classes) > 0:
        pil_img = Image.fromarray(orig_img)
        print(f'[top\tleft\tbottom\tright\tscore\tclass]')
        for i, c in enumerate(classes):
            box = boxes[i]
            score = scores[i]
            label = '{:2d} {:.2f}'.format(int(c.numpy()), score.numpy())
            draw = ImageDraw.Draw(pil_img)
            label_size = draw.textsize(label, font)
            top, left, bottom, right = box
            print(
                f'[{top:.1f}\t{left:.1f}\t{bottom:.1f}\t{right:.1f}\t{score:.2f}\t{int(c):2d}]'
            )
            top = max(0, tf.cast(tf.floor(top + 0.5), tf.int32))
            left = max(0, tf.cast(tf.floor(left + 0.5), tf.int32))
            bottom = min(image_shape[0],
                         tf.cast(tf.floor(bottom + 0.5), tf.int32))
            right = min(image_shape[1], tf.cast(tf.floor(right + 0.5),
                                                tf.int32))

            # place the label above the box when there is room for it
            if top - label_size[1] >= 0:
                text_origin = tf.convert_to_tensor([left, top - label_size[1]])
            else:
                text_origin = tf.convert_to_tensor([left, top + 1])

            for j in range(thickness):
                draw.rectangle([left + j, top + j, right - j, bottom - j],
                               outline=h.colormap[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=h.colormap[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw
        pil_img.show()
    else:
        print(NOTE, ' no boxes detected')
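The per-class loop above is the usual mask-then-NMS pattern (tf.boolean_mask followed by tf.image.non_max_suppression). A self-contained toy run of the same calls on dummy boxes, with an arbitrary threshold, just to illustrate the behaviour:

import tensorflow as tf

boxes = tf.constant([[0., 0., 10., 10.], [1., 1., 11., 11.], [50., 50., 60., 60.]])
scores = tf.constant([[0.9], [0.8], [0.6]])   # a single class
mask = scores >= 0.5
class_boxes = tf.boolean_mask(boxes, mask[:, 0])
class_scores = tf.boolean_mask(scores[:, 0], mask[:, 0])
keep = tf.image.non_max_suppression(class_boxes, class_scores,
                                    max_output_size=30, iou_threshold=0.5)
print(tf.gather(class_boxes, keep).numpy())   # the heavily overlapping box is dropped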
Example #6
def compute_loss():
    # temperature scaling: divide the logits by `temp` before taking the
    # softmax cross-entropy against the true labels `y`
    divided_prediction = divide(logits, temp)
    loss = reduce_mean(
        softmax_cross_entropy_with_logits_v2(labels=convert_to_tensor(y),
                                             logits=divided_prediction))
    return loss
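compute_loss closes over logits, y and temp; in temperature-scaling calibration this loss is minimized with respect to temp alone. A sketch of that outer loop in plain TF2 style (it swaps the _v2 op for tf.nn.softmax_cross_entropy_with_logits, and the Adam optimizer and step count are arbitrary choices):

# Assumed setup: `logits` are validation-set model outputs, `y` the one-hot labels.
temp = tf.Variable(1.0, dtype=tf.float32)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

for _ in range(300):
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                labels=tf.convert_to_tensor(y, tf.float32),
                logits=logits / temp))
    grads = tape.gradient(loss, [temp])
    optimizer.apply_gradients(zip(grads, [temp]))

print('fitted temperature:', float(temp.numpy()))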