Example #1
import tensorflow as tf

def variable_summaries(var, name):
    """Attach mean/stddev/max/min scalar summaries and a histogram for var."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean_' + name, mean)
        # Compute the standard deviation of the parameter
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev_' + name, stddev)
        tf.scalar_summary('max_' + name, tf.reduce_max(var))
        tf.scalar_summary('min_' + name, tf.reduce_min(var))
        # Record the parameter's distribution with a histogram
        tf.histogram_summary('histogram_' + name, var)
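A minimal usage sketch (hypothetical variable and log directory), assuming the same pre-1.0 TensorFlow summary API as the snippet above:

W = tf.Variable(tf.truncated_normal([784, 10], stddev=0.1), name='weights')
variable_summaries(W, 'weights')       # registers mean/stddev/max/min/histogram
merged = tf.merge_all_summaries()      # gathers every summary added above
writer = tf.train.SummaryWriter('/tmp/logs')  # assumed log directory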
Example #2
def tf_xywh_to_grid(all_true_xy: tf.Tensor, all_true_wh: tf.Tensor, layer: int,
                    h: Helper) -> [tf.Tensor, tf.Tensor]:
    """convert true label xy wh to grid scale

    Parameters
    ----------
    all_true_xy : tf.Tensor
        true box centers xy, normalized to [0, 1]

    all_true_wh : tf.Tensor
        true box sizes wh, normalized to [0, 1]

    layer : int
        layer index
    h : Helper
        helper object (provides out_hw, xy_offset, anchors)


    Returns
    -------
    [tf.Tensor, tf.Tensor]
        grid_true_xy, grid_true_wh, shape = [out_h, out_w, anchor_num, 2]
    """
    with tf.name_scope('xywh_to_grid_%d' % layer):
        grid_true_xy = (all_true_xy *
                        h.out_hw[layer][::-1]) - h.xy_offset[layer]
        grid_true_wh = tf.log(all_true_wh / h.anchors[layer])
    return grid_true_xy, grid_true_wh
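A NumPy sketch (grid size, offset, and box are assumed values, not from the source) of the xy step above: the normalized center is scaled to grid units, then the cell offset is subtracted, leaving the position inside the responsible cell:

import numpy as np

out_hw = np.array([13, 13])      # assumed output grid for this layer
xy_offset = np.array([6., 4.])   # assumed offset of the responsible cell
true_xy = np.array([0.5, 0.35])  # box center, normalized to [0, 1]
grid_xy = true_xy * out_hw[::-1] - xy_offset
# 0.5 * 13 - 6 = 0.50 and 0.35 * 13 - 4 = 0.55: both land inside the cell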
Example #3
def tf_xywh_to_all(grid_pred_xy: tf.Tensor, grid_pred_wh: tf.Tensor,
                   layer: int, h: Helper) -> [tf.Tensor, tf.Tensor]:
    """ rescale the pred raw [grid_pred_xy,grid_pred_wh] to [0~1]

    Parameters
    ----------
    grid_pred_xy : tf.Tensor
        raw predicted xy, in grid scale

    grid_pred_wh : tf.Tensor
        raw predicted wh, in log space relative to the anchors

    layer : int
        the output layer
    h : Helper
        helper object (provides out_hw, xy_offset, anchors)


    Returns
    -------
    tuple
        after processing: [all_pred_xy, all_pred_wh], rescaled to [0, 1]
    """
    with tf.name_scope('xywh_to_all_%d' % layer):
        all_pred_xy = (tf.sigmoid(grid_pred_xy) +
                       h.xy_offset[layer]) / h.out_hw[layer][::-1]
        all_pred_wh = tf.exp(grid_pred_wh) * h.anchors[layer]
    return all_pred_xy, all_pred_wh
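This is the inverse of tf_xywh_to_grid: sigmoid plus the cell offset undoes the xy encoding, and exp times the anchor undoes the log wh encoding. A NumPy round trip on the wh pair (assumed anchor and box, as before):

import numpy as np

anchor = np.array([0.2, 0.3])        # assumed anchor, image-normalized
true_wh = np.array([0.4, 0.6])
grid_wh = np.log(true_wh / anchor)   # forward encoding from Example #2
back_wh = np.exp(grid_wh) * anchor   # inverse step performed here
assert np.allclose(back_wh, true_wh)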
Example #4
def tf_reshape_box(true_xy_A: tf.Tensor, true_wh_A: tf.Tensor,
                   p_xy_A: tf.Tensor, p_wh_A: tf.Tensor, layer: int,
                   helper: Helper) -> tuple:
    """ reshape the xywh to [?,h,w,anchor_nums,true_box_nums,2]
        NOTE  must use obj mask in atrue xywh !
    Parameters
    ----------
    true_xy_A : tf.Tensor
        shape will be [true_box_nums, 2]

    true_wh_A : tf.Tensor
        shape will be [true_box_nums, 2]

    p_xy_A : tf.Tensor
        shape will be [?, h, w, anchor_nums, 2]

    p_wh_A : tf.Tensor
        shape will be [?, h, w, anchor_nums, 2]

    layer : int

    helper : Helper


    Returns
    -------
    tuple
        true_cent, true_box_wh, pred_cent, pred_box_wh
    """
    with tf.name_scope('reshape_box_%d' % layer):
        true_cent = true_xy_A[tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis,
                              ...]
        true_box_wh = true_wh_A[tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis,
                                ...]

        true_cent = tf.tile(true_cent, [
            helper.batch_size, helper.out_hw[layer][0],
            helper.out_hw[layer][1], helper.anchor_number, 1, 1
        ])
        true_box_wh = tf.tile(true_box_wh, [
            helper.batch_size, helper.out_hw[layer][0],
            helper.out_hw[layer][1], helper.anchor_number, 1, 1
        ])

        pred_cent = p_xy_A[..., tf.newaxis, :]
        pred_box_wh = p_wh_A[..., tf.newaxis, :]
        pred_cent = tf.tile(pred_cent, [1, 1, 1, 1, tf.shape(true_xy_A)[0], 1])
        pred_box_wh = tf.tile(
            pred_box_wh, [1, 1, 1, 1, tf.shape(true_wh_A)[0], 1])

    return true_cent, true_box_wh, pred_cent, pred_box_wh
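A shape-only NumPy sketch (sizes are assumed) of the tiling above: true boxes [N, 2] and predictions [B, H, W, A, 2] are expanded and tiled to a common [B, H, W, A, N, 2], so every prediction can later be scored against every true box:

import numpy as np

B, H, W, A, N = 2, 13, 13, 3, 5                      # assumed sizes
true_xy = np.zeros([N, 2])[None, None, None, None]   # -> [1,1,1,1,N,2]
true_xy = np.tile(true_xy, [B, H, W, A, 1, 1])       # -> [B,H,W,A,N,2]
pred_xy = np.zeros([B, H, W, A, 2])[..., None, :]    # -> [B,H,W,A,1,2]
pred_xy = np.tile(pred_xy, [1, 1, 1, 1, N, 1])       # -> [B,H,W,A,N,2]
assert true_xy.shape == pred_xy.shape == (B, H, W, A, N, 2)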
Example #5
def calc_ignore_mask(t_xy_A: tf.Tensor, t_wh_A: tf.Tensor, p_xy: tf.Tensor,
                     p_wh: tf.Tensor, obj_mask: tf.Tensor, iou_thresh: float,
                     layer: int, helper: Helper) -> tf.Tensor:
    """clac the ignore mask

    Parameters
    ----------
    t_xy_A : tf.Tensor
        raw true xy, shape = [batch size, h, w, anchors, 2]
    t_wh_A : tf.Tensor
        raw true wh, shape = [batch size, h, w, anchors, 2]
    p_xy : tf.Tensor
        raw pred xy, shape = [batch size, h, w, anchors, 2]
    p_wh : tf.Tensor
        raw pred wh, shape = [batch size, h, w, anchors, 2]
    obj_mask : tf.Tensor
        old obj mask, shape = [batch size, h, w, anchors]
    iou_thresh : float
        IoU threshold
    layer : int
        layer index
    helper : Helper
        Helper object

    Returns
    -------
    tf.Tensor
        ignore_mask, shape = [batch size, h, w, anchors, 1]
    """
    with tf.name_scope('calc_mask_%d' % layer):
        pred_xy, pred_wh = tf_xywh_to_all(p_xy, p_wh, layer, helper)

        # A tf.map_fn alternative, kept for reference:
        # def lmba(bc):
        #     valid_xy = tf.boolean_mask(t_xy_A[bc], obj_mask[bc])
        #     valid_wh = tf.boolean_mask(t_wh_A[bc], obj_mask[bc])
        #     iou_score = tf_iou(pred_xy[bc], pred_wh[bc], valid_xy, valid_wh)
        #     best_iou = tf.reduce_max(iou_score, axis=-1, keepdims=True)
        #     return tf.cast(best_iou < iou_thresh, tf.float32)
        # return map_fn(lmba, tf.range(helper.batch_size), dtype=tf.float32)
        ignore_mask = []
        for bc in range(helper.batch_size):
            # keep only the true boxes actually present in this image
            valid_xy = tf.boolean_mask(t_xy_A[bc], obj_mask[bc])
            valid_wh = tf.boolean_mask(t_wh_A[bc], obj_mask[bc])
            iou_score = tf_iou(pred_xy[bc], pred_wh[bc], valid_xy, valid_wh)
            best_iou = tf.reduce_max(iou_score, axis=-1, keepdims=True)
            # ignore predictions that already match some true box well enough
            ignore_mask.append(tf.cast(best_iou < iou_thresh, tf.float32))
    return tf.stack(ignore_mask)
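A hedged sketch (not from the source) of how such an ignore mask is typically applied in a YOLO-style loss: the background confidence penalty is kept only where the best IoU with any true box falls below iou_thresh, so near-misses are not punished:

import tensorflow as tf

obj_mask = tf.zeros([2, 13, 13, 3])       # hypothetical: 1 where a true box is assigned
ignore_mask = tf.ones([2, 13, 13, 3, 1])  # output of calc_ignore_mask above
pred_conf = tf.zeros([2, 13, 13, 3, 1])   # hypothetical raw confidence logits
noobj_weight = (1. - obj_mask[..., tf.newaxis]) * ignore_mask
noobj_loss = noobj_weight * tf.square(tf.sigmoid(pred_conf))  # push background conf to 0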
Example #6
import matplotlib.pyplot as plt
import tensorflow as tf

train_times = 50000
base_path = "/Users/coorchice/Desktop/ML/model/ml/BreadBasket/"
save_path = base_path + str(train_times) + "/"

BBDATA = read_datas('data/')

x_data = tf.placeholder(tf.float32, [None, 135])  # 135 input features
y_data = tf.placeholder(tf.float32, [None])       # regression target
W = tf.Variable(tf.truncated_normal([135, 1], stddev=0.1))
b = tf.Variable(tf.constant(0.1, shape=[1]))
# flatten to shape [None] so y - y_data doesn't broadcast to [None, None]
y = tf.reshape(tf.nn.relu(tf.matmul(x_data, W) + b), [-1])

# Loss: mean squared error (a cross-entropy version is kept commented out below)
with tf.name_scope('loss'):
    # cross_entropy = -tf.reduce_sum(y_data * tf.log(y))
    cross_entropy = tf.reduce_mean(tf.square(y - y_data))
tf.scalar_summary('loss', cross_entropy)

# init_lr = 0.00001
lr = tf.Variable(0.00005, trainable=False)
# global_step = tf.Variable(0., trainable=False)
# lr = tf.train.exponential_decay(init_lr, global_step=global_step, decay_steps=10000, decay_rate=0.5, staircase=True)

# Use gradient descent to continually adjust the variables toward the minimum loss,
# here at the learning rate `lr` defined above
train_step = tf.train.GradientDescentOptimizer(lr).minimize(cross_entropy)
# train_step = tf.train.GradientDescentOptimizer(0.00001).minimize(cross_entropy)

# correct_prediction = tf.equal(y, y_data)
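The snippet builds the graph but stops before running it; a minimal training-loop sketch, assuming TF 1.x-era sessions and that BBDATA exposes a next-batch helper (both assumptions, not shown in the source):

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())  # pre-1.0 initializer, matching the API above
    for step in range(train_times):
        batch_x, batch_y = BBDATA.train.next_batch(100)  # assumed helper
        sess.run(train_step, feed_dict={x_data: batch_x, y_data: batch_y})
    tf.train.Saver().save(sess, save_path + 'model.ckpt')  # reuses save_path above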