Example #1
def yolo_correct_boxes(box_xy, box_wh, input_shape,
                       image_shape):  # Adjust the boxes' relative size to match the original image's aspect ratio
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
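A minimal calling sketch with made-up numbers (the shapes, values, and the `from keras import backend as K` import are assumptions, not part of the original snippet): two normalized (x, y, w, h) predictions from a 416x416 letterboxed input are mapped back onto a 640x480 source image.

from keras import backend as K

# Hypothetical predictions: two boxes in normalized (x, y) / (w, h) form.
box_xy = K.constant([[0.50, 0.50], [0.25, 0.75]])
box_wh = K.constant([[0.20, 0.30], [0.10, 0.10]])

boxes = yolo_correct_boxes(box_xy, box_wh,
                           input_shape=K.constant([416., 416.]),  # model input (h, w)
                           image_shape=K.constant([480., 640.]))  # source image (h, w)
# `boxes` holds (y_min, x_min, y_max, x_max) in original-image pixel coordinates.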
Example #2
def YOLOCorrectBoxes(box_xy, box_wh, input_shape, image_shape):
    '''Get Corrected Boxes.'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]

    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))

    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape

    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_max = box_yx + (box_hw / 2.)

    boxes = K.concatenate([
        box_mins[..., 0:1],  #y min
        box_mins[..., 1:2],  #x min
        box_max[..., 0:1],  #y max
        box_max[..., 1:2]  #x max
    ])

    #Scale boxes back to original image shape
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example #3
def softmax_with_mask(tensor_and_mask):
    input_tensor, mask_tensor = tensor_and_mask
    min_tensor = K.min(input_tensor, axis=1, keepdims=True)
    positive_tensor = (min_tensor - input_tensor) * mask_tensor
    max_tensor = K.max(positive_tensor, axis=1, keepdims=True)
    exp_tensor = K.exp(positive_tensor - max_tensor)
    masked_tensor = exp_tensor * mask_tensor
    summed_tensor = K.sum(masked_tensor, axis=1, keepdims=True)
    return masked_tensor / (summed_tensor + 1e-10)
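Because the per-row minimum shift cancels in the normalization, this is effectively a softmax over the negated scores, restricted to the masked entries. A small calling sketch with hypothetical values (the import and numbers are assumptions):

from keras import backend as K

scores = K.constant([[2.0, 1.0, 0.5, 3.0]])  # hypothetical scores for 4 candidates
mask = K.constant([[1.0, 1.0, 1.0, 0.0]])    # only the first three are valid
probs = softmax_with_mask([scores, mask])
# Masked-out entries get probability 0; the valid entries sum to ~1, with the
# smallest score (0.5) receiving the largest weight.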
Example #4
  def call(self, x, mask=None):
    """ The actual processing in the layer: Normalize, padd, then convolution.
    """
    input_1, input_2 = x
    input_shape = input_1.shape
    
    # assert input_shape == input_2._keras_shape
    
    self.H = input_shape[1]
    self.W = input_shape[2]
    self.C = input_shape[3]

    # normalization
    if self.use_norm == 'euclidean':
      input_1 = K.l2_normalize(input_1, axis=2)
      input_2 = K.l2_normalize(input_2, axis=2)

    if self.use_norm == 'scaling':
      input_1_min = K.min(input_1, axis=2, keepdims=True)
      input_1_max = K.max(input_1, axis=2, keepdims=True)
      input_1 = (input_1 - input_1_min) / (input_1_max - input_1_min + 0.000001)
  
      input_2_min = K.min(input_2, axis=2, keepdims=True)
      input_2_max = K.max(input_2, axis=2, keepdims=True)
      input_2 = (input_2 - input_2_min) / (input_2_max - input_2_min + 0.000001)

    if self.use_norm == 'standardization':
      input_1 = (input_1 - K.mean(input_1, axis=2, keepdims=True)) + 0.00001
      input_1 = K.l2_normalize(input_1, axis=2)
      input_2 = (input_2 - K.mean(input_2, axis=2, keepdims=True)) + 0.00001
      input_2 = K.l2_normalize(input_2, axis=2)

    # Pad input_1 circularly so that a correlation can be computed for every
    # horizontal position.
    padding1 = RangePadding2D(padding=self.W // 2)(input_1)

    # tf.scan explained (in Chinese): https://zhuanlan.zhihu.com/p/96503559
    out = tf.scan(self.single_sample_corr,
                  elems=[padding1, input_2],
                  initializer=(K.zeros((int(self.H), int(self.W), int(self.output_dim))))
                  )
    return out
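For reference, a minimal sketch of the `tf.scan` pattern used above: the callback receives the previous accumulator and the current slice of `elems` (here, one sample per batch element), and `initializer` fixes the accumulator's shape and dtype. The toy below only shows the mechanics with a running sum:

import tensorflow as tf

elems = tf.constant([1., 2., 3., 4.])
# fn(accumulator, current_element) -> new accumulator, threaded along axis 0.
running_sum = tf.scan(lambda acc, x: acc + x, elems, initializer=tf.constant(0.))
# running_sum == [1., 3., 6., 10.]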
Example #5
    def call(self, y):
        # Sanity Check
        if isinstance(y, list):
            raise ValueError('TSG layer has only 1 input')
        # y = tf_print(y, [y], message='{}: The unconstrained action is:'.format(y.name.split('/')[0]), summarize=-1)
        y = check_numerics(y, 'Problem with input y')

        # Calculate A.c
        Ac = tensordot(self.A_graph, self.c_graph, 1)

        # Calculate b - Ac
        bMinusAc = self.b_graph - Ac

        # Calculate y - c
        yMinusc = y - self.c_graph

        # Calculate A.(y - c)
        ADotyMinusc = K.sum((self.A_graph * expand_dims(yMinusc, -2)), axis=2)

        # Do elem-wise division
        intersection_points = bMinusAc / (ADotyMinusc + K.epsilon()
                                          )  # Do we need the K.epsilon()?

        # Enforce 0 <= intersection_points <= 1 because the point must lie between c and y
        greater_1 = K.greater(intersection_points,
                              K.ones_like(intersection_points))
        candidate_alpha = K.switch(greater_1,
                                   K.ones_like(intersection_points) + 1,
                                   intersection_points)

        less_0 = K.less(candidate_alpha, K.zeros_like(intersection_points))
        candidate_alpha = K.switch(less_0,
                                   K.ones_like(intersection_points) + 1,
                                   candidate_alpha)

        # Find farthest intersection point from y to get projection point
        alpha = K.min(candidate_alpha, axis=-1, keepdims=True)

        # If it is an interior point, y itself is the projection point
        interior_point = K.greater(alpha, K.ones_like(alpha))
        alpha = K.switch(interior_point, K.ones_like(alpha), alpha)
        # alpha = tf_print(alpha, [alpha], message="{}: The value of alpha is: ".format(alpha.name.split('/')[0]))

        # Return \alpha.y + (1 - \alpha).c
        z = alpha * y + ((1 - alpha) * self.c_graph)
        # z = tf_print(z, [z], message='{}: The constrained action is:'.format(z.name.split('/')[0]), summarize=-1)

        return z
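Geometrically, the layer intersects the ray from the interior point `c` through `y` with each hyperplane of `A x <= b`; intersections outside `[0, 1]` are replaced by a sentinel of 2 so they cannot win the minimum, and if even the minimum exceeds 1 the action is already feasible and is returned unchanged. A hypothetical one-constraint NumPy check of that arithmetic (not using the layer itself):

import numpy as np

# One constraint x <= 1 (A = [[1.]], b = [1.]), interior point c = 0, action y = 3.
A, b, c, y = np.array([[1.0]]), np.array([1.0]), np.array([0.0]), np.array([3.0])
alphas = (b - A @ c) / (A @ (y - c))                         # intersection at 1/3
alphas = np.where((alphas < 0) | (alphas > 1), 2.0, alphas)  # out-of-range sentinel
alpha = min(alphas.min(), 1.0)                               # interior point -> alpha = 1
z = alpha * y + (1 - alpha) * c                              # z = [1.], on the boundary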
Example #6
def f(x):
    max_x = K.max(x, axis=1)
    min_x = K.min(x, axis=1)
    return max_x, min_x
Example #7
  def call(self, inputs):
    if self.data_format == 'channels_last':
      return backend.min(inputs, axis=[1, 2])
    else:
      return backend.min(inputs, axis=[2, 3])
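A hypothetical minimal layer built around this `call()` (the class name and test tensor are assumptions), giving a "global min pooling" counterpart to `keras.layers.GlobalMaxPooling2D`:

import tensorflow as tf
from tensorflow.keras import backend, layers

class GlobalMinPooling2D(layers.Layer):
    """Reduces each feature map to its minimum value."""

    def __init__(self, data_format='channels_last', **kwargs):
        super().__init__(**kwargs)
        self.data_format = data_format

    def call(self, inputs):
        if self.data_format == 'channels_last':
            return backend.min(inputs, axis=[1, 2])
        return backend.min(inputs, axis=[2, 3])

x = tf.random.uniform((2, 8, 8, 16))
print(GlobalMinPooling2D()(x).shape)  # (2, 16)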
Example #8
def calc_adj_mat_error(batch_imgs, batch_size):
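    # Assumed behavior, inferred from the code below: for every image in the
    # batch, collect the contour points of each labelled class (the lowest
    # label, presumably background, and 255 are skipped), compute the minimum
    # point-to-point distance between every pair of classes, and increment
    # adj_mat[i][j] whenever the two classes touch (distance <= 1 pixel).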
    adj_mat = k.zeros(shape=(108, 108))

    for o in range(batch_size):
        img = batch_imgs[o]
        classes = np.unique(img)
        classes = classes[1:]
        if 255 in classes:
            classes = classes[:-1]
        mat_contour = []

        for i in range(len(classes)):

            value = classes[i]
            mask = cv2.inRange(img, int(value), int(value))
            per, _ = cv2.findContours(image=mask, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)

            mat_total = k.zeros(shape=(1, 2))

            for q in range(len(per)):

                tmp = per[q]
                mat = k.zeros(shape=(len(tmp), 2))
                for j in range(len(tmp)):
                    point = tmp[j]
                    x = point[0][0]
                    y = point[0][1]
                    mat[j][0] = x
                    mat[j][1] = y

                mat_total = k.concatenate((mat_total, mat), axis=0)

            mat_contour.append(mat_total[1:])

        for i in range(len(classes)):
            tmp = mat_contour[i]

            for j in range(i + 1, len(classes)):
                # for j in range(0, len(classes)):
                min_v = sys.maxsize
                second_mat = mat_contour[j]

                for p in range(len(tmp)):
                    first_mat = tmp[p]

                    dif = first_mat - second_mat
                    # dif = np.multiply(dif, dif)
                    dif = dif * dif
                    sum_mat = k.sum(dif, 1)
                    sqrt = k.sqrt(sum_mat)

                    min_tmp = k.min(sqrt)
                    if min_tmp < min_v:
                        min_v = min_tmp

                if min_v <= 1:
                    adj_mat[classes[i]][classes[j]] = 1 + adj_mat[classes[i]][classes[j]]

    # adj_mat = normalize(adj_mat, axis=1, norm='l1')

    return adj_mat