Example #1
def _ceil_divide_int(x, y):
    """Returns ceil(x / y) as tf.int32."""
    z = tf.truediv(x, y)
    tf.debugging.check_numerics(
        z, message='_ceil_divide_int output is NaN or Inf.')
    z = tf.math.ceil(z)
    z = tf.cast(z, dtype=tf.int32)
    return z
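A quick standalone check of the same ceil-division idiom (a minimal sketch, assuming TF 2.x eager execution; the helper above targets graph-mode code, and the values here are illustrative):

import tensorflow as tf

x = tf.constant(10)
y = tf.constant(3)
z = tf.cast(tf.math.ceil(tf.truediv(x, y)), tf.int32)
print(z.numpy())  # 4 == ceil(10 / 3)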
Example #2
def _classification_loss(pred_labels, gt_labels, num_matched_boxes):
    """Computes the classification loss.

    Computes the classification loss with hard negative mining.
    Args:
      pred_labels: a dict from index to tensor of predicted classes. The shape
        of the tensor is [batch_size, num_anchors, num_classes].
      gt_labels: a list of tensors that represent the classification groundtruth
        targets. The shape is [batch_size, num_anchors, 1].
      num_matched_boxes: the number of anchors that are matched to a groundtruth
        target. This is used as the loss normalizer.
    Returns:
      class_loss: a float32 representing the total classification loss.
    """
    with tf.name_scope("class_loss_scope"):
        keys = sorted(pred_labels.keys())
        batch_size = gt_labels[0].shape[0]

        assert (len(keys) == 1)
        assert (keys[0] == 'flatten')
        gt_label = gt_labels[0]
        pred_label = pred_labels['flatten']

        cross_entropy = tf.reshape(_softmax_cross_entropy_mme(
            pred_label, gt_label), [batch_size, -1],
                                   name="cross_entropy")

        mask = tf.greater(gt_label, 0, name="mask")
        float_mask = tf.cast(mask, tf.float32, name="float_mask")

        # Hard example mining
        neg_masked_cross_entropy = cross_entropy * (1 - float_mask)

        num_neg_boxes = tf.expand_dims(
            tf.minimum(
                tf.cast(num_matched_boxes, tf.int32) *
                constants.NEGS_PER_POSITIVE, constants.NUM_SSD_BOXES), -1)
        _, neg_sorted_cross_indices = tf.nn.top_k(
            neg_masked_cross_entropy,
            tf.shape(neg_masked_cross_entropy)[1])  # descending order

        _, neg_sorted_cross_rank = tf.nn.top_k(
            -1 * neg_sorted_cross_indices,
            tf.shape(neg_sorted_cross_indices)[1])  # ascending order
        topk_neg_mask = tf.cast(
            tf.math.less(neg_sorted_cross_rank, num_neg_boxes), tf.float32)

        add = tf.add(float_mask, topk_neg_mask, name="add")

        class_loss = tf.reduce_sum(tf.multiply(cross_entropy, add, name="mul"),
                                   axis=1,
                                   name="reduce_sum")

        normalized_class_loss = tf.truediv(class_loss,
                                           num_matched_boxes,
                                           name="normalized_class_loss")

    return tf.reduce_mean(normalized_class_loss, name="class_loss_1")
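The hard-negative selection above relies on a double top_k: the first sort orders the negative losses, and the second top_k over the negated indices recovers each position's rank in that ordering, so comparing the rank against the negative budget yields an in-place mask. A minimal sketch of that trick with made-up values, assuming TF 2.x eager execution:

import tensorflow as tf

losses = tf.constant([[0.1, 0.9, 0.4, 0.7]])
_, sorted_idx = tf.nn.top_k(losses, tf.shape(losses)[1])     # positions in descending-loss order
_, rank = tf.nn.top_k(-sorted_idx, tf.shape(sorted_idx)[1])  # rank (0 = largest loss) per position
topk_mask = tf.cast(rank < 2, tf.float32)                    # keep the 2 hardest examples
print(topk_mask.numpy())  # [[0. 1. 0. 1.]]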
Example #3
    def _add_loss_graph(self):
        """Define the loss operation."""
        mc = self.mc

        with tf.variable_scope('class_regression') as scope:
            # cross-entropy: q * -log(p) + (1-q) * -log(1-p)
            # add a small value into log to prevent blowing up
            self.class_loss = tf.truediv(tf.reduce_sum(
                (self.labels * (-tf.log(self.pred_class_probs + mc.EPSILON)) +
                 (1 - self.labels) *
                 (-tf.log(1 - self.pred_class_probs + mc.EPSILON))) *
                self.input_mask * mc.LOSS_COEF_CLASS),
                                         self.num_objects,
                                         name='class_loss')
            tf.add_to_collection('losses', self.class_loss)

        with tf.variable_scope('confidence_score_regression') as scope:
            input_mask = tf.reshape(self.input_mask,
                                    [mc.BATCH_SIZE, mc.ANCHORS])
            self.conf_loss = tf.reduce_mean(tf.abs(
                tf.reduce_sum(
                    tf.square((self.ious - self.pred_conf)) *
                    (input_mask * mc.LOSS_COEF_CONF_POS / self.num_objects +
                     (1 - input_mask) * mc.LOSS_COEF_CONF_NEG /
                     (mc.ANCHORS - self.num_objects)),
                    reduction_indices=[1])),
                                            name='confidence_loss')
            tf.add_to_collection('losses', self.conf_loss)
            tf.summary.scalar('mean iou',
                              tf.reduce_sum(self.ious) / self.num_objects)

        with tf.variable_scope('bounding_box_regression') as scope:
            self.bbox_loss = tf.truediv(tf.reduce_sum(
                mc.LOSS_COEF_BBOX *
                tf.square(self.input_mask *
                          (self.pred_box_delta - self.box_delta_input))),
                                        self.num_objects,
                                        name='bbox_loss')
            tf.add_to_collection('losses', self.bbox_loss)

        # add above losses as well as weight decay losses to form the total loss
        self.loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
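For reference, the epsilon-stabilised binary cross-entropy term used in the class_regression block can be checked in isolation; a minimal sketch with made-up values, assuming TF 2.x eager execution (tf.math.log standing in for the TF 1.x tf.log used above):

import tensorflow as tf

labels = tf.constant([1.0, 0.0])
probs = tf.constant([0.9, 0.2])
eps = 1e-8
ce = labels * (-tf.math.log(probs + eps)) + (1 - labels) * (-tf.math.log(1 - probs + eps))
print(ce.numpy())  # [~0.105, ~0.223] == [-ln(0.9), -ln(0.8)]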
Example #4
def _safe_div(numerator, denominator):
    """Divides two tensors element-wise, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
    t = tf.truediv(numerator, denominator)
    zero = tf.zeros_like(t, dtype=denominator.dtype)
    condition = tf.greater(denominator, zero)
    zero = tf.cast(zero, t.dtype)
    return tf.where(condition, t, zero)
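A minimal usage check, assuming the helper above is in scope and TF 2.x eager execution (values are illustrative): a non-positive denominator yields 0 instead of inf or nan.

import tensorflow as tf

num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, -1.0])
print(_safe_div(num, den).numpy())  # [0.5 0.  0. ]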
Example #5
def calculate_signal_to_noise_ratio_from_power(signal_power, noise_power,
                                               epsilon):
    """Computes the signal to noise ratio given signal_power and noise_power.

  Args:
    signal_power: A tensor of unknown shape and arbitrary rank.
    noise_power: A tensor matching the signal tensor.
    epsilon: An optional float for numerical stability, since silences
      can lead to divide-by-zero.

  Returns:
    A tensor of size [...] with SNR computed between matching slices of the
    input signal and noise tensors.
  """
    # Pre-multiplication and change of logarithm base.
    constant = tf.cast(10.0 / tf.log(10.0), signal_power.dtype)

    return constant * tf.log(
        tf.truediv(signal_power + epsilon, noise_power + epsilon))
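The 10 / log(10) factor converts the natural logarithm into a base-10 logarithm scaled to decibels. A quick numeric check, a sketch assuming TF 2.x eager execution with tf.math.log in place of the TF 1.x tf.log:

import tensorflow as tf

signal_power = tf.constant(100.0)
noise_power = tf.constant(1.0)
constant = tf.cast(10.0 / tf.math.log(10.0), signal_power.dtype)
snr_db = constant * tf.math.log(tf.truediv(signal_power + 1e-8, noise_power + 1e-8))
print(snr_db.numpy())  # ~20.0 dB == 10 * log10(100 / 1)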
Example #6
def ioa(boxlist1, boxlist2, scope=None):
    """Computes pairwise intersection-over-area between box collections.

  intersection-over-area (IOA) between two boxes box1 and box2 is defined as
  their intersection area over box2's area. Note that ioa is not symmetric,
  that is, ioa(box1, box2) != ioa(box2, box1).

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise ioa scores.
  """
    with tf.name_scope(scope, 'IOA'):
        intersections = intersection(boxlist1, boxlist2)
        areas = tf.expand_dims(area(boxlist2), 0)
        return tf.truediv(intersections, areas)
Example #7
def matched_iou(boxlist1, boxlist2, scope=None):
    """Compute intersection-over-union between corresponding boxes in boxlists.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding N boxes
    scope: name scope.

  Returns:
    a tensor with shape [N] representing pairwise iou scores.
  """
    with tf.name_scope(scope, 'MatchedIOU'):
        intersections = matched_intersection(boxlist1, boxlist2)
        areas1 = area(boxlist1)
        areas2 = area(boxlist2)
        unions = areas1 + areas2 - intersections
        return tf.where(tf.equal(intersections, 0.0),
                        tf.zeros_like(intersections),
                        tf.truediv(intersections, unions))
Example #8
def iou(boxlist1, boxlist2, scope=None):
    """Computes pairwise intersection-over-union between box collections.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise iou scores.
  """
    with tf.name_scope(scope, 'IOU'):
        intersections = intersection(boxlist1, boxlist2)
        areas1 = area(boxlist1)
        areas2 = area(boxlist2)
        unions = (tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) -
                  intersections)
        return tf.where(tf.equal(intersections, 0.0),
                        tf.zeros_like(intersections),
                        tf.truediv(intersections, unions))
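The same tf.where + tf.truediv pattern can be exercised without the BoxList, intersection, and area helpers; the standalone sketch below (illustrative only, assuming TF 2.x eager execution and [N, 4] corner boxes as ymin, xmin, ymax, xmax) computes pairwise IoU directly:

import tensorflow as tf

def pairwise_iou(boxes1, boxes2):
    ymin1, xmin1, ymax1, xmax1 = tf.split(boxes1, 4, axis=1)
    ymin2, xmin2, ymax2, xmax2 = tf.split(boxes2, 4, axis=1)
    inter_h = tf.maximum(0.0, tf.minimum(ymax1, tf.transpose(ymax2)) -
                         tf.maximum(ymin1, tf.transpose(ymin2)))
    inter_w = tf.maximum(0.0, tf.minimum(xmax1, tf.transpose(xmax2)) -
                         tf.maximum(xmin1, tf.transpose(xmin2)))
    intersections = inter_h * inter_w                      # [N, M]
    areas1 = (ymax1 - ymin1) * (xmax1 - xmin1)             # [N, 1]
    areas2 = (ymax2 - ymin2) * (xmax2 - xmin2)             # [M, 1]
    unions = areas1 + tf.transpose(areas2) - intersections
    return tf.where(tf.equal(intersections, 0.0),
                    tf.zeros_like(intersections),
                    tf.truediv(intersections, unions))

print(pairwise_iou(tf.constant([[0., 0., 2., 2.]]),
                   tf.constant([[1., 1., 3., 3.]])).numpy())  # [[~0.1429]] == 1 / 7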
Example #9
def calculate_signal_to_noise_ratio(signal, noise, epsilon=1.0e-5):
    """Computes the signal to noise ratio given signal and noise.

  Args:
    signal: A [..., samples] tensor of unknown shape and arbitrary rank.
    noise: A tensor matching the signal tensor.
    epsilon: An optional float for numerical stability, since silences
      can lead to divide-by-zero.

  Returns:
    A tensor of size [...] with SNR computed between matching slices of the
    input signal and noise tensors.
  """
    def power(x):
        return tf.reduce_sum(tf.square(x), reduction_indices=[-1])

    # Pre-multiplication and change of logarithm base.
    constant = tf.cast(10.0 / tf.log(10.0), signal.dtype)

    return constant * tf.log(
        tf.truediv(power(signal) + epsilon,
                   power(noise) + epsilon))
Example #10
 def normalize(v):
   return tf.truediv(v, tf.cast(self._num_microbatches, tf.float32))
def normalize_to_target(inputs,
                        target_norm_value,
                        dim,
                        epsilon=1e-7,
                        trainable=True,
                        scope='NormalizeToTarget',
                        summarize=True):
  """L2 normalizes the inputs across the specified dimension to a target norm.

  This op implements the L2 Normalization layer introduced in
  Liu, Wei, et al. "SSD: Single Shot MultiBox Detector."
  and Liu, Wei, Andrew Rabinovich, and Alexander C. Berg.
  "Parsenet: Looking wider to see better." and is useful for bringing
  activations from multiple layers in a convnet to a standard scale.

  Note that the rank of `inputs` must be known and the dimension to which
  normalization is to be applied should be statically defined.

  TODO(jonathanhuang): Add option to scale by L2 norm of the entire input.

  Args:
    inputs: A `Tensor` of arbitrary size.
    target_norm_value: A float value that specifies an initial target norm or
      a list of floats (whose length must be equal to the depth along the
      dimension to be normalized) specifying a per-dimension multiplier
      after normalization.
    dim: The dimension along which the input is normalized.
    epsilon: A small value to add to the inputs to avoid dividing by zero.
    trainable: Whether the norm is trainable or not
    scope: Optional scope for variable_scope.
    summarize: Whether or not to add a tensorflow summary for the op.

  Returns:
    The input tensor normalized to the specified target norm.

  Raises:
    ValueError: If dim is smaller than the number of dimensions in 'inputs'.
    ValueError: If target_norm_value is not a float or a list of floats with
      length equal to the depth along the dimension to be normalized.
  """
  with tf.variable_scope(scope, 'NormalizeToTarget', [inputs]):
    if not inputs.get_shape():
      raise ValueError('The input rank must be known.')
    input_shape = inputs.get_shape().as_list()
    input_rank = len(input_shape)
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be non-negative but smaller than the input rank.')
    if not input_shape[dim]:
      raise ValueError('input shape should be statically defined along '
                       'the specified dimension.')
    depth = input_shape[dim]
    if not (isinstance(target_norm_value, float) or
            (isinstance(target_norm_value, list) and
             len(target_norm_value) == depth) and
            all([isinstance(val, float) for val in target_norm_value])):
      raise ValueError('target_norm_value must be a float or a list of floats '
                       'with length equal to the depth along the dimension to '
                       'be normalized.')
    if isinstance(target_norm_value, float):
      initial_norm = depth * [target_norm_value]
    else:
      initial_norm = target_norm_value
    target_norm = slim.model_variable(
        name='weights',
        dtype=tf.float32,
        initializer=tf.constant(initial_norm, dtype=tf.float32),
        trainable=trainable)
    if summarize:
      mean = tf.reduce_mean(target_norm)
      tf.summary.scalar(tf.get_variable_scope().name, mean)
    lengths = epsilon + tf.sqrt(tf.reduce_sum(tf.square(inputs), dim, True))
    mult_shape = input_rank*[1]
    mult_shape[dim] = depth
    return tf.reshape(target_norm, mult_shape) * tf.truediv(inputs, lengths)
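Stripped of the slim variable machinery, the core computation above is an L2 normalization along dim followed by a rescale toward the target norm; a minimal sketch assuming TF 2.x eager execution, a scalar target norm of 1.0, and made-up inputs:

import tensorflow as tf

inputs = tf.constant([[3.0, 4.0], [6.0, 8.0]])
dim, epsilon, target = 1, 1e-7, 1.0
lengths = epsilon + tf.sqrt(tf.reduce_sum(tf.square(inputs), dim, keepdims=True))
print((target * tf.truediv(inputs, lengths)).numpy())  # each row rescaled to unit L2 norm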
Example #12
# tf.add: addition
# tf.subtract: subtraction
# tf.multiply: multiplication
# tf.truediv: division quotient (true division)
# tf.mod: division remainder
# tf.abs: absolute value

sess = tf.Session()
a = tf.constant(17)
b = tf.constant(5)

# addition
c = tf.add(a, b)
print(sess.run(c))
# subtraction
c = tf.subtract(a, b)
print(sess.run(c))
# multiplication
c = tf.multiply(a, b)
print(sess.run(c))
# division quotient
c = tf.truediv(a, b)
print(sess.run(c))
# division remainder
c = tf.mod(a, b)
print(sess.run(c))
# absolute value
c = tf.abs(-a)
print(sess.run(c))
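For comparison, the same arithmetic ops run without a Session under TF 2.x eager execution (a sketch mirroring the values above):

import tensorflow as tf

a = tf.constant(17)
b = tf.constant(5)
print(tf.add(a, b).numpy())       # 22
print(tf.subtract(a, b).numpy())  # 12
print(tf.multiply(a, b).numpy())  # 85
print(tf.truediv(a, b).numpy())   # 3.4  (true division always returns a float)
print(tf.math.mod(a, b).numpy())  # 2
print(tf.abs(-a).numpy())         # 17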

Example #13
 def normalize(v):
   return tf.truediv(v, global_state.denominator)
Example #14
 def normalize(v):
     try:
         return tf.truediv(
             v, tf.cast(self._num_microbatches, tf.float32))
     except TypeError:
         return None
Example #15
# initialize placeholders
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[1, 1]))
b = tf.Variable(tf.random_uniform(shape=[1, 1]))

# In[2]:

with tf.Session() as sess:
    formula = tf.add(tf.matmul(x_data, A), b)
    demm_numer = tf.abs(tf.subtract(formula, y_target))  # numerator
    demm_denom = tf.sqrt(tf.add(tf.square(A), 1))  # denominator
    loss = tf.reduce_mean(tf.truediv(demm_numer, demm_denom))  # distance from each point to the line

    opt = tf.train.GradientDescentOptimizer(learning_rate=0.15)
    train_step = opt.minimize(loss)

    init = tf.global_variables_initializer()
    init.run()

    loss_vec = []
    batch_size = 125

    for i in range(1000):
        rand_idx = np.random.choice(len(x_val), size=batch_size)
        rand_x = x_val[rand_idx].reshape(-1, 1)
        rand_y = y_val[rand_idx].reshape(-1, 1)
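The loss above is the orthogonal distance |A*x + b - y| / sqrt(A^2 + 1) from each point to the fitted line y = A*x + b. A quick numeric check with made-up values, assuming TF 2.x eager execution:

import tensorflow as tf

A, b = 2.0, 1.0
x, y = tf.constant([[1.0]]), tf.constant([[0.0]])
numer = tf.abs(tf.subtract(tf.add(tf.multiply(A, x), b), y))
denom = tf.sqrt(tf.add(tf.square(A), 1.0))
print(tf.truediv(numer, denom).numpy())  # [[~1.342]] == 3 / sqrt(5)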
Example #16
    def call(self, x):
        input_image, y_pred, y_true, true_boxes = x

        # adjust the shape of y_pred to [batch, grid_h, grid_w, 3, 4+1+nb_class]
        y_pred = tf.reshape(
            y_pred,
            tf.concat([tf.shape(input=y_pred)[:3],
                       tf.constant([3, -1])],
                      axis=0))

        # initialize the masks
        object_mask = tf.expand_dims(y_true[..., 4], 4)

        # the variable to keep track of number of batches processed
        batch_seen = tf.Variable(0.)

        # compute grid factor and net factor
        grid_h = tf.shape(input=y_true)[1]
        grid_w = tf.shape(input=y_true)[2]
        grid_factor = tf.reshape(tf.cast([grid_w, grid_h], tf.float32),
                                 [1, 1, 1, 1, 2])

        net_h = tf.shape(input=input_image)[1]
        net_w = tf.shape(input=input_image)[2]
        net_factor = tf.reshape(tf.cast([net_w, net_h], tf.float32),
                                [1, 1, 1, 1, 2])
        """
        Adjust prediction
        """
        pred_box_xy = (self.cell_grid[:, :grid_h, :grid_w, :, :] +
                       tf.sigmoid(y_pred[..., :2]))  # sigma(t_xy) + c_xy
        pred_box_wh = y_pred[..., 2:4]  # t_wh
        pred_box_conf = tf.expand_dims(tf.sigmoid(y_pred[..., 4]),
                                       4)  # adjust confidence
        pred_box_class = y_pred[..., 5:]  # adjust class probabilities
        """
        Adjust ground truth
        """
        true_box_xy = y_true[..., 0:2]  # (sigma(t_xy) + c_xy)
        true_box_wh = y_true[..., 2:4]  # t_wh
        true_box_conf = tf.expand_dims(y_true[..., 4], 4)
        true_box_class = tf.argmax(input=y_true[..., 5:], axis=-1)
        """
        Compare each predicted box to all true boxes
        """
        # initially, drag all objectness of all boxes to 0
        conf_delta = pred_box_conf - 0

        # then, ignore the boxes which have good overlap with some true box
        true_xy = true_boxes[..., 0:2] / grid_factor
        true_wh = true_boxes[..., 2:4] / net_factor

        true_wh_half = true_wh / 2.
        true_mins = true_xy - true_wh_half
        true_maxes = true_xy + true_wh_half

        pred_xy = tf.expand_dims(pred_box_xy / grid_factor, 4)
        pred_wh = tf.expand_dims(
            tf.exp(pred_box_wh) * self.anchors / net_factor, 4)

        pred_wh_half = pred_wh / 2.
        pred_mins = pred_xy - pred_wh_half
        pred_maxes = pred_xy + pred_wh_half

        intersect_mins = tf.maximum(pred_mins, true_mins)
        intersect_maxes = tf.minimum(pred_maxes, true_maxes)

        intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

        true_areas = true_wh[..., 0] * true_wh[..., 1]
        pred_areas = pred_wh[..., 0] * pred_wh[..., 1]

        union_areas = pred_areas + true_areas - intersect_areas
        iou_scores = tf.truediv(intersect_areas, union_areas)

        best_ious = tf.reduce_max(input_tensor=iou_scores, axis=4)
        conf_delta *= tf.expand_dims(
            tf.cast(best_ious < self.ignore_thresh, dtype=tf.float32), 4)
        """
        Compute some online statistics
        """
        true_xy = true_box_xy / grid_factor
        true_wh = tf.exp(true_box_wh) * self.anchors / net_factor

        true_wh_half = true_wh / 2.
        true_mins = true_xy - true_wh_half
        true_maxes = true_xy + true_wh_half

        pred_xy = pred_box_xy / grid_factor
        pred_wh = tf.exp(pred_box_wh) * self.anchors / net_factor

        pred_wh_half = pred_wh / 2.
        pred_mins = pred_xy - pred_wh_half
        pred_maxes = pred_xy + pred_wh_half

        intersect_mins = tf.maximum(pred_mins, true_mins)
        intersect_maxes = tf.minimum(pred_maxes, true_maxes)
        intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

        true_areas = true_wh[..., 0] * true_wh[..., 1]
        pred_areas = pred_wh[..., 0] * pred_wh[..., 1]

        union_areas = pred_areas + true_areas - intersect_areas
        iou_scores = tf.truediv(intersect_areas, union_areas)
        iou_scores = object_mask * tf.expand_dims(iou_scores, 4)

        count = tf.reduce_sum(input_tensor=object_mask)
        count_noobj = tf.reduce_sum(input_tensor=1 - object_mask)
        detect_mask = tf.cast((pred_box_conf * object_mask) >= 0.5,
                              dtype=tf.float32)
        class_mask = tf.expand_dims(
            tf.cast(tf.equal(tf.argmax(input=pred_box_class, axis=-1),
                             true_box_class),
                    dtype=tf.float32), 4)
        recall50 = tf.reduce_sum(
            input_tensor=tf.cast(iou_scores >= 0.5, dtype=tf.float32) *
            detect_mask * class_mask) / (count + 1e-3)
        recall75 = tf.reduce_sum(
            input_tensor=tf.cast(iou_scores >= 0.75, dtype=tf.float32) *
            detect_mask * class_mask) / (count + 1e-3)
        avg_iou = tf.reduce_sum(input_tensor=iou_scores) / (count + 1e-3)
        avg_obj = tf.reduce_sum(input_tensor=pred_box_conf *
                                object_mask) / (count + 1e-3)
        avg_noobj = tf.reduce_sum(input_tensor=pred_box_conf *
                                  (1 - object_mask)) / (count_noobj + 1e-3)
        avg_cat = tf.reduce_sum(input_tensor=object_mask *
                                class_mask) / (count + 1e-3)
        """
        Warm-up training
        """
        batch_seen = tf.assign_add(batch_seen, 1.)

        true_box_xy, true_box_wh, xywh_mask = tf.cond(
            pred=tf.less(batch_seen, self.warmup_batches + 1),
            true_fn=lambda: [
                true_box_xy +
                (0.5 + self.cell_grid[:, :grid_h, :grid_w, :, :]) *
                (1 - object_mask), true_box_wh + tf.zeros_like(true_box_wh) *
                (1 - object_mask),
                tf.ones_like(object_mask)
            ],
            false_fn=lambda: [true_box_xy, true_box_wh, object_mask])
        """
        Compare each true box to all anchor boxes
        """
        wh_scale = tf.exp(true_box_wh) * self.anchors / net_factor
        wh_scale = tf.expand_dims(
            2 - wh_scale[..., 0] * wh_scale[..., 1],
            axis=4)  # the smaller the box, the bigger the scale

        xy_delta = xywh_mask * (pred_box_xy -
                                true_box_xy) * wh_scale * self.xywh_scale
        wh_delta = xywh_mask * (pred_box_wh -
                                true_box_wh) * wh_scale * self.xywh_scale
        conf_delta = object_mask * (
            pred_box_conf - true_box_conf) * self.obj_scale + (
                1 - object_mask) * conf_delta * self.noobj_scale
        class_delta = object_mask * \
                      tf.expand_dims(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class), 4) * \
                      self.class_scale

        loss_xy = tf.reduce_sum(input_tensor=tf.square(xy_delta),
                                axis=list(range(1, 5)))
        loss_wh = tf.reduce_sum(input_tensor=tf.square(wh_delta),
                                axis=list(range(1, 5)))
        loss_conf = tf.reduce_sum(input_tensor=tf.square(conf_delta),
                                  axis=list(range(1, 5)))
        loss_class = tf.reduce_sum(input_tensor=class_delta,
                                   axis=list(range(1, 5)))

        loss = loss_xy + loss_wh + loss_conf + loss_class

        loss = tf.Print(loss, [grid_h, avg_obj],
                        message='avg_obj \t\t',
                        summarize=1000)
        loss = tf.Print(loss, [grid_h, avg_noobj],
                        message='avg_noobj \t\t',
                        summarize=1000)
        loss = tf.Print(loss, [grid_h, avg_iou],
                        message='avg_iou \t\t',
                        summarize=1000)
        loss = tf.Print(loss, [grid_h, avg_cat],
                        message='avg_cat \t\t',
                        summarize=1000)
        loss = tf.Print(loss, [grid_h, recall50],
                        message='recall50 \t',
                        summarize=1000)
        loss = tf.Print(loss, [grid_h, recall75],
                        message='recall75 \t',
                        summarize=1000)
        loss = tf.Print(loss, [grid_h, count],
                        message='count \t',
                        summarize=1000)
        loss = tf.Print(loss, [
            grid_h,
            tf.reduce_sum(input_tensor=loss_xy),
            tf.reduce_sum(input_tensor=loss_wh),
            tf.reduce_sum(input_tensor=loss_conf),
            tf.reduce_sum(input_tensor=loss_class)
        ],
                        message='loss xy, wh, conf, class: \t',
                        summarize=1000)

        return loss * self.grid_scale
Example #17
    def build_model(self, learning_rate=[0.001, 0.01]):
        '''
        Model - wide and deep - built using tflearn
        '''
        n_cc = len(self.continuous_columns)
        n_categories = 1  # two categories: is_idv and is_not_idv
        input_shape = [None, n_cc]
        if self.verbose:
            print("=" * 77 + " Model %s (type=%s)" %
                  (self.name, self.model_type))
            print("  Input placeholder shape=%s" % str(input_shape))
        wide_inputs = tflearn.input_data(shape=input_shape, name="wide_X")
        if not isinstance(learning_rate, list):
            learning_rate = [learning_rate, learning_rate]  # wide, deep
        if self.verbose:
            print("  Learning rates (wide, deep)=%s" % learning_rate)

        with tf.name_scope(
                "Y"):  # placeholder for target variable (i.e. trainY input)
            Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")

        with tf.variable_scope(None, "cb_unit", [wide_inputs]) as scope:
            central_bias = tflearn.variables.variable(
                'central_bias',
                shape=[1],
                initializer=tf.constant_initializer(np.random.randn()),
                trainable=True,
                restore=True)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/cb_unit',
                                 central_bias)

        if 'wide' in self.model_type:
            wide_network = self.wide_model(wide_inputs, n_cc)
            network = wide_network
            wide_network_with_bias = tf.add(wide_network,
                                            central_bias,
                                            name="wide_with_bias")

        if 'deep' in self.model_type:
            deep_network = self.deep_model(wide_inputs, n_cc)
            deep_network_with_bias = tf.add(deep_network,
                                            central_bias,
                                            name="deep_with_bias")
            if 'wide' in self.model_type:
                network = tf.add(wide_network, deep_network)
                if self.verbose:
                    print("Wide + deep model network %s" % network)
            else:
                network = deep_network

        network = tf.add(network, central_bias, name="add_central_bias")

        # add validation monitor summaries giving confusion matrix entries
        with tf.name_scope('Monitors'):
            predictions = tf.cast(tf.greater(network, 0), tf.int64)
            print("predictions=%s" % predictions)
            Ybool = tf.cast(Y_in, tf.bool)
            print("Ybool=%s" % Ybool)
            pos = tf.boolean_mask(predictions, Ybool)
            neg = tf.boolean_mask(predictions, ~Ybool)
            psize = tf.cast(tf.shape(pos)[0], tf.int64)
            nsize = tf.cast(tf.shape(neg)[0], tf.int64)
            true_positive = tf.reduce_sum(pos, name="true_positive")
            false_negative = tf.subtract(psize,
                                         true_positive,
                                         name="false_negative")
            false_positive = tf.reduce_sum(neg, name="false_positive")
            true_negative = tf.subtract(nsize,
                                        false_positive,
                                        name="true_negative")
            overall_accuracy = tf.truediv(tf.add(true_positive, true_negative),
                                          tf.add(nsize, psize),
                                          name="overall_accuracy")
        vmset = [
            true_positive, true_negative, false_positive, false_negative,
            overall_accuracy
        ]

        trainable_vars = tf.trainable_variables()
        tv_deep = [v for v in trainable_vars if v.name.startswith('deep_')]
        tv_wide = [v for v in trainable_vars if v.name.startswith('wide_')]

        if self.verbose:
            print("DEEP trainable_vars")
            for v in tv_deep:
                print("  Variable %s: %s" % (v.name, v))
            print("WIDE trainable_vars")
            for v in tv_wide:
                print("  Variable %s: %s" % (v.name, v))

        if 'wide' in self.model_type:
            if not 'deep' in self.model_type:
                tv_wide.append(central_bias)
            tflearn.regression(
                wide_network_with_bias,
                placeholder=Y_in,
                optimizer='sgd',
                #loss='roc_auc_score',
                loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[0],
                validation_monitors=vmset,
                trainable_vars=tv_wide,
                op_name="wide_regression",
                name="Y")

        if 'deep' in self.model_type:
            if not 'wide' in self.model_type:
                tv_deep.append(central_bias)  # deep-only model: train the central bias with the deep variables
            tflearn.regression(
                deep_network_with_bias,
                placeholder=Y_in,
                optimizer='adam',
                #loss='roc_auc_score',
                loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[1],
                validation_monitors=vmset
                if not 'wide' in self.model_type else None,
                trainable_vars=tv_deep,
                op_name="deep_regression",
                name="Y")

        if self.model_type == 'wide+deep':  # learn central bias separately for wide+deep
            tflearn.regression(
                network,
                placeholder=Y_in,
                optimizer='adam',
                loss='binary_crossentropy',
                metric="accuracy",
                learning_rate=learning_rate[0],  # use wide learning rate
                trainable_vars=[central_bias],
                op_name="central_bias_regression",
                name="Y")

        self.model = tflearn.DNN(
            network,
            tensorboard_verbose=self.tensorboard_verbose,
            max_checkpoints=5,
            checkpoint_path="%s/%s.tfl" % (self.checkpoints_dir, self.name),
        )

        if self.verbose:
            print("Target variables:")
            for v in tf.get_collection(tf.GraphKeys.TARGETS):
                print("  variable %s: %s" % (v.name, v))

            print("=" * 77)
Example #18
    if isinstance(target_norm_value, float):
      initial_norm = depth * [target_norm_value]
    else:
      initial_norm = target_norm_value
    target_norm = slim.model_variable(
        name='weights',
        dtype=tf.float32,
        initializer=tf.constant(initial_norm, dtype=tf.float32),
        trainable=trainable)
    if summarize:
      mean = tf.reduce_mean(target_norm)
      tf.summary.scalar(tf.get_variable_scope().name, mean)
    lengths = epsilon + tf.sqrt(tf.reduce_sum(tf.square(inputs), dim, True))
    mult_shape = input_rank*[1]
    mult_shape[dim] = depth
    return tf.reshape(target_norm, mult_shape) * tf.truediv(inputs, lengths)


def batch_position_sensitive_crop_regions(images,
                                          boxes,
                                          crop_size,
                                          num_spatial_bins,
                                          global_pool,
                                          parallel_iterations=64):
  """Position sensitive crop with batches of images and boxes.

  This op is exactly like `position_sensitive_crop_regions` below but operates
  on batches of images and boxes. See `position_sensitive_crop_regions` function
  below for the operation applied per batch element.

  Args: