Example #1
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  return gen_math_ops.logical_and(
      gen_math_ops.logical_or(x, y),
      gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
      name=name)
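XOR is composed here from AND, OR, and NOT: x ^ y = (x | y) & ~(x & y). A quick sanity check with the public TensorFlow API (a sketch, not part of the original module):

import tensorflow as tf

x = tf.constant([True, True, False, False])
y = tf.constant([True, False, True, False])
xor = tf.logical_and(tf.logical_or(x, y),
                     tf.logical_not(tf.logical_and(x, y)))
# -> [False, True, True, False]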
Example #2
def jaccard_index(y_true, y_pred, data_format=None):
    '''Jaccard index, or Intersection over Union (IoU). (metric)
    IoU is often considered a better measure of segmentation accuracy than
    pixel-wise losses. If both y_true and y_pred are binary, the intersection
    I(y_true, y_pred) covers the pixels where the prediction is correct, while
    the union U(y_true, y_pred) contains both correct and wrong predictions,
    so I/U gives the proportion of correct predictions. Compared to other
    error functions (like MSE), it concentrates on the regions where y_true=1
    or y_pred=1.
    This function is implemented as:
        jacc = sum(logical_and(y_true, y_pred)) / sum(logical_or(y_true, y_pred))
    Arguments:
        data_format: 'channels_first' or 'channels_last'. The default is
                     'channels_last', as in other tf.keras APIs.
    Input:
        y_true: label, a tensor of any shape with at least 3 axes.
        y_pred: prediction, a tensor of any shape with at least 3 axes.
    Output:
        scalar, the mean Jaccard index between y_true and y_pred over all
        channels.
    '''
    # Axes to reduce over, i.e. everything except the channel axis.
    get_reduced_axes = get_channels(y_true, data_format)
    # Binarize both tensors at a 0.5 threshold.
    bin_y_true = gen_math_ops.greater(y_true, 0.5)
    bin_y_pred = gen_math_ops.greater(y_pred, 0.5)
    valNumer = gen_math_ops.logical_and(bin_y_pred, bin_y_true)
    valDenom = gen_math_ops.logical_or(bin_y_pred, bin_y_true)
    valNumer = math_ops.reduce_sum(math_ops.cast(valNumer, dtype=y_pred.dtype),
                                   axis=get_reduced_axes)
    valDenom = math_ops.reduce_sum(math_ops.cast(valDenom, dtype=y_pred.dtype),
                                   axis=get_reduced_axes)
    # div_no_nan returns 0 where the union is empty, avoiding NaNs.
    return math_ops.reduce_mean(math_ops.div_no_nan(valNumer, valDenom))
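A tiny worked example of the formula on a 2x2 binary mask (plain NumPy, independent of the get_channels helper above):

import numpy as np

y_true = np.array([[1, 1], [0, 0]], dtype=bool)
y_pred = np.array([[1, 0], [1, 0]], dtype=bool)
intersection = np.logical_and(y_true, y_pred).sum()  # 1 pixel agrees
union = np.logical_or(y_true, y_pred).sum()          # 3 pixels set in either
iou = intersection / union                           # 1/3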
Example #3
        def __body(w_, e_, mask, b):
            e = math_ops.cast(distributions.Beta((self.__mf - 1.0) / 2.0,
                                                 (self.__mf - 1.0) / 2.0).
                              sample(shape, seed=seed), dtype=self.dtype)

            u = random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)
            w = (1.0 - (1.0 + b) * e) / (1.0 - (1.0 - b) * e)
            x = (1.0 - b) / (1.0 + b)
            c = self.scale * x + (self.__mf - 1) * math_ops.log1p(-x**2)

            tmp = tf.clip_by_value(x * w, 0, 1 - 1e-16)
            reject = gen_math_ops.less(((self.__mf - 1.0) * math_ops.log(1.0 - tmp) +
                                        self.scale * w - c),
                                       math_ops.log(u))
            accept = gen_math_ops.logical_not(reject)

            w_ = array_ops.where(gen_math_ops.logical_and(mask, accept), w, w_)
            e_ = array_ops.where(gen_math_ops.logical_and(mask, accept), e, e_)
            mask = array_ops.where(gen_math_ops.logical_and(mask, accept),
                                   reject, mask)

            return w_, e_, mask, b
Example #4
        def __body(w_, e_, bool_mask, b, a, d):
            e = math_ops.cast(Beta((self.__mf - 1) / 2,
                                   (self.__mf - 1) / 2).sample(shape,
                                                               seed=seed),
                              dtype=self.dtype)

            u = random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)

            w = (1 - (1 + b) * e) / (1 - (1 - b) * e)
            t = (2 * a * b) / (1 - (1 - b) * e)

            accept = gen_math_ops.greater(
                ((self.__mf - 1) * math_ops.log(t) - t + d), math_ops.log(u))
            reject = gen_math_ops.logical_not(accept)

            w_ = array_ops.where(gen_math_ops.logical_and(bool_mask, accept),
                                 w, w_)
            e_ = array_ops.where(gen_math_ops.logical_and(bool_mask, accept),
                                 e, e_)
            bool_mask = array_ops.where(
                gen_math_ops.logical_and(bool_mask, accept), reject, bool_mask)

            return w_, e_, bool_mask, b, a, d
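Both __body snippets above are while-loop bodies for rejection sampling: draw a Beta-distributed proposal, evaluate an accept/reject test in log space, write accepted values through the active mask, and clear the mask at accepted positions. A stripped-down sketch of how such a body is typically driven (placeholder names and a stand-in acceptance test, not the original code):

import tensorflow as tf

def cond(w, mask):
    return tf.reduce_any(mask)  # loop while any sample is still pending

def body(w, mask):
    proposal = tf.random.uniform(tf.shape(w))
    accept = proposal > 0.5  # stand-in for the real acceptance test
    newly_done = tf.logical_and(mask, accept)
    w = tf.where(newly_done, proposal, w)     # keep accepted proposals
    mask = tf.where(newly_done, False, mask)  # stop resampling them
    return w, mask

w0 = tf.zeros([4])
mask0 = tf.ones([4], dtype=tf.bool)
w_final, _ = tf.while_loop(cond, body, (w0, mask0))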
Example #5
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = []

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (1. /
                       (1. + self.decay *
                        math_ops.cast(self.iterations, K.dtype(self.decay))))

        with ops.control_dependencies(
            [state_ops.assign_add(self.iterations, 1)]):
            t = math_ops.cast(self.iterations, K.floatx())
        lr_bc = gen_math_ops.sqrt(1. - math_ops.pow(self.beta_2, t)) / (
            1. - math_ops.pow(self.beta_1, t))

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        lams = [K.zeros(1, dtype=K.dtype(p)) for p in params]
        conds = [K.variable(False, dtype='bool') for p in params]
        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]
        self.weights = [self.iterations] + ms + vs + vhats + lams + conds

        for p, g, m, v, vhat, lam, cond in zip(params, grads, ms, vs, vhats,
                                               lams, conds):
            beta_g = m_switch(cond, 1.0, 1.0 - self.beta_1)
            m_t = (self.beta_1 * m) + beta_g * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
            if self.amsgrad:
                vhat_t = math_ops.maximum(vhat, v_t)
                p_t_ada = lr_bc * m_t / (gen_math_ops.sqrt(vhat_t) +
                                         self.epsilon)
                self.updates.append(state_ops.assign(vhat, vhat_t))
            else:
                p_t_ada = lr_bc * m_t / (gen_math_ops.sqrt(v_t) + self.epsilon)
            gamma_den = math_ops.reduce_sum(p_t_ada * g)
            gamma = math_ops.reduce_sum(gen_math_ops.square(p_t_ada)) / (
                math_ops.abs(gamma_den) +
                self.epsilon) * (gen_math_ops.sign(gamma_den) + self.epsilon)
            lam_t = (self.beta_2 * lam) + (1. - self.beta_2) * gamma
            lam_prime = lam / (1. - math_ops.pow(self.beta_2, t))
            lam_t_prime = lam_t / (1. - math_ops.pow(self.beta_2, t))
            lg_err = math_ops.abs(lam_t_prime - gamma)
            cond_update = gen_math_ops.logical_or(
                gen_math_ops.logical_and(
                    gen_math_ops.logical_and(self.iterations > 1,
                                             lg_err < 1e-5), lam_t > 0),
                cond)[0]
            lam_update = m_switch(cond_update, lam, lam_t)
            self.updates.append(state_ops.assign(lam, lam_update))
            self.updates.append(state_ops.assign(cond, cond_update))

            p_t_sgd = (1. - self.beta_1) * lam_prime * m_t

            self.updates.append(state_ops.assign(m, m_t))
            self.updates.append(state_ops.assign(v, v_t))

            new_p = m_switch(cond, p - lr * p_t_sgd, p - lr * p_t_ada)

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(state_ops.assign(p, new_p))
        return self.updates
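This get_updates appears to implement an Adam variant that can switch each parameter to an SGD-style update once the estimated SGD learning rate (lam) stabilizes. It relies on an m_switch helper that the snippet does not define; presumably it selects between two values based on a boolean condition. A minimal reconstruction under that assumption (hypothetical, not the original helper):

import tensorflow as tf

def m_switch(cond, a, b):
    # Hypothetical helper: pick `a` where the boolean `cond` holds and `b`
    # otherwise; tf.where broadcasts a scalar condition over tensor operands.
    return tf.where(cond, a, b)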
Example #6
 def while_cond(iterate_index, *state):
     return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))
Example #7
 def while_cond(epoch_number, iterate, *state):
     del iterate
     return gen_math_ops.logical_and(epoch_number < 1,
                                     extra_test(*state))
Example #8
 def while_cond(iterate_index, *state):
   return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))
Example #9
 def while_cond(epoch_number, iterate, *state):
   del iterate
   return gen_math_ops.logical_and(epoch_number < 1, extra_test(*state))
Example #10
 def while_cond(iterate_index, *state):
   if extra_test is not None:
     return gen_math_ops.logical_and(iterate_index < n, extra_test(*state))
   return iterate_index < n
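All of these while_cond variants combine the loop bound with an optional user-supplied extra_test so the loop can terminate early. A self-contained sketch of the pattern using the public API (the values are illustrative):

import tensorflow as tf

n = tf.constant(10)
extra_test = lambda total: total <= 20  # stop once the total passes 20

def while_cond(i, total):
    return tf.logical_and(i < n, extra_test(total))

def while_body(i, total):
    return i + 1, total + i

_, total = tf.while_loop(while_cond, while_body,
                         (tf.constant(0), tf.constant(0)))
# total == 21: 0+1+...+6 is the first partial sum above 20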
Example #11
    def _get_queue_ops_stale(self, var_update_op: ops.Operation,
                             source_op: ops.Operation, is_chief: bool,
                             is_trainable: bool) -> List[ops.Operation]:
        """
        Get queue operations for staleness synchronous parameter update.

        Maintain a list of queues of size equal to <staleness>. At the beginning of each call of this function
        (either by the chief worker or other workers), it checks whether each queue is not full. If yes, it pushes
        a token to each queue. If not, it does nothing (a no_op).
        Then, for the current worker that calls this function, it dequeues a token from its corresponding queue
        (indexed by its worker id).
        The potential enqueue operations and definite dequeue operation are grouped together, and have to be
        finished before the model moves on to the next step.
        As at each invocation of this function, a row of empty space in the list of queues will be filled. Thus
        <staleness> number of consecutive dequeue operations can be done by a worker without blocking, achieving
        stale synchronous parameter update with maximum <staleness> steps difference.

        Args:
            var_update_op: The op

        Returns:
            A list of queue operations.
        """
        var_op = var_update_op.inputs[UPDATE_OP_VAR_POS].op

        var_update_sync_queues = \
            [data_flow_ops.FIFOQueue(self._staleness, [dtypes.bool], shapes=None,
                                     name='%s_update_sync_queue_%d' % (var_op.name, i),
                                     shared_name='%s_update_sync_queue_%d' % (var_op.name, i))
             for i in range(self.num_workers)]

        # Enqueue one token to every queue if all queues are not full.
        def _enqueue_row_op():
            enqueue_ops = []
            for q in var_update_sync_queues:
                enqueue_ops.append(q.enqueue(False))
            enqueue_a_row_ops = control_flow_ops.group(*enqueue_ops)
            return enqueue_a_row_ops

        def _no_op():
            return gen_control_flow_ops.no_op()

        switch_cond = gen_array_ops.identity(True)
        for q in var_update_sync_queues:
            switch_cond = gen_math_ops.logical_and(
                switch_cond,
                gen_math_ops.less(q.size(),
                                  gen_array_ops.identity(self._staleness)))

        enqueue_a_row_ops = control_flow_ops.cond(switch_cond, _enqueue_row_op,
                                                  _no_op)

        queue_ops = [enqueue_a_row_ops]

        if is_chief:
            if is_trainable:
                var_update_deps = [
                    self._var_op_to_accum_apply_op[var_op], source_op
                ]
            else:
                var_update_deps = [var_update_op]
            with ops.control_dependencies(var_update_deps):
                dequeue = var_update_sync_queues[self.worker_id].dequeue()
        else:
            # wait for execution of var_update_op
            if is_trainable:
                with ops.control_dependencies(
                    [self._var_op_to_accum_apply_op[var_op]]):
                    dequeue = var_update_sync_queues[self.worker_id].dequeue()
            else:
                dequeue = var_update_sync_queues[self.worker_id].dequeue()
        queue_ops.append(dequeue)

        return queue_ops
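A toy eager-mode illustration of the bound the docstring describes (hypothetical sizes; tf.queue.FIFOQueue is the public counterpart of the data_flow_ops queue used above):

import tensorflow as tf

staleness, num_workers = 3, 2
queues = [tf.queue.FIFOQueue(staleness, tf.bool) for _ in range(num_workers)]

def step(worker_id):
    # Refill one "row" of tokens only while every queue still has space,
    # mirroring the all-queues-not-full switch_cond above.
    if all(int(q.size()) < staleness for q in queues):
        for q in queues:
            q.enqueue(False)
    # Each worker drains only its own queue. Once the slowest worker's queue
    # is full, refills stop, so a fast worker's queue eventually empties and
    # its dequeue blocks, bounding the step gap by `staleness`.
    return queues[worker_id].dequeue()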