Example #1
def Grad(unused_g, variables=None):  # pylint: disable=redefined-outer-name
    # `x` and `forward_graph` are captured from the enclosing scope.
    del variables
    gradient_graph = ops.get_default_graph()
    # Shape, rank, and size of a forward-graph tensor are placed in the
    # forward graph rather than in the gradient graph being built here.
    shape = gen_array_ops.shape(x)
    assert shape.graph is forward_graph
    rank = gen_array_ops.rank(x)
    assert rank.graph is forward_graph
    size = gen_array_ops.size(x)
    assert size.graph is forward_graph
    # New ops such as zeros are created in the gradient graph.
    zeros = array_ops.zeros(shape)
    assert zeros.graph is gradient_graph
    return zeros
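
The snippet above uses TensorFlow internals (gen_array_ops, graph capture inside a gradient function). For reference, a minimal sketch of the same three ops through the public API (TF 2.x assumed; the tensor x is illustrative):

import tensorflow as tf

x = tf.zeros([2, 3, 4])
print(tf.shape(x))  # [2 3 4] -- runtime dimensions as an int32 tensor
print(tf.rank(x))   # 3       -- number of dimensions
print(tf.size(x))   # 24      -- total element count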
Example #2
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
    with ops.name_scope(name, "Shape", [input]) as name:
        if isinstance(
                input,
                (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
            return gen_math_ops.cast(input.dense_shape, out_type)
        else:
            if not context.executing_eagerly():
                input_tensor = ops.convert_to_tensor(input)
                input_shape = input_tensor.get_shape()
                if optimize and input_shape.is_fully_defined():
                    return constant(input_shape.as_list(), out_type, name=name)
            return gen_array_ops.shape(input, name=name, out_type=out_type)
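
shape_internal is the helper behind tf.shape: sparse inputs report their dense_shape, and in graph mode a fully defined static shape is constant-folded instead of emitting a runtime Shape op. A short sketch of the observable behavior through the public API (TF 2.x assumed):

import tensorflow as tf

print(tf.shape(tf.zeros([2, 3])))  # [2 3]

# Sparse input: the dense shape is reported, cast to the requested dtype.
sp = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1.0],
                            dense_shape=[4, 5])
print(tf.shape(sp))                # [4 5]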
Example #3
def maxout(inputs, num_units, axis):
    inputs = ops.convert_to_tensor(inputs)
    shape = inputs.get_shape().as_list()
    num_channels = shape[axis]
    # Split the channel axis into num_units groups and append the group
    # size as a trailing dimension.
    shape[axis] = -1
    shape += [num_channels // num_units]
    # Replace unknown static dimensions with their runtime values.
    for i in range(len(shape)):
        if shape[i] is None:
            shape[i] = gen_array_ops.shape(inputs)[i]
    # Maxout: take the maximum over each group of channels.
    outputs = math_ops.reduce_max(gen_array_ops.reshape(inputs, shape),
                                  -1,
                                  keep_dims=False)
    return outputs
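
A hedged usage sketch of the same maxout computation with public ops (shapes illustrative): the channel axis is split into num_units groups and the maximum is taken over each group.

import tensorflow as tf

x = tf.random.normal([8, 6])   # batch of 8, 6 features
num_units = 3                  # 6 features -> 3 maxout units of size 2
y = tf.reduce_max(tf.reshape(x, [-1, num_units, 6 // num_units]), axis=-1)
print(y.shape)                 # (8, 3)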
Example #4
    def _apply_dense(self, H, var, error):
        Q = self.get_slot(var, "Q")  # Process noise
        P = self.get_slot(var, "P")  # Covariance matrix
        S = self._Rt + math_ops.matmul(math_ops.matmul(H, P), H, transpose_b=True)
        Sinv = linalg_ops.matrix_inverse(S, name="Sinv")
        K = math_ops.matmul(math_ops.matmul(P, H, transpose_b=True), Sinv)

        # debugP = math_ops.trace(P) / math_ops.cast(
        #     gen_array_ops.shape(P)[0], dtype=np.float32)
        # debugK = math_ops.sqrt(
        #     math_ops.reduce_sum(math_ops.square(K)) /
        #     math_ops.cast(gen_array_ops.shape(K)[1], dtype=np.float32))
        # K = Print(K, [debugP, debugK], message="P, K : ")

        dW = math_ops.matmul(K, error)
        update_weights = state_ops.assign_add(
            var,
            gen_array_ops.reshape(dW, gen_array_ops.shape(var)),
            use_locking=self._use_locking)
        update_P = state_ops.assign_add(
            P,
            Q - math_ops.matmul(math_ops.matmul(K, S), K, transpose_b=True),
            use_locking=self._use_locking)

        return control_flow_ops.group(*[update_weights, update_P])
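
The optimizer step above is a textbook Kalman filter update applied to the flattened weights: innovation covariance S, gain K, weight delta dW, and a covariance update for P. A hypothetical NumPy sketch of the same algebra (names illustrative, not the author's API):

import numpy as np

def kalman_step(P, H, R, Q, error):
    S = R + H @ P @ H.T             # innovation covariance
    K = P @ H.T @ np.linalg.inv(S)  # Kalman gain
    dW = K @ error                  # weight update
    P_new = P + Q - K @ S @ K.T     # covariance update
    return dW, P_new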
Example #5
  def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs)
    shape = inputs.get_shape().as_list()
    num_channels = shape[self.axis]
    if num_channels % self.num_units:
      raise ValueError('number of features({}) is not '
                       'a multiple of num_units({})'.format(
                           num_channels, self.num_units))
    shape[self.axis] = -1
    shape += [num_channels // self.num_units]

    # Dealing with batches with arbitrary sizes
    for i in range(len(shape)):
      if shape[i] is None:
        shape[i] = gen_array_ops.shape(inputs)[i]
    outputs = math_ops.reduce_max(
        gen_array_ops.reshape(inputs, shape), -1, keep_dims=False)

    return outputs
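
Note how the final shape list mixes static Python ints with 0-d runtime tensors for unknown dimensions; reshape accepts such a mixed list and packs it into a shape tensor. A public-API sketch of the dynamic-batch handling (TF 2.x assumed, sizes illustrative):

import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([None, 6], tf.float32)])
def maxout_call(x):
    shape = x.get_shape().as_list()   # [None, 6]
    num_channels = shape[-1]
    shape[-1] = -1
    shape += [num_channels // 3]      # num_units = 3
    # Replace the unknown batch dim with its runtime value (a 0-d tensor).
    shape = [tf.shape(x)[i] if d is None else d for i, d in enumerate(shape)]
    return tf.reduce_max(tf.reshape(x, shape), axis=-1)

print(maxout_call(tf.random.normal([8, 6])).shape)  # (8, 3)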
Example #6
    def call(self, inputs):
        print("convert {}".format(inputs))
        inputs = ops.convert_to_tensor(inputs)
        shape = inputs.shape

        if shape[self.axis] % self.num_units != 0:
            raise ValueError('number of features({}) is not '
                             'a multiple of group({})'.format(
                                 shape[self.axis], self.num_units))

        out_channel = int(shape[self.axis].value / self.num_units)
        # Dealing with batches with arbitrary sizes
        batchsize = gen_array_ops.shape(inputs)[0]

        pairing = gen_array_ops.reshape(
            inputs,
            [batchsize, self.size, self.size, out_channel, self.num_units])
        outputs = math_ops.reduce_max(pairing,
                                      axis=self.axis + 1,
                                      keep_dims=False)

        return outputs
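
A hedged sketch of the same grouping with public ops, assuming NHWC inputs with square spatial dimensions (all sizes illustrative):

import tensorflow as tf

x = tf.random.normal([4, 7, 7, 8])  # NHWC, 8 channels
num_units = 2                       # groups of 2 -> 4 output channels
batch = tf.shape(x)[0]              # runtime batch size
pairing = tf.reshape(x, [batch, 7, 7, 8 // num_units, num_units])
y = tf.reduce_max(pairing, axis=4)  # max over each channel group
print(y.shape)                      # (4, 7, 7, 4)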
Example #7
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs)
    shape = gen_array_ops.shape(inputs)  # runtime shape, e.g. NHWC
    batchsize = shape[0]
    out_channel = shape[3]
    # Collapse to [batch, channels]; this only succeeds when the
    # remaining dimensions (e.g. height and width) multiply to 1.
    return gen_array_ops.reshape(inputs, [batchsize, out_channel])
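
For comparison, the same squeeze to two dimensions with public ops; the reshape only succeeds when the remaining dimensions multiply to 1 (input shape illustrative, e.g. after global pooling):

import tensorflow as tf

x = tf.zeros([4, 1, 1, 16])
shape = tf.shape(x)                      # runtime [4, 1, 1, 16]
y = tf.reshape(x, [shape[0], shape[3]])  # -> [batch, channels]
print(y.shape)                           # (4, 16)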