Example #1
def sliced_l3(source, target, size=16, nbins=32, alpha=-1, name='idt'):
    src_shape = source.get_shape().as_list()
    tar_shape = target.get_shape().as_list()
    sbatch = src_shape[0]
    tbatch = tar_shape[0]

    height = src_shape[1]
    width = src_shape[2]

    w0, _ = tf.qr(tf.random_normal([height, height], mean=0.0, stddev=1.0))
    w1, _ = tf.qr(
        tf.random_normal([width * 3, width * 3], mean=0.0, stddev=1.0))

    w0_ = tf.expand_dims(w0, 0)
    w1_ = tf.expand_dims(w1, 0)

    w0_s = tf.tile(w0_, [sbatch, 1, 1])
    w1_s = tf.tile(w1_, [sbatch, 1, 1])
    src = tf.reshape(source,
                     [sbatch, height, width * 3])  # NOTE: fold channels into the width axis
    src_proj = tf.matmul(tf.matmul(w0_s, src), w1_s)

    w0_t = tf.tile(w0_, [tbatch, 1, 1])
    w1_t = tf.tile(w1_, [tbatch, 1, 1])
    tar = tf.reshape(target, [tbatch, height, width * 3])
    tar_proj = tf.matmul(tf.matmul(w0_t, tar), w1_t)

    source_proj = tf.reshape(src_proj, [sbatch, height * width * 3])
    target_proj = tf.reshape(tar_proj, [tbatch, height * width * 3])

    # src_out = optimal(source_proj, target_proj, sbatch, tbatch, height * width * 3, nbins, alpha)

    return wasser_1d(source_proj, target_proj, sbatch, tbatch,
                     height * width * 3, nbins, alpha)
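A hedged side note on the construction above (TensorFlow 1.x assumed, names and sizes arbitrary): because w0 and w1 come out of tf.qr of square Gaussian matrices they are orthogonal, so the two-sided projection preserves the Frobenius norm of each reshaped image.

import tensorflow as tf

h, w = 8, 8
x = tf.random_normal([h, w * 3])
w0, _ = tf.qr(tf.random_normal([h, h]))
w1, _ = tf.qr(tf.random_normal([w * 3, w * 3]))
proj = tf.matmul(tf.matmul(w0, x), w1)
with tf.Session() as sess:
    print(sess.run([tf.norm(x), tf.norm(proj)]))  # the two norms agree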
Example #2
 def testWrongDimensions(self):
   # The input to qr should be a tensor of at least rank 2.
   scalar = tf.constant(1.)
   with self.assertRaisesRegexp(ValueError,
                                "Shape must be at least rank 2 but is rank 0"):
     tf.qr(scalar)
   vector = tf.constant([1., 2.])
   with self.assertRaisesRegexp(ValueError,
                                "Shape must be at least rank 2 but is rank 1"):
     tf.qr(vector)
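The test above only exercises the failure cases; as a small companion sketch (TensorFlow 1.x assumed, shapes arbitrary), tf.qr accepts any tensor of rank two or more and factorizes the innermost matrices, including batches:

import tensorflow as tf

batch = tf.random_normal([5, 4, 3])       # a batch of five 4 x 3 matrices
q, r = tf.qr(batch, full_matrices=False)  # q: [5, 4, 3], r: [5, 3, 3]
with tf.Session() as sess:
    q_val, r_val = sess.run([q, r])
    print(q_val.shape, r_val.shape)       # (5, 4, 3) (5, 3, 3)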
Example #3
 def re_unitarize(self, W):
     # TODO: check this.
     #_, U, V = tf.svd(W, full_matrices=True, compute_uv=True)
     # W = tf.matmul(U, tf.transpose(tf.conj(V)))
     W, _ = tf.qr(W)
     W = tf.Print(W, [tf.constant(0)], 'step with qr.')
     return W
Example #4
    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # Check the shape
        if len(shape) < 3 or len(shape) > 5:
            raise ValueError("The tensor to initialize must be at least "
                             "three-dimensional and at most five-dimensional")

        if shape[-2] > shape[-1]:
            raise ValueError("In_filters cannot be greater than out_filters.")

        # Generate a random matrix
        a = tf.random_normal([shape[-1], shape[-1]],
                             dtype=dtype,
                             seed=self.seed)
        # Compute the qr factorization
        q, r = tf.qr(a, full_matrices=False)
        # Make Q uniform
        d = tf.diag_part(r)
        # ph = D / math_ops.abs(D)
        q *= tf.sign(d)
        q = q[:shape[-2], :]
        q *= tf.sqrt(tf.cast(self.gain, dtype=dtype))
        if len(shape) == 3:
            weight = tf.scatter_nd([[(shape[0] - 1) // 2]],
                                   tf.expand_dims(q, 0), shape)
        elif len(shape) == 4:
            weight = tf.scatter_nd([[(shape[0] - 1) // 2,
                                     (shape[1] - 1) // 2]],
                                   tf.expand_dims(q, 0), shape)
        else:
            weight = tf.scatter_nd([[(shape[0] - 1) // 2, (shape[1] - 1) // 2,
                                     (shape[2] - 1) // 2]],
                                   tf.expand_dims(q, 0), shape)
        return weight
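The initializer above plants an orthogonal slice at the spatial centre of the kernel and zeros elsewhere; here is a hedged, stand-alone sketch of the same construction outside the class (TensorFlow 1.x assumed, the shape is an arbitrary example):

import tensorflow as tf

shape = [3, 3, 16, 32]                        # kh, kw, in_filters, out_filters
a = tf.random_normal([shape[-1], shape[-1]])
q, r = tf.qr(a, full_matrices=False)
q *= tf.sign(tf.diag_part(r))                 # fix signs so the factorization is unique
q = q[:shape[-2], :]                          # keep in_filters rows
# place the orthogonal block at the spatial centre tap, zeros elsewhere
kernel = tf.scatter_nd([[(shape[0] - 1) // 2, (shape[1] - 1) // 2]],
                       tf.expand_dims(q, 0), shape)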
Example #5
def sliced_l1(source, target, fz=3, nbins=32, alpha=-1, name='idt'):
    f_num = fz * fz * 3
    src_shape = source.get_shape().as_list()
    tar_shape = target.get_shape().as_list()
    sbatch = src_shape[0]
    tbatch = tar_shape[0]
    ndepth = np.prod(src_shape[1:])

    weight, _ = tf.qr(tf.random_normal([f_num, f_num]))
    weight = tf.reshape(weight, [fz, fz, 3, f_num])
    src_proj = tf.nn.relu(
        tf.nn.conv2d(source, weight, strides=[1, 1, 1, 1], padding='SAME'))
    tar_proj = tf.nn.relu(
        tf.nn.conv2d(target, weight, strides=[1, 1, 1, 1], padding='SAME'))

    src = tf.nn.max_pool(src_proj,
                         ksize=[1, 2, 2, 1],
                         strides=[1, 1, 1, 1],
                         padding="SAME")
    tar = tf.nn.max_pool(tar_proj,
                         ksize=[1, 2, 2, 1],
                         strides=[1, 1, 1, 1],
                         padding="SAME")

    # weight1, _ = tf.qr(tf.random_normal([f_num, f_num]))
    # weight1 = tf.reshape(weight1, [1, 1, f_num, f_num])
    # src = tf.nn.elu(tf.nn.conv2d(src, weight1, strides=[1, 1, 1, 1], padding='SAME'))
    # tar = tf.nn.elu(tf.nn.conv2d(tar, weight1, strides=[1, 1, 1, 1], padding='SAME'))

    sbat = sbatch
    tbat = tbatch
    # ndepth = 64
    # return ID_wasserstein(src_proj, tar_proj, sbat, tbat, ndepth, nbins, alpha)
    # return wasser_1d(src_proj, tar_proj, sbat, tbat, ndepth, nbins, alpha)
    return tf.reduce_mean(tf.abs(src - tar))
Example #6
def project_l1(source, target, fz=3, nbins=32, alpha=-1, name='idt'):
    f_num = fz * fz * 3

    wei, _ = tf.qr(tf.random_normal([f_num, f_num]))
    weight = tf.reshape(wei, [fz, fz, 3, f_num])
    src_proj = tf.nn.conv2d(source,
                            weight,
                            strides=[1, fz, fz, 1],
                            padding='VALID')
    tar_proj = tf.nn.conv2d(target,
                            weight,
                            strides=[1, fz, fz, 1],
                            padding='VALID')

    src_shape = src_proj.get_shape().as_list()
    tar_shape = tar_proj.get_shape().as_list()
    sbatch = src_shape[0]
    tbatch = tar_shape[0]
    ndepth = np.prod(src_shape[1:])

    src_new = optimal(tf.reshape(src_proj, [-1, ndepth]),
                      tf.reshape(tar_proj, [-1, ndepth]), sbatch, tbatch,
                      ndepth, nbins, alpha)
    src_new = tf.reshape(src_new, src_proj.get_shape())
    # conv2d_transpose with the same orthogonal filter applies the adjoint of the
    # projection above (and, since the filter is orthogonal, its inverse)
    src_out = tf.nn.conv2d_transpose(src_new,
                                     weight,
                                     source.get_shape(),
                                     strides=[1, fz, fz, 1],
                                     padding='VALID')

    return src_out
Example #7
    def tensor_qr(self,
                  legs1,
                  legs2,
                  new_legs,
                  restrict_mode=True,
                  name='tensor_qr',
                  *args,
                  **kw):
        """对node做qr分解.

        参数依次是Q侧leg, R侧leg, 和新产生的leg名称."""
        with tf.name_scope(name):
            assert set(legs1) | set(legs2) >= set(self.legs) and set(
                legs1) & set(legs2) == set(), 'qr legs not correct'
            if restrict_mode:
                assert set(legs1) | set(legs2) == set(
                    self.legs), 'qr legs not correct'
            legs1 = [i for i in self.legs if i in legs1]
            legs2 = [i for i in self.legs if i in legs2]
            transposed = self.tensor_transpose([*legs1, *legs2])
            size1 = np.prod(transposed.data.shape[:len(legs1)], dtype=int)
            size2 = np.prod(transposed.data.shape[len(legs1):], dtype=int)
            tensor1, tensor2 = tf.qr(
                tf.reshape(transposed.data, [size1, size2]), *args, **kw)
            assert tensor1.shape[0] == size1
            assert tensor2.shape[-1] == size2
            tensor1 = tf.reshape(tensor1,
                                 [*transposed.data.shape[:len(legs1)], -1])
            tensor2 = tf.reshape(tensor2,
                                 [-1, *transposed.data.shape[len(legs1):]])
            if not isinstance(new_legs, list):
                new_legs = [new_legs, new_legs]
            return Node(tensor1,
                        [*legs1, new_legs[0]]), Node(tensor2,
                                                     [new_legs[1], *legs2])
Example #8
def _orthogonalize_tt_cores_right_to_left(tt):
    """Orthogonalize TT-cores of a TT-object in the right to left order.

  Args:
    tt: TensorTrain or a TensorTrainBatch.

  Returns:
    The same type as the input `tt` (TensorTrain or a TensorTrainBatch).
  """
    # Right to left orthogonalization.
    ndims = tt.ndims()
    raw_shape = shapes.lazy_raw_shape(tt)
    tt_ranks = shapes.lazy_tt_ranks(tt)
    prev_rank = tt_ranks[ndims]
    # Copy cores references so we can change the cores.
    tt_cores = list(tt.tt_cores)
    for core_idx in range(ndims - 1, 0, -1):
        curr_core = tt_cores[core_idx]
        # TT-ranks could have changed on the previous iteration, so `tt_ranks` can
        # be outdated for the current TT-rank, but should be valid for the next
        # TT-rank.
        curr_rank = prev_rank
        prev_rank = tt_ranks[core_idx]
        if tt.is_tt_matrix():
            curr_mode_left = raw_shape[0][core_idx]
            curr_mode_right = raw_shape[1][core_idx]
            curr_mode = curr_mode_left * curr_mode_right
        else:
            curr_mode = raw_shape[0][core_idx]

        qr_shape = (prev_rank, curr_mode * curr_rank)
        curr_core = tf.reshape(curr_core, qr_shape)
        curr_core, triang = tf.qr(tf.transpose(curr_core))
        curr_core = tf.transpose(curr_core)
        triang = tf.transpose(triang)
        if triang.get_shape().is_fully_defined():
            triang_shape = triang.get_shape().as_list()
        else:
            triang_shape = tf.shape(triang)
        # The TT-rank could have changed: if qr_shape is e.g. 4 x 10, then q would
        # be of size 4 x 4 and r would be 4 x 10, which means that the next rank
        # should be changed to 4.
        prev_rank = triang_shape[1]
        if tt.is_tt_matrix():
            new_core_shape = (prev_rank, curr_mode_left, curr_mode_right,
                              curr_rank)
        else:
            new_core_shape = (prev_rank, curr_mode, curr_rank)
        tt_cores[core_idx] = tf.reshape(curr_core, new_core_shape)

        prev_core = tf.reshape(tt_cores[core_idx - 1], (-1, triang_shape[0]))
        tt_cores[core_idx - 1] = tf.matmul(prev_core, triang)

    if tt.is_tt_matrix():
        first_core_shape = (1, raw_shape[0][0], raw_shape[1][0], prev_rank)
    else:
        first_core_shape = (1, raw_shape[0][0], prev_rank)
    tt_cores[0] = tf.reshape(tt_cores[0], first_core_shape)
    # TODO: infer the tt_ranks.
    return TensorTrain(tt_cores, tt.get_raw_shape())
Example #9
 def get_u_init_for_g(self, g):
     N_g = tf.shape(g)[0]  # number of datapoints in this cluster
     gt = tf.transpose(g)
     q, r = tf.qr(gt, full_matrices=False)
     idx = [j for j in xrange(args.rank)]
     qq = tf.gather(tf.transpose(q), idx)
     qq = tf.transpose(qq)
     return qq
Example #10
def _variable_with_orth_weight_decay(name1, shape):
    s1 = tf.cast(shape[2], tf.int32)
    s2 = tf.cast(shape[2]/2, tf.int32)
    w0_init, _ = tf.qr(tf.random_normal([s1, s2], mean=0.0, stddev=1.0))
    w0 = tf.get_variable(name1, initializer=w0_init)
    tmp1 = tf.reshape(w0, (1, s1, s2))
    tmp2 = tf.reshape(tf.transpose(w0), (1, s2, s1))
    tmp1 = tf.tile(tmp1, [shape[0], 1, 1])
    tmp2 = tf.tile(tmp2, [shape[0], 1, 1])
    return tmp1, tmp2
Example #11
    def stiefel_update(self, param, constraints, grad, moment, lr):
        """
        Override the Stiefel updates accordingly.

        Parameters
        ----------
        param
        constraint
        grad

        Returns
        -------
        corresponding Stiefel updates.
        """
        p = param
        m = moment
        g = grad

        new_g = g - K.dot(p, matrix_sym_op(K.dot(K.transpose(p), g)))

        v = self.momentum * m - lr * new_g  # velocity
        # v.name = p.name + '_v'
        # m.name = p.name + '_m'
        self.updates.append(K.update(m, v))

        # if self.nesterov:
        #     new_p = p + self.momentum * v - lr * new_g
        # else:
        new_p = p + v
        p_shape = new_p.get_shape()
        if p_shape[0]._value > p_shape[1]._value:
            new_p, _ = tf.qr(new_p, full_matrices=False)
        else:
            new_p, _ = tf.qr(tf.transpose(new_p), full_matrices=False)
            new_p = tf.transpose(new_p)
        # apply constraints
        if p in constraints:
            c = constraints[p]
            new_p = c(new_p)

        self.updates.append(K.update(p, new_p))
Example #12
def matrix_qr():
    """
    QR decomposition demo.
    :return: None
    """
    isses = tf.InteractiveSession()
    A = tf.Variable(tf.random_normal(shape=(4, 4)))
    A.initializer.run()

    logger.info("A\n%s" % A.eval())
    logger.info("tf.qr(A)\n {0}".format(tf.qr(A)))
    isses.close()
Example #13
def row_dets(mat):
    (n, np1) = mat.shape
    # r must be a concrete (evaluated) array for the numpy operations below
    q, r = tf.qr(mat)
    dets = np.zeros(np1)
    dets[np1 - 1] = 1.0
    for lw in range(2, np1 + 1):
        i = np1 - lw
        ik = i + 1
        dets[i] = -np.dot(r[i, ik:np1], dets[ik:np1]) / r[i, i]
    dets *= np.sign(np.prod(np.diag(r)))  # just for the sign
    # dets *= np.prod(np.diag(r))  # to get the scale exactly
    return dets
Example #14
def row_dets(mat):
    (n, np1) = mat.shape
    # r must be a concrete (evaluated) array for the numpy operations below
    q, r = tf.qr(mat)
    dets = np.zeros(np1)
    dets[np1 - 1] = 1.0
    for lw in range(2, np1 + 1):
        i = np1 - lw
        ik = i + 1
        dets[i] = -np.dot(r[i, ik:np1], dets[ik:np1]) / r[i, i]
    dets *= np.sign(np.prod(np.diag(r)))  # just for the sign
    # dets *= np.prod(np.diag(r))  # to get the scale exactly
    return dets
Example #15
def sliced_l2(source, target, size=16, nbins=32, alpha=-1, name='idt'):
    # target = tf.image.resize_nearest_neighbor(target, size=(size, size))
    # source = tf.image.resize_nearest_neighbor(source, size=(size, size))

    src_shape = source.get_shape().as_list()
    tar_shape = target.get_shape().as_list()
    sbatch = src_shape[0]
    tbatch = tar_shape[0]
    ndepth = np.prod(src_shape[1:])

    weight, _ = tf.qr(tf.random_normal([ndepth, ndepth]))
    src_proj = tf.matmul(tf.reshape(source, [-1, ndepth]), weight)
    tar_proj = tf.matmul(tf.reshape(target, [-1, ndepth]), weight)

    return wasser_1d(src_proj, tar_proj, sbatch, tbatch, ndepth, nbins, alpha)
Example #16
 def _orthogonal_matrix(n):
     """Construct an n x n orthogonal matrix.
     Args:
         n: Dimension.
     Returns:
         A n x n orthogonal matrix.
     """
     seed = None
     a = tf.random_normal([n, n], dtype=dtype, seed=seed)
     if seed:
         seed += 1
     q, r = tf.qr(a)
     d = tf.diag_part(r)
     # make q uniform
     q *= tf.sign(d)
     return q
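A quick sanity check for the helper above (a sketch, TensorFlow 1.x assumed): the sign-corrected q should satisfy q^T q ~= I up to floating-point error.

import tensorflow as tf

n = 8
a = tf.random_normal([n, n])
q, r = tf.qr(a)
q *= tf.sign(tf.diag_part(r))
err = tf.reduce_max(tf.abs(tf.matmul(q, q, transpose_a=True) - tf.eye(n)))
with tf.Session() as sess:
    print(sess.run(err))  # on the order of 1e-6 for float32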
Example #17
def idt_vec(source, target, nbins=32, alpha=-1, name='idt'):
    src_shape = source.get_shape().as_list()
    tar_shape = target.get_shape().as_list()
    sbatch = src_shape[0]
    tbatch = tar_shape[0]
    ndepth = np.prod(src_shape[1:])

    initializer, _ = tf.qr(tf.random_normal([ndepth, ndepth]))
    with tf.variable_scope(name):
        weight = tf.get_variable('w_proj', initializer=initializer)
        src_proj = tf.matmul(source, weight)
        tar_proj = tf.matmul(target, weight)

        src_new = optimal(src_proj, tar_proj, sbatch, tbatch, ndepth, nbins,
                          alpha)
        src_out = tf.matmul(src_new, tf.transpose(weight))
    return src_out
Example #18
def get_ortho_weights(var, gain):
    ''' compute the orthogonal initialization, this is only an approximate '''
    num_rows = 1
    for dim in var.shape[:-1]:
        num_rows *= dim
    num_cols = var.shape[-1]
    flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows,
                                                                   num_cols)
    a = tf.reshape(tf.nn.l2_normalize(var), flat_shape)
    # using svd would be better approximation but tf.qr seems faster
    q, r = tf.qr(a, full_matrices=False)
    d = tf.diag_part(r)
    q *= tf.sign(d)
    if num_rows < num_cols:
        q = tf.matrix_transpose(q)
    # gain is used to scale the new weights, needed for deeper networks
    return tf.reshape(gain * q, var.shape)
Example #19
def retract(X, G):
    # retract the tangent/ambient update G at X back onto the Stiefel manifold

    # first dim of X,G are number of stiefels (product space)
    #k = X.shape[0]
    #assert(k == G.shape[0])

    #if k == 1:
    # Calculate 'thin' qr decomposition of X + G
    Q, R = tf.qr(X + G)
    # Unflip any flipped signs
    XNew = tf.matmul(Q, tf.diag(tf.sign(tf.sign(tf.diag_part(R)) + .5)))
    #else:
    #    XNew = X + G
    #    for i in xrange(k):
    #        q, r = tf.qr(XNew[i,:,:])
    #        XNew[i,:,:] = tf.dot(q, tf.diag(tf.sign(tf.sign(tf.diag(r))+.5)))
    return XNew
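Hedged usage sketch for retract() above (TensorFlow 1.x assumed, sizes arbitrary): after the QR-based retraction the columns of XNew are again orthonormal.

import tensorflow as tf

n, p = 10, 3
X, _ = tf.qr(tf.random_normal([n, p]))  # a point on the Stiefel manifold
G = 0.1 * tf.random_normal([n, p])      # an ambient-space update direction
XNew = retract(X, G)
err = tf.reduce_max(tf.abs(tf.matmul(XNew, XNew, transpose_a=True) - tf.eye(p)))
with tf.Session() as sess:
    print(sess.run(err))                # close to zero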
Example #20
def orthogonal_matrix_chunk(cols, dtype):
    use_numpy = False
    if use_numpy:
        unstructured_block = tf.random_normal((cols, cols), dtype=tf.float32)
        # with tf.GradientTape() as tape:
        #     tape.watch(unstructured_block)
        q, _ = tf.py_function(func=my_eig, inp=[unstructured_block], Tout=[tf.float32, tf.float32])
        q.set_shape(unstructured_block.get_shape())
        q = tf.saturate_cast(q, dtype=dtype)
        # print(q.shape)
    else:
        # unstructured_block = tf.stop_gradient(tf.random_normal((cols, cols), dtype=dtype))
        # q, r = tf.qr(unstructured_block, full_matrices=False)
        # q, r = tf.stop_gradient(q), tf.stop_gradient(r)
        # q, r = qr_wo_grad(unstructured_block)
        unstructured_block = tf.random_normal((cols, cols), dtype=tf.float32)
        q, r = tf.qr(unstructured_block, full_matrices=False)
    return tf.transpose(q)
Example #21
 def qr(A):
     Q, R = tf.qr(A)
     #m, n = A.shape.as_list()
     #norm = lambda v, u: tf.reduce_sum(v*u)
     #qi = tf.reshape(A[:,0], (m,1))
     #ri = norm(qi, qi)
     #q = [qi/ri]
     #R = [tf.sparse_to_dense([0], (n,), [ri])]
     #for i in range(1, n):
     #    qi = tf.reshape(A[:,i], (m, 1))
     #    r = []
     #    for j in range(0, i):
     #        ri = norm(qi, q[j])
     #        qi -= ri*q[j]
     #        r.append(ri)
     #    r.append(norm(qi, qi)**0.5)
     #    q.append(qi/r[-1])
     #    R.append(tf.sparse_to_dense(np.arange(0, i+1), (n,), ri))
     #Q = tf.concat(q, axis=1)
     #R = tf.transpose(tf.concat([tf.reshape(r, (n, 1)) for r in R], axis=1))
     return Q, R
Example #22
  def Test(self):
    np.random.seed(1)
    x_np = np.random.uniform(
        low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
    if is_complex:
      x_np += 1j * np.random.uniform(
          low=-1.0, high=1.0,
          size=np.prod(shape_)).reshape(shape_).astype(dtype_)

    for full_matrices in False, True:
      with self.test_session() as sess:
        if use_static_shape_:
          x_tf = tf.constant(x_np)
        else:
          x_tf = tf.placeholder(dtype_)
        q_tf, r_tf = tf.qr(x_tf, full_matrices=full_matrices)

        if use_static_shape_:
          q_tf_val, r_tf_val = sess.run([q_tf, r_tf])
        else:
          q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})

        q_dims = q_tf_val.shape
        np_q = np.ndarray(q_dims, dtype_)
        np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
        new_first_dim = np_q_reshape.shape[0]

        x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
        for i in range(new_first_dim):
          if full_matrices:
            np_q_reshape[i,:,:], _ = \
                np.linalg.qr(x_reshape[i,:,:], mode="complete")
          else:
            np_q_reshape[i,:,:], _ = \
                np.linalg.qr(x_reshape[i,:,:], mode="reduced")
        np_q = np.reshape(np_q_reshape, q_dims)
        CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
        CheckApproximation(self, x_np, q_tf_val, r_tf_val)
        CheckUnitary(self, q_tf_val)
Example #23
    def svd_initialization(self, z, y):
        group_index = [tf.where(tf.equal(y, k)) for k in xrange(self.n_class)
                       ]  # indices of datapoints in k-th cluster
        groups = [tf.gather(z, group_index[k])
                  for k in xrange(self.n_class)]  # datapoints in k-th cluster
        # remove extra dimension
        groups = [tf.squeeze(g, axis=1) for g in groups]
        # subtract mean
        if self.args.submean:
            groups = [g - tf.reduce_mean(g, 0, keep_dims=True) for g in groups]
        dim1 = tf.shape(z)[1]
        u_prime = []
        for i, g in enumerate(groups):
            N_g = tf.shape(g)[0]  # number of datapoints in this cluster
            gt = tf.transpose(g)
            q, r = tf.qr(gt, full_matrices=False)
            idx = [j for j in xrange(args.rank)]
            qq = tf.gather(tf.transpose(q), idx)
            qq = tf.transpose(qq)
            u_prime.append(qq)

        return u_prime
Example #24
def _orthogonalize_batch_tt_cores_left_to_right(tt):
    """Orthogonalize TT-cores of a batch TT-object in the left to right order.

  Args:
    tt: TensorTrainBatch.

  Returns:
    TensorTrainBatch
  """
    # Left to right orthogonalization.
    ndims = tt.ndims()
    raw_shape = shapes.lazy_raw_shape(tt)
    tt_ranks = shapes.lazy_tt_ranks(tt)
    next_rank = tt_ranks[0]
    batch_size = shapes.lazy_batch_size(tt)

    # Copy cores references so we can change the cores.
    tt_cores = list(tt.tt_cores)
    for core_idx in range(ndims - 1):
        curr_core = tt_cores[core_idx]
        # TT-ranks could have changed on the previous iteration, so `tt_ranks` can
        # be outdated for the current TT-rank, but should be valid for the next
        # TT-rank.
        curr_rank = next_rank
        next_rank = tt_ranks[core_idx + 1]
        if tt.is_tt_matrix():
            curr_mode_left = raw_shape[0][core_idx]
            curr_mode_right = raw_shape[1][core_idx]
            curr_mode = curr_mode_left * curr_mode_right
        else:
            curr_mode = raw_shape[0][core_idx]

        qr_shape = (batch_size, curr_rank * curr_mode, next_rank)
        curr_core = tf.reshape(curr_core, qr_shape)
        curr_core, triang = tf.qr(curr_core)
        if triang.get_shape().is_fully_defined():
            triang_shape = triang.get_shape().as_list()
        else:
            triang_shape = tf.shape(triang)
        # The TT-rank could have changed: if qr_shape is e.g. 4 x 10, then q would
        # be of size 4 x 4 and r would be 4 x 10, which means that the next rank
        # should be changed to 4.
        next_rank = triang_shape[1]
        if tt.is_tt_matrix():
            new_core_shape = (batch_size, curr_rank, curr_mode_left,
                              curr_mode_right, next_rank)
        else:
            new_core_shape = (batch_size, curr_rank, curr_mode, next_rank)

        tt_cores[core_idx] = tf.reshape(curr_core, new_core_shape)

        next_core = tf.reshape(tt_cores[core_idx + 1],
                               (batch_size, triang_shape[2], -1))
        tt_cores[core_idx + 1] = tf.matmul(triang, next_core)

    if tt.is_tt_matrix():
        last_core_shape = (batch_size, next_rank, raw_shape[0][-1],
                           raw_shape[1][-1], 1)
    else:
        last_core_shape = (batch_size, next_rank, raw_shape[0][-1], 1)
    tt_cores[-1] = tf.reshape(tt_cores[-1], last_core_shape)
    # TODO: infer the tt_ranks.
    return TensorTrainBatch(tt_cores,
                            tt.get_raw_shape(),
                            batch_size=batch_size)
Example #25
def trainspd(total_loss,
             global_step,
             optimizer,
             learning_rate,
             moving_average_decay,
             update_gradient_vars,
             log_histograms=True):
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)

    # Compute gradients.
    with tf.control_dependencies([loss_averages_op]):
        if optimizer == 'ADAGRAD':
            opt = tf.train.AdagradOptimizer(learning_rate)
        elif optimizer == 'ADADELTA':
            opt = tf.train.AdadeltaOptimizer(learning_rate,
                                             rho=0.9,
                                             epsilon=1e-6)
        elif optimizer == 'ADAM':
            opt = tf.train.AdamOptimizer(learning_rate,
                                         beta1=0.9,
                                         beta2=0.999,
                                         epsilon=0.1)
        elif optimizer == 'RMSPROP':
            opt = tf.train.RMSPropOptimizer(learning_rate,
                                            decay=0.9,
                                            momentum=0.9,
                                            epsilon=1.0)
        elif optimizer == 'MOM':
            opt = tf.train.MomentumOptimizer(learning_rate,
                                             0.9,
                                             use_nesterov=True)
        else:
            raise ValueError('Invalid optimization algorithm')

        grads = opt.compute_gradients(total_loss, update_gradient_vars)

    for idx, (egrad, var) in enumerate(grads):
        if 'orth' in var.name:
            tmp1 = tf.matmul(tf.transpose(var), egrad)
            tmp2 = 0.5 * (tmp1 + tf.transpose(tmp1))
            rgrad = egrad - tf.matmul(var, tmp2)
            grads[idx] = (rgrad, var)

    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    redun = 0.
    for grad, var in grads:
        if 'orth' in var.name:
            o_n, _ = tf.qr(var)
            redun = redun + tf.reduce_sum(var.assign(o_n), [0, 1])

    # Add histograms for trainable variables.
    if log_histograms:
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

    # Add histograms for gradients.
    if log_histograms:
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op, redun
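The gradient correction applied to the 'orth' variables above is the usual projection of a Euclidean gradient onto the tangent space of the Stiefel manifold; as a stand-alone sketch (TensorFlow 1.x assumed, function name hypothetical):

import tensorflow as tf

def stiefel_project(var, egrad):
    """Project the Euclidean gradient `egrad` onto the tangent space at `var`."""
    sym = tf.matmul(tf.transpose(var), egrad)
    sym = 0.5 * (sym + tf.transpose(sym))
    return egrad - tf.matmul(var, sym)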
Example #26
def train_model():
    with tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True)) as session:

        all_real_data_conv = tf.placeholder(tf.int32,
                                            shape=[BATCH_SIZE, 3, DIM, DIM])
        split_real_data_conv = tf.split(all_real_data_conv, len(DEVICES))
        gen_costs, disc_costs, recon_costs = [], [], []

        for device_index, (device, real_data_conv) in enumerate(
                zip(DEVICES, split_real_data_conv)):
            with tf.device(device):

                real_data = tf.reshape(
                    2 * ((tf.cast(real_data_conv, tf.float32) / 255.) - .5),
                    [BATCH_SIZE // len(DEVICES), OUTPUT_DIM])

                fake_z = BEGANEncoder(real_data)
                fake_data = BEGANGenerator(BATCH_SIZE // len(DEVICES),
                                           noise=fake_z,
                                           bn=BN_G)

                if MODE == 'swae':
                    recon_cost = tf.reduce_mean(
                        tf.reduce_mean(
                            tf.squared_difference(real_data, fake_data)))
                else:
                    raise Exception()

                recon_costs.append(recon_cost)
        recon_cost = tf.add_n(recon_costs) / len(DEVICES)

        if MODE == 'swae':
            optimizer = tf.train.AdamOptimizer(learning_rate=D_LR,
                                               beta1=BETA1_D,
                                               beta2=0.9)

            grads_and_vars = optimizer.compute_gradients(
                recon_cost,
                var_list=lib.params_with_name1('Encoder', 'Generator'))
            for idx, (egrad, var) in enumerate(grads_and_vars):
                # print var.name
                if 'swd_proj' in var.name:
                    # print var.name
                    tmp1 = tf.matmul(tf.transpose(var), egrad)
                    tmp2 = 0.5 * (tmp1 + tf.transpose(tmp1))
                    rgrad = egrad - tf.matmul(var, tmp2)
                    grads_and_vars[idx] = (rgrad, var)
            recon_train_op = optimizer.apply_gradients(grads_and_vars)

            # stiefel update
            stiefel_up = tf.random_normal([FEATURE_DIM, FEATURE_DIM])
            for var in lib.params_with_name('Encoder.swd'):
                # print var.name
                if 'swd_proj' in var.name:
                    print(var.name)

                    o_n, _ = tf.qr(var)
                    stiefel_up = stiefel_up + tf.reduce_sum(
                        var.assign(o_n), [0, 1])

            tf.summary.scalar("recon_cost", recon_cost)
            summary_op = tf.summary.merge_all()
        else:
            raise Exception()

        # For generating samples
        fixed_noise = tf.constant(
            np.random.normal(size=(BATCH_SIZE, FEATURE_DIM)).astype('float32'))
        all_fixed_noise_samples = []
        for device_index, device in enumerate(DEVICES):
            n_samples = BATCH_SIZE // len(DEVICES)
            all_fixed_noise_samples.append(BEGANGenerator(n_samples))
        if tf.__version__.startswith('1.'):
            all_fixed_noise_samples = tf.concat(all_fixed_noise_samples,
                                                axis=0)
        else:
            all_fixed_noise_samples = tf.concat(0, all_fixed_noise_samples)

        def generate_image(iteration):
            samples = session.run(all_fixed_noise_samples)
            samples = ((samples + 1.) * 127.5).astype('int32')
            tflib.save_images.save_images(
                samples.reshape((BATCH_SIZE, 3, DIM, DIM)),
                '%s/samples_%d.png' % (SAMPLES_DIR, iteration))

        writer = tf.summary.FileWriter(TBOARD_DIR, session.graph)

        # Dataset iterator
        train_gen, dev_gen = tflib.data_loader.load(BATCH_SIZE, DATA_DIR,
                                                    DATASET)

        def inf_train_gen():
            while True:
                for (images, ) in train_gen():
                    yield images

        # Save a batch of ground-truth samples
        _x = inf_train_gen().__next__()
        _x_r = session.run(
            real_data, feed_dict={real_data_conv: _x[:BATCH_SIZE // N_GPUS]})
        _x_r = ((_x_r + 0.5) * 255).astype('int32')
        tflib.save_images.save_images(
            _x_r.reshape((BATCH_SIZE // N_GPUS, 3, DIM, DIM)),
            '%s/samples_groundtruth.png' % SAMPLES_DIR)

        session.run(tf.global_variables_initializer())

        # Checkpoint saver
        ckpt_saver = tf.train.Saver(max_to_keep=int(ITERS / CHECKPOINT_STEP))

        if LOAD_CHECKPOINT:
            is_check, ITER_START = load_checkpoint(session, ckpt_saver,
                                                   CHECKPOINT_DIR)
            if is_check:
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")

        gen = inf_train_gen()

        for it in range(ITERS):
            iteration = it + ITER_START
            start_time = time.time()
            _data = gen.__next__()

            _recon_cost, _, _, _summary_op = session.run(
                [recon_cost, recon_train_op, stiefel_up, summary_op],
                feed_dict={all_real_data_conv: _data})

            writer.add_summary(_summary_op, iteration)

            if iteration % SAVE_SAMPLES_STEP == SAVE_SAMPLES_STEP - 1:
                generate_image(iteration)
                print("Time: %g/itr, Itr: %d, reconstruction loss: %g" %
                      (time.time() - start_time, iteration, _recon_cost))

            # Save checkpoint
            if (iteration != 0) and (iteration % CHECKPOINT_STEP
                                     == CHECKPOINT_STEP - 1):
                if iteration == CHECKPOINT_STEP - 1:
                    ckpt_saver.save(session,
                                    os.path.join(CHECKPOINT_DIR, "SWAE.model"),
                                    iteration,
                                    write_meta_graph=True)
                else:
                    ckpt_saver.save(session,
                                    os.path.join(CHECKPOINT_DIR, "SWAE.model"),
                                    iteration,
                                    write_meta_graph=False)
Example #27
    def CameraIteration3(self, conv1, conv2, intrinsics, p, D, R, T, level, fx,
                         fy, ox, oy):

        conv1_shape = conv1.get_shape()
        nbatch = int(conv1_shape[0])
        npixels = int(conv1_shape[1])

        nchannels1 = int(conv1_shape[2])
        nchannels2 = int(2 * nchannels1)
        nchannels3 = nchannels1 + nchannels2

        with tf.name_scope("jacobian_and_projection"):

            P = tf.multiply(p, tf.tile(tf.transpose(D, [0, 2, 1]), [1, 3, 1]))
            P = tf.transpose(P, [0, 2, 1])

            #jacobianMatrixGeometry,projection=jacobian_construction(P,intrinsics,tf.transpose(R,[0,2,1]),T)
            #print "jacobian_test_ok"
            #print jacobianMatrixGeometry.get_shape()

            jacobianMatrixGeometry, grad, diff, valid = equation_construction_prepare(
                P, intrinsics, tf.transpose(R, [0, 2, 1]), T, conv1,
                conv2[:, :, :, 0:nchannels1], conv2[:, :, :,
                                                    nchannels1:nchannels2],
                conv2[:, :, :, nchannels2:nchannels3])
            # print "prepare_test_ok"
            # print jacobianMatrixGeometry.get_shape()
            #print grad_.get_shape(),diff_.get_shape(),valid_.get_shape()
            _, gra, dif, _ = equation_construction_fused(
                P, intrinsics, tf.transpose(R, [0, 2, 1]), T, conv1,
                conv2[:, :, :, 0:nchannels1], conv2[:, :, :,
                                                    nchannels1:nchannels2],
                conv2[:, :, :, nchannels2:nchannels3])

        with tf.name_scope("weighting"):

            # _conv2,_mask,index00,i00=utils.interpolate2d3(conv2,projection[:,:,0],projection[:,:,1])
            # num_valid= npixels/tf.reduce_sum(_mask,axis=1,keepdims=True)
            # mask   = tf.expand_dims(_mask,axis=-1)
            # _diff  = tf.expand_dims(_conv2[:,:,0:nchannels1]-conv1,-1)
            # _gradx = tf.expand_dims(_conv2[:,:,nchannels1:nchannels2],-1)
            # _grady = tf.expand_dims(_conv2[:,:,nchannels2:nchannels3],-1)
            # diff   = tf.matmul(_diff,mask)
            # grad   = tf.concat([tf.matmul(_gradx,mask),tf.matmul(_grady,mask)],-1)

            num_valid = npixels / tf.reduce_sum(valid, axis=1, keepdims=True)

            # assert_op = tf.Assert(False,[num_valid,num_valid2,projection,index00,tf.abs(tf.squeeze(grad)-tf.squeeze(grad_))],summarize=10000)
            # with tf.control_dependencies([assert_op]):
            #     diff=tf.identity(diff)

            # _,gra,dif,vali=equation_construction_fused(P,intrinsics,tf.transpose(R,[0,2,1]),T,conv1,conv2[:,:,:,0:nchannels1],conv2[:,:,:,nchannels1:nchannels2],conv2[:,:,:,nchannels2:nchannels3])
            # assert_op = tf.Assert(False,[gra,tf.matmul(grad,grad,transpose_a=True)],summarize=10000)
            # with tf.control_dependencies([assert_op]):
            #     diff=tf.identity(diff)

        with tf.name_scope("lambda_prediction"):

            avg_residual = num_valid * tf.reduce_mean(
                tf.abs(tf.squeeze(diff, axis=-1)), axis=1, keep_dims=True)
            lambda_conv1 = self.conv1d(avg_residual,
                                       2 * nchannels1,
                                       name="lambda_" + level + "_1",
                                       activation=tf.nn.selu)
            lambda_conv2 = self.conv1d(lambda_conv1,
                                       4 * nchannels1,
                                       name="lambda_" + level + "_2",
                                       activation=tf.nn.selu)
            lambda_conv3 = self.conv1d(lambda_conv2,
                                       2 * nchannels1,
                                       name="lambda_" + level + "_3",
                                       activation=tf.nn.selu)
            lambda_conv4 = self.conv1d(lambda_conv3,
                                       nchannels1,
                                       name="lambda_" + level + "_4",
                                       activation=tf.nn.selu)
            lambda_conv5 = self.conv1d(lambda_conv4,
                                       1,
                                       name="lambda_" + level + "_5",
                                       activation=tf.nn.tanh)
            lambda_prediction = tf.pow(
                tf.norm(avg_residual, axis=-1, keepdims=True),
                1.0 + lambda_conv5)
            avg_residual = tf.reduce_mean(avg_residual)

        with tf.name_scope("FirstEstimation"):

            with tf.name_scope("Solve"):

                #AtA,Atb=equation_construction(jacobian=jacobianMatrixGeometry,gradient=grad,difference=diff)
                AtA = tf.reduce_sum(tf.matmul(jacobianMatrixGeometry,
                                              tf.matmul(
                                                  gra, jacobianMatrixGeometry),
                                              transpose_a=True),
                                    axis=1)
                Atb = tf.reduce_sum(tf.matmul(jacobianMatrixGeometry,
                                              dif,
                                              transpose_a=True),
                                    axis=1)

                diag = tf.matrix_diag_part(AtA)
                AtA = AtA + tf.matrix_diag(
                    tf.squeeze(tf.matmul(
                        tf.expand_dims(diag, axis=-1) + 1e-5,
                        lambda_prediction),
                               axis=-1))

                if not qr:
                    motion = tf.matmul(tf.matrix_inverse(AtA), Atb)
                else:
                    q_full, r_full = tf.qr(AtA, full_matrices=True)
                    motion = tf.linalg.solve(
                        r_full, tf.matmul(q_full, Atb, transpose_a=True))

                assert_op = tf.Assert(tf.reduce_all(tf.is_finite(motion)),
                                      [gra, diff],
                                      summarize=10000)
                with tf.control_dependencies([assert_op]):
                    motion = tf.identity(motion)

            with tf.name_scope("Update"):

                wx, wy, wz, tx, ty, tz = tf.unstack(motion, num=6, axis=1)
                dr = self.AngleaAxisRotation(wx, wy, wz)
                dv = self.VMatrix(wx, wy, wz)
                dt = tf.stack([tx, ty, tz], axis=1)
                updatedR = tf.matmul(dr, R)
                updatedT = tf.add(tf.matmul(dv, dt), tf.matmul(dr, T))

            #with tf.name_scope("CheckUpdate"):

            # Rp = tf.matmul(updatedR,p)
            # RP = tf.multiply(Rp,tf.tile(tf.transpose(D,[0,2,1]),[1,3,1]))
            # RPT= tf.add(RP,tf.tile(updatedT,[1,1,npixels]))

            # Z  = RPT[:,2,:]
            # X  = RPT[:,0,:]
            # Y  = RPT[:,1,:]

            # x  = tf.div(X,Z)
            # y  = tf.div(Y,Z)

            # px=fx*x+ox
            # py=fy*y+oy

            # _conv2,_mask =utils.interpolate2d(conv2,px,py)
            # _num_valid   =npixels/tf.reduce_sum(_mask,axis=1,keepdims=True)
            # #_num_valid   =tf.constant(1.0)
            # _diff        =_mask*(_conv2[:,:,0:nchannels1]-conv1)
            # _avg_residual=_num_valid*tf.reduce_mean(tf.abs(_diff),axis=1,keep_dims=True)
            # _avg_residual=tf.reduce_mean(_avg_residual)
            # motion=tf.squeeze(motion)

        # return tf.cond(tf.less(_avg_residual,residual_ratio*avg_residual),
        #                        lambda:(updatedR,updatedT,tf.norm(motion[:3]),tf.norm(motion[3:]),tf.squeeze(_num_valid)),
        #                        lambda:(R,T,tf.to_float(0.0),tf.to_float(0.0),tf.squeeze(num_valid)))
        return updatedR, updatedT, tf.norm(motion[:3]), tf.norm(
            motion[3:]), tf.squeeze(num_valid)
Example #28
    def CameraIteration2(self, conv1, conv2, fx, fy, ox, oy, p, D, R, T,
                         level):

        with tf.name_scope("initialization"):

            conv1_shape = conv1.get_shape()
            nbatch = int(conv1_shape[0])
            npixels = int(conv1_shape[1])

            nchannels1 = int(conv1_shape[2])
            nchannels2 = int(2 * nchannels1)
            nchannels3 = nchannels1 + nchannels2

        with tf.name_scope("warp_compute"):
            Rp = tf.matmul(R, p)
            RP = tf.multiply(Rp, tf.tile(tf.transpose(D, [0, 2, 1]),
                                         [1, 3, 1]))
            RPT = tf.add(RP, tf.tile(T, [1, 1, npixels]))

            Z = RPT[:, 2, :]
            X = RPT[:, 0, :]
            Y = RPT[:, 1, :]

            x = tf.div(X, Z)
            y = tf.div(Y, Z)

            px = fx * x + ox
            py = fy * y + oy

        with tf.name_scope("warp_conv"):

            with tf.name_scope("weighting"):
                _conv2, _mask = utils.interpolate2d(conv2, px, py)
                num_valid = npixels / tf.reduce_sum(
                    _mask, axis=1, keepdims=True)
                mask = tf.expand_dims(_mask, axis=-1)
                _diff = tf.expand_dims(_conv2[:, :, 0:nchannels1] - conv1, -1)
                _gradx = tf.expand_dims(_conv2[:, :, nchannels1:nchannels2],
                                        -1)
                _grady = tf.expand_dims(_conv2[:, :, nchannels2:nchannels3],
                                        -1)

                diff = tf.matmul(_diff, mask)
                grad = tf.concat(
                    [tf.matmul(_gradx, mask),
                     tf.matmul(_grady, mask)], -1)

        with tf.name_scope("lambda_prediction"):

            avg_residual = num_valid * tf.reduce_mean(
                tf.abs(tf.squeeze(diff, axis=-1)), axis=1, keep_dims=True)
            lambda_conv1 = self.conv1d(avg_residual,
                                       2 * nchannels1,
                                       name="lambda_" + level + "_1",
                                       activation=tf.nn.selu)
            lambda_conv2 = self.conv1d(lambda_conv1,
                                       4 * nchannels1,
                                       name="lambda_" + level + "_2",
                                       activation=tf.nn.selu)
            lambda_conv3 = self.conv1d(lambda_conv2,
                                       2 * nchannels1,
                                       name="lambda_" + level + "_3",
                                       activation=tf.nn.selu)
            lambda_conv4 = self.conv1d(lambda_conv3,
                                       nchannels1,
                                       name="lambda_" + level + "_4",
                                       activation=tf.nn.selu)
            lambda_conv5 = self.conv1d(lambda_conv4,
                                       1,
                                       name="lambda_" + level + "_5",
                                       activation=tf.nn.tanh)
            lambda_prediction = tf.pow(
                tf.norm(avg_residual, axis=-1, keepdims=True),
                1.0 + lambda_conv5)
            avg_residual = tf.reduce_mean(avg_residual)

        with tf.name_scope("FirstEstimation"):

            with tf.name_scope("Solve"):

                jacobianMatrixGeometry = self.CameraJacobianMatrix(
                    x, y, Z, fx, fy)
                AtA = tf.reduce_sum(tf.matmul(jacobianMatrixGeometry,
                                              tf.matmul(
                                                  tf.matmul(grad,
                                                            grad,
                                                            transpose_a=True),
                                                  jacobianMatrixGeometry),
                                              transpose_a=True),
                                    axis=1)
                Atb = tf.reduce_sum(tf.matmul(jacobianMatrixGeometry,
                                              tf.matmul(grad,
                                                        diff,
                                                        transpose_a=True),
                                              transpose_a=True),
                                    axis=1)

                diag = tf.matrix_diag_part(AtA)
                AtA = AtA + tf.matrix_diag(
                    tf.squeeze(tf.matmul(
                        tf.expand_dims(diag, axis=-1) + 1e-5,
                        lambda_prediction),
                               axis=-1))
                #motion = tf.matmul(tf.matrix_inverse(AtA),Atb)

                if not qr:
                    motion = tf.matmul(tf.matrix_inverse(AtA), Atb)
                else:
                    q_full, r_full = tf.qr(AtA, full_matrices=True)
                    motion = tf.linalg.solve(
                        r_full, tf.matmul(q_full, Atb, transpose_a=True))

            with tf.name_scope("Update"):

                wx, wy, wz, tx, ty, tz = tf.unstack(motion, num=6, axis=1)
                dr = self.AngleaAxisRotation(wx, wy, wz)
                dv = self.VMatrix(wx, wy, wz)
                dt = tf.stack([tx, ty, tz], axis=1)
                updatedR = tf.matmul(dr, R)
                updatedT = tf.add(tf.matmul(dv, dt), tf.matmul(dr, T))

            with tf.name_scope("CheckUpdate"):

                Rp = tf.matmul(updatedR, p)
                RP = tf.multiply(
                    Rp, tf.tile(tf.transpose(D, [0, 2, 1]), [1, 3, 1]))
                RPT = tf.add(RP, tf.tile(updatedT, [1, 1, npixels]))

                Z = RPT[:, 2, :]
                X = RPT[:, 0, :]
                Y = RPT[:, 1, :]

                x = tf.div(X, Z)
                y = tf.div(Y, Z)

                px = fx * x + ox
                py = fy * y + oy

                _conv2, _mask = utils.interpolate2d(conv2, px, py)
                _num_valid = npixels / tf.reduce_sum(
                    _mask, axis=1, keepdims=True)
                _diff = _mask * (_conv2[:, :, 0:nchannels1] - conv1)
                _avg_residual = _num_valid * tf.reduce_mean(
                    tf.abs(_diff), axis=1, keep_dims=True)
                _avg_residual = tf.reduce_mean(_avg_residual)

                motion = tf.squeeze(motion)
                # assert_op = tf.Assert(False,[motion[3:]],summarize=10000)
                # with tf.control_dependencies([assert_op]):
                #     motion=tf.identity(motion)

                # def return_origin():
                #     return R,T,tf.to_float(0.0),tf.to_float(0.0),tf.squeeze(num_valid)

                # def return_update():
                #     global motion
                #     assert_op = tf.Assert(False,[tf.norm(motion[:3]),tf.norm(motion[3:])],summarize=10000)
                #     with tf.control_dependencies([assert_op]):
                #         motion=tf.identity(motion)
                #     return updatedR,updatedT,tf.norm(motion[:3]),tf.norm(motion[3:]),tf.squeeze(num_valid)

        return tf.cond(
            tf.less(_avg_residual, residual_ratio * avg_residual), lambda:
            (updatedR, updatedT, tf.norm(motion[:3]), tf.norm(motion[3:]),
             tf.squeeze(num_valid)), lambda:
            (R, T, tf.to_float(0.0), tf.to_float(0.0), tf.squeeze(num_valid)))
Example #29
    def CameraIteration(self, conv1, conv2, fx, fy, ox, oy, p, D, R, T):

        with tf.name_scope("initialization"):

            conv1_shape = conv1.get_shape()
            nbatch = int(conv1_shape[0])
            npixels = int(conv1_shape[1])

            nchannels1 = int(conv1_shape[2])
            nchannels2 = int(2 * nchannels1)
            nchannels3 = nchannels1 + nchannels2

        with tf.name_scope("warp_compute"):
            Rp = tf.matmul(R, p)
            RP = tf.multiply(Rp, tf.tile(tf.transpose(D, [0, 2, 1]),
                                         [1, 3, 1]))
            RPT = tf.add(RP, tf.tile(T, [1, 1, npixels]))

            Z = RPT[:, 2, :]
            X = RPT[:, 0, :]
            Y = RPT[:, 1, :]

            x = tf.div(X, Z)
            y = tf.div(Y, Z)

            px = fx * x + ox
            py = fy * y + oy

        with tf.name_scope("warp_conv"):

            with tf.name_scope("weighting"):
                _conv2, _mask = utils.interpolate2d(conv2, px, py)
                mask = tf.expand_dims(_mask, axis=-1)
                _diff = tf.expand_dims(_conv2[:, :, 0:nchannels1] - conv1, -1)
                _gradx = tf.expand_dims(_conv2[:, :, nchannels1:nchannels2],
                                        -1)
                _grady = tf.expand_dims(_conv2[:, :, nchannels2:nchannels3],
                                        -1)

                diff = tf.matmul(_diff, mask)
                grad = tf.concat(
                    [tf.matmul(_gradx, mask),
                     tf.matmul(_grady, mask)], -1)

        with tf.name_scope("lambda_prediction"):

            avg_residual = tf.reduce_mean(tf.abs(tf.squeeze(diff, axis=-1)),
                                          axis=1,
                                          keep_dims=True)
            lambda_prediction = tf.pow(
                tf.norm(avg_residual, axis=-1, keepdims=True), 2.0)

        with tf.name_scope("FirstEstimation"):

            with tf.name_scope("Solve"):

                jacobianMatrixGeometry = self.CameraJacobianMatrix(
                    x, y, Z, fx, fy)
                AtA = tf.reduce_sum(tf.matmul(jacobianMatrixGeometry,
                                              tf.matmul(
                                                  tf.matmul(grad,
                                                            grad,
                                                            transpose_a=True),
                                                  jacobianMatrixGeometry),
                                              transpose_a=True),
                                    axis=1)
                Atb = tf.reduce_sum(tf.matmul(jacobianMatrixGeometry,
                                              tf.matmul(grad,
                                                        diff,
                                                        transpose_a=True),
                                              transpose_a=True),
                                    axis=1)

                diag = tf.matrix_diag_part(AtA)
                AtA = AtA + tf.matrix_diag(
                    tf.squeeze(tf.matmul(
                        tf.expand_dims(diag, axis=-1) + 1e-5,
                        lambda_prediction),
                               axis=-1))
                if not qr:
                    motion = tf.matmul(tf.matrix_inverse(AtA), Atb)
                else:
                    q_full, r_full = tf.qr(AtA, full_matrices=True)
                    motion = tf.linalg.solve(
                        r_full, tf.matmul(q_full, Atb, transpose_a=True))

            with tf.name_scope("Update"):
                wx, wy, wz, tx, ty, tz = tf.unstack(motion, num=6, axis=1)
                dr = self.AngleaAxisRotation(wx, wy, wz)
                dt = tf.stack([tx, ty, tz], axis=1)
                updatedR = tf.matmul(dr, R)
                updatedT = tf.add(dt, tf.matmul(dr, T))
        return updatedR, updatedT, tf.reduce_sum(mask) / npixels
Example #30
import tensorflow as tf

"""tf.qr(input, full_matrices=None, name=None)
功能:对矩阵进行qr分解。
输入:。"""

a = tf.constant([1, 2, 2, 1, 0, 2, 0, 1, 1], shape=[3, 3], dtype=tf.float64)
q, r = tf.qr(a)

sess = tf.Session()
print(sess.run(tf.qr(a)))
sess.close()

# q==>[[-0.70710678   0.57735027   -0.40824829]
#      [-0.70710678  -0.57735027    0.40824829]
#      [0.            0.57735027   0.81649658 ]]
# r==>[[-1.41421356  -1.41421356   -2.82842712]
#      [0.            1.73205081    0.57735027]
#      [0.            0.            0.81649658]]
# Qr(q=array([[-0.70710678,  0.57735027, -0.40824829],
#        [-0.70710678, -0.57735027,  0.40824829],
#        [ 0.        ,  0.57735027,  0.81649658]]), r=array([[-1.41421356, -1.41421356, -2.82842712],
#        [ 0.        ,  1.73205081,  0.57735027],
#        [ 0.        ,  0.        ,  0.81649658]]))
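A short check of the factorization above, reusing a, q and r (TensorFlow 1.x assumed): multiplying the factors back together recovers the original matrix.

recon_err = tf.reduce_max(tf.abs(tf.matmul(q, r) - a))
with tf.Session() as sess:
    print(sess.run(recon_err))  # ~0 up to rounding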
Example #31
# Transpose; axes can be permuted via perm, e.g. perm=[1, 0]
tf.transpose(a, perm=None, name='transpose')
# Transpose the last two dimensions of tensor a
tf.matrix_transpose(a, name='matrix_transpose')
# Matrix with two batch dimensions, x.shape is [1, 2, 3, 4]
# tf.matrix_transpose(x) is shape [1, 2, 4, 3]
# Trace of a matrix
tf.trace(x, name=None)
# Determinant of a square matrix
tf.matrix_determinant(input, name=None)
# Inverse of an invertible square matrix; input must be float or complex
tf.matrix_inverse(input, adjoint=None, name=None)
# Singular value decomposition
tf.svd(tensor, full_matrices=False, compute_uv=True, name=None)
# QR decomposition
tf.qr(input, full_matrices=None, name=None)
# Norm of a tensor (Euclidean / 2-norm by default)
tf.norm(tensor, ord='euclidean', axis=None, keep_dims=False, name=None)

# Build an identity matrix, or a batch of them; batch_shape is passed as a list
tf.eye(num_rows, num_columns=None, batch_shape=None, dtype=tf.float32, name=None)
# Construct one identity matrix.
tf.eye(2)
"""
==> [[1., 0.],
     [0., 1.]]
"""
# Construct a batch of 3 identity matrices, each 2 x 2.
# batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
batch_identity = tf.eye(2, batch_shape=[3])
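A minimal sketch tying a few of the ops listed above together (TensorFlow 1.x assumed, values arbitrary):

import tensorflow as tf

x = tf.constant([[2., 1.], [1., 3.]])
q, r = tf.qr(x)                    # QR factorization
det = tf.matrix_determinant(x)     # determinant: 2*3 - 1*1 = 5
inv = tf.matrix_inverse(x)         # inverse of the square matrix
with tf.Session() as sess:
    print(sess.run([det, tf.trace(x), tf.norm(x)]))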