Example #1
  def angular_symmetry(self, d_cutoff, d, atom_numbers, coordinates):
    """ Angular Symmetry Function """

    max_atoms = self.max_atoms
    embedding = tf.eye(np.max(self.atom_cases) + 1)
    atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)

    Rs = np.linspace(0., self.angular_cutoff, self.angular_length)
    ita = 3 / (Rs[1] - Rs[0])**2
    thetas = np.linspace(0., np.pi, self.angular_length)
    zeta = float(self.angular_length**2)

    ita, zeta, Rs, thetas = np.meshgrid(ita, zeta, Rs, thetas)
    zeta = tf.cast(np.reshape(zeta, (1, 1, 1, 1, -1)), tf.float32)
    ita = tf.cast(np.reshape(ita, (1, 1, 1, 1, -1)), tf.float32)
    Rs = tf.cast(np.reshape(Rs, (1, 1, 1, 1, -1)), tf.float32)
    thetas = tf.cast(np.reshape(thetas, (1, 1, 1, 1, -1)), tf.float32)
    length = zeta.get_shape().as_list()[-1]

    vector_distances = tf.stack([coordinates] * max_atoms, 1) - tf.stack(
        [coordinates] * max_atoms, 2)
    R_ij = tf.stack([d] * max_atoms, axis=3)
    R_ik = tf.stack([d] * max_atoms, axis=2)
    f_R_ij = tf.stack([d_cutoff] * max_atoms, axis=3)
    f_R_ik = tf.stack([d_cutoff] * max_atoms, axis=2)

    # Angle theta = arccos((R_ij . R_ik) / (|R_ij| * |R_ik|)), computed from the distance vectors
    vector_mul = tf.reduce_sum(tf.stack([vector_distances] * max_atoms, axis=3) * \
                               tf.stack([vector_distances] * max_atoms, axis=2), axis=4)
    vector_mul = vector_mul * tf.sign(f_R_ij) * tf.sign(f_R_ik)
    theta = tf.acos(tf.math.divide(vector_mul, R_ij * R_ik + 1e-5))

    R_ij = tf.stack([R_ij] * length, axis=4)
    R_ik = tf.stack([R_ik] * length, axis=4)
    f_R_ij = tf.stack([f_R_ij] * length, axis=4)
    f_R_ik = tf.stack([f_R_ik] * length, axis=4)
    theta = tf.stack([theta] * length, axis=4)

    out_tensor = tf.pow((1. + tf.cos(theta - thetas)) / 2., zeta) * \
                 tf.exp(-ita * tf.square((R_ij + R_ik) / 2. - Rs)) * f_R_ij * f_R_ik * 2

    if self.atomic_number_differentiated:
      out_tensors = []
      for id_j, atom_type_j in enumerate(self.atom_cases):
        for atom_type_k in self.atom_cases[id_j:]:
          selected_atoms = tf.stack([atom_numbers_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \
                           tf.stack([atom_numbers_embedded[:, :, atom_type_k]] * max_atoms, axis=1)
          selected_atoms = tf.expand_dims(
              tf.expand_dims(selected_atoms, axis=1), axis=4)
          out_tensors.append(
              tf.reduce_sum(out_tensor * selected_atoms, axis=(2, 3)))
      return tf.concat(out_tensors, axis=2)
    else:
      return tf.reduce_sum(out_tensor, axis=(2, 3))
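Each entry of out_tensor above appears to implement the ANI-style modified angular symmetry function, G = 2^(1-zeta) * (1 + cos(theta - theta_s))^zeta * exp(-eta * ((R_ij + R_ik)/2 - R_s)^2) * fc(R_ij) * fc(R_ik), summed over atom pairs (j, k); the variable ita plays the role of eta. A minimal NumPy sketch of a single (j, k) contribution, with scalar arguments and precomputed cutoff values (names here are illustrative):

import numpy as np

def angular_term(theta, r_ij, r_ik, theta_s, r_s, zeta, eta, fc_ij, fc_ik):
    # ((1 + cos(theta - theta_s)) / 2)**zeta * 2 equals 2**(1 - zeta) * (1 + cos(...))**zeta
    return (((1.0 + np.cos(theta - theta_s)) / 2.0) ** zeta
            * np.exp(-eta * ((r_ij + r_ik) / 2.0 - r_s) ** 2)
            * fc_ij * fc_ik * 2.0)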
Example #2
def to_degrees(log_quaternion_loss):
  """Converts a log quaternion distance to an angle.

  Args:
    log_quaternion_loss: The log quaternion distance between two
      unit quaternions (or a batch of pairs of quaternions).

  Returns:
    The angle in degrees of the implied angle-axis representation.
  """
  return tf.acos(-(tf.exp(log_quaternion_loss) - 1)) * 2 * 180 / math.pi
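A quick round-trip check, assuming the upstream loss was computed as log(1 - |<q1, q2>|) for unit quaternions (that definition is an assumption here, not part of this snippet):

import math
import tensorflow as tf

# A 60-degree rotation has half-angle 30 degrees, so |<q1, q2>| = cos(30 deg).
half_angle = math.pi / 6
assumed_loss = tf.math.log(1.0 - math.cos(half_angle))
print(to_degrees(assumed_loss))  # ~60.0 in eager mode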
Example #3
def zero_mean_covariance(covariance, stability=0.0):
    '''Output covariance of ReLU for zero-mean Gaussian input.

    f(x) = max(x, 0).

    Args:
        covariance: Input covariance matrix (Size, Size).
        stability: For accurate results this should be zero
            if used in training, use a value like 1e-4 for stability.

    Returns:
        Output covariance of ReLU for zero-mean Gaussian input (Size, Size).
    '''

    S = outer(tf.sqrt(tf.matrix_diag_part(covariance)))
    V = tf.clip_by_value(covariance / S, stability - 1.0, 1.0 - stability)
    Q = tf.acos(-V) * V + tf.sqrt(1.0 - (V**2.0)) - 1.0
    return S * Q * (1.0 / (2.0 * math.pi))
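As a sanity check against the known ReLU moments: for a standard normal input, Var(max(x, 0)) = 1/2 - 1/(2*pi) ~ 0.3408, which the formula reproduces on the diagonal. A minimal sketch, assuming outer(x) is the project's outer-product helper (stubbed here hypothetically):

import numpy as np
import tensorflow as tf

def outer(x):
    # Hypothetical stand-in for the project's helper: outer(x)[i, j] = x[i] * x[j].
    return x[:, None] * x[None, :]

cov = tf.constant(np.eye(2), dtype=tf.float32)
out = zero_mean_covariance(cov)
# Diagonal: V = 1 gives Q = pi - 1, so each entry is (pi - 1) / (2 * pi) ~ 0.3408.
# Off-diagonal: V = 0 gives Q = acos(0) * 0 + 1 - 1 = 0.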
Example #4
def compute_degree(g1, g2, eps=1e-7):
  """Compute the degree between two vectors using their usual inner product."""

  def _dot(u, v):
    return tf.reduce_sum(u * v)

  g1_norm = tf.sqrt(_dot(g1, g1))
  g2_norm = tf.sqrt(_dot(g2, g2))
  if g1_norm.numpy() == 0 and g2_norm.numpy() == 0:
    cosine = 1. - eps
  else:
    g1_norm = 1. if g1_norm.numpy() == 0 else g1_norm
    g2_norm = 1. if g2_norm.numpy() == 0 else g2_norm
    cosine = _dot(g1, g2) / g1_norm / g2_norm
    # Restrict to arccos range
    cosine = tf.minimum(tf.maximum(cosine, eps - 1.), 1. - eps)
  degree = tf.acos(cosine) * 180. / 3.141592653589793

  return degree
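A usage sketch (compute_degree calls .numpy(), so eager execution is assumed):

import tensorflow as tf

g1 = tf.constant([1.0, 0.0])
g2 = tf.constant([0.0, 1.0])
print(compute_degree(g1, g2))  # ~90.0 degrees, up to the eps clipping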
Example #5
    def K(self, X, X2=None, presliced=False):
        if not presliced:
            X, X2 = self._slice(X, X2)

        X_denominator = tf.sqrt(self._weighted_product(X))
        if X2 is None:
            X2 = X
            X2_denominator = X_denominator
        else:
            X2_denominator = tf.sqrt(self._weighted_product(X2))

        numerator = self._weighted_product(X, X2)
        cos_theta = numerator / X_denominator[:, None] / X2_denominator[None, :]
        jitter = 1e-15
        theta = tf.acos(jitter + (1 - 2 * jitter) * cos_theta)

        return self.variance * (1. / np.pi) * self._J(theta) * \
               X_denominator[:, None] ** self.order * \
               X2_denominator[None, :] ** self.order
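_J here is the order-dependent angular part of the Cho & Saul (2009) arc-cosine kernel; the actual method lives elsewhere in the kernel class, but for the supported orders it typically looks like this sketch:

def _J(self, theta):
    # J_n(theta) from Cho & Saul (2009) for orders 0, 1, 2.
    if self.order == 0:
        return np.pi - theta
    elif self.order == 1:
        return tf.sin(theta) + (np.pi - theta) * tf.cos(theta)
    elif self.order == 2:
        return 3.0 * tf.sin(theta) * tf.cos(theta) + \
               (np.pi - theta) * (1.0 + 2.0 * tf.cos(theta) ** 2)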
Example #6
def combine_loss_val(embedding, labels, w_init, out_num, margin_a, margin_m, margin_b, s):
    '''
    This code is contributed by RogerLo. Thanks for your contribution.

    :param embedding: the input embedding vectors
    :param labels: the input labels, with shape (batch_size,)
    :param w_init: initializer for the class-weight matrix
    :param out_num: number of output classes
    :param margin_a: multiplicative angular margin (SphereFace-style)
    :param margin_m: additive angular margin (ArcFace-style)
    :param margin_b: additive cosine margin (CosFace-style)
    :param s: scale factor, default is 64
    :return: the final calculated output; it is fed into tf.nn.softmax directly
    '''
    weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                              initializer=w_init, dtype=tf.float32)
    weights_unit = tf.nn.l2_normalize(weights, axis=0)
    embedding_unit = tf.nn.l2_normalize(embedding, axis=1)
    cos_t = tf.matmul(embedding_unit, weights_unit)
    ordinal = tf.constant(list(range(0, embedding.get_shape().as_list()[0])), tf.int64)
    ordinal_y = tf.stack([ordinal, labels], axis=1)
    zy = cos_t * s
    sel_cos_t = tf.gather_nd(zy, ordinal_y)
    if margin_a != 1.0 or margin_m != 0.0 or margin_b != 0.0:
        if margin_a == 1.0 and margin_m == 0.0:
            s_m = s * margin_b
            new_zy = sel_cos_t - s_m
        else:
            cos_value = sel_cos_t / s
            t = tf.acos(cos_value)
            if margin_a != 1.0:
                t = t * margin_a
            if margin_m > 0.0:
                t = t + margin_m
            body = tf.cos(t)
            if margin_b > 0.0:
                body = body - margin_b
            new_zy = body * s
    else:
        new_zy = sel_cos_t  # no margin configured; logits are unchanged
    updated_logits = tf.add(zy, tf.scatter_nd(ordinal_y, tf.subtract(new_zy, sel_cos_t), zy.get_shape()))
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=updated_logits))
    predict_cls = tf.argmax(updated_logits, 1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(predict_cls, tf.int64), tf.cast(labels, tf.int64)), 'float'))
    predict_cls_s = tf.argmax(zy, 1)
    accuracy_s = tf.reduce_mean(tf.cast(tf.equal(tf.cast(predict_cls_s, tf.int64), tf.cast(labels, tf.int64)), 'float'))
    return zy, loss, accuracy, accuracy_s, predict_cls_s
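The three margins unify the common angular-margin losses: the target logit becomes cos(margin_a * t + margin_m) - margin_b. Plausible settings (illustrative, not taken from this snippet):

# SphereFace-style multiplicative angular margin:
#   combine_loss_val(..., margin_a=1.35, margin_m=0.0, margin_b=0.0, s=64)
# ArcFace-style additive angular margin:
#   combine_loss_val(..., margin_a=1.0, margin_m=0.5, margin_b=0.0, s=64)
# CosFace-style additive cosine margin:
#   combine_loss_val(..., margin_a=1.0, margin_m=0.0, margin_b=0.35, s=64)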
Example #7
    def _arccosine(self, slist1, slist2, tf_embs):
        """
        Uses an arccosine kernel of degree 0 to calculate
        the similarity matrix between two vectors of embeddings.
        This is just cosine similarity projected into the [0,1] interval.
        """
        dot = self._dot(slist1, slist2, tf_embs)
        # This calculation corresponds to an arc-cosine with
        # degree 0. It can be interpreted as cosine
        # similarity but projected into a [0,1] interval.
        # TODO: arc-cosine with degree 1.
        tf_pi = tf.constant(np.pi, dtype=tf.float64)
        tf_norms = tf.constant(self.norms, dtype=tf.float64, name='norms')
        normlist1 = tf.gather(tf_norms, slist1, name='normlist1')
        normlist2 = tf.matrix_transpose(tf.gather(tf_norms, slist2, name='normlist2'))
        norms = tf.matmul(normlist1, normlist2)  # tf.batch_matmul in TF < 1.0
        cosine = tf.clip_by_value(tf.truediv(dot, norms), -1, 1)
        angle = tf.acos(cosine)
        # The zero vector has norm 0 and yields NaN; map those entries to pi.
        angle = tf.where(tf.is_nan(angle), tf.ones_like(angle) * tf_pi, angle)  # tf.select in TF < 1.0
        return 1 - (angle / tf_pi)
Example #8
    def K(self, X, X2=None, presliced=False):
        if not presliced:
            X, X2 = self._slice(X, X2)

        X_denominator = tf.sqrt(self._weighted_product(X))
        if X2 is None:
            X2 = X
            X2_denominator = X_denominator
        else:
            X2_denominator = tf.sqrt(self._weighted_product(X2))

        numerator = self._weighted_product(X, X2)
        X_denominator = tf.expand_dims(X_denominator, -1)
        X2_denominator = tf.matrix_transpose(tf.expand_dims(X2_denominator, -1))
        cos_theta = numerator / X_denominator / X2_denominator
        jitter = 1e-15
        theta = tf.acos(jitter + (1 - 2 * jitter) * cos_theta)

        return self.variance * (1. / np.pi) * self._J(theta) * \
               X_denominator ** self.order * \
               X2_denominator ** self.order
Example #9
    def _arccosine(self, s1, s2, tf_embs):
        """
        Uses an arccosine kernel of degree 0 to calculate
        the similarity matrix between two vectors of embeddings.
        This is just cosine similarity projected into the [0,1] interval.
        """
        tf_pi = tf.constant(np.pi, dtype=tf.float64)
        mat1 = tf.gather(tf_embs, s1)
        mat2 = tf.gather(tf_embs, s2)
        tf_norms = tf.constant(self.norms, dtype=tf.float64, name='norms')
        norms1 = tf.gather(tf_norms, s1)
        norms2 = tf.gather(tf_norms, s2)
        dot = tf.matmul(mat1, tf.transpose(mat2))
        norms = tf.matmul(norms1, tf.transpose(norms2))
        # We clip values due to numerical errors
        # which put some values outside the arccosine range.
        cosine = tf.clip_by_value(dot / norms, -1, 1)
        angle = tf.acos(cosine)
        # The 0 vector has norm 0, which generates a NaN.
        # We catch these NaNs and replace them with pi,
        # which ends up returning 0 similarity.
        angle = tf.where(tf.is_nan(angle), tf.ones_like(angle) * tf_pi, angle)  # tf.select in TF < 1.0
        return 1 - (angle / tf_pi)
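A self-contained variant of the same degree-0 arc-cosine similarity on dense matrices, as a sketch (names are illustrative):

import numpy as np
import tensorflow as tf

def arccosine_similarity(a, b):
    # Rows of a and b are embeddings; returns pairwise similarities in [0, 1].
    dot = tf.matmul(a, b, transpose_b=True)
    norms = tf.matmul(tf.norm(a, axis=1, keepdims=True),
                      tf.norm(b, axis=1, keepdims=True), transpose_b=True)
    cosine = tf.clip_by_value(dot / norms, -1.0, 1.0)
    return 1.0 - tf.acos(cosine) / np.pi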
Example #10
File: gan_grid.py  Project: Dasona/DIGITS
def slerp(initial, final, progress):
    omega = tf.acos(dot_product(initial / l2_norm(initial), final / l2_norm(final)))
    so = tf.sin(omega)
    return tf.sin((1.0 - progress) * omega) / so * initial + tf.sin(progress * omega) / so * final
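dot_product and l2_norm are helpers from the surrounding project; hypothetical stand-ins that make the snippet self-contained:

import tensorflow as tf

def l2_norm(x):
    # Hypothetical stand-in: Euclidean norm of a flat vector.
    return tf.sqrt(tf.reduce_sum(tf.square(x)))

def dot_product(a, b):
    # Hypothetical stand-in: standard inner product.
    return tf.reduce_sum(a * b)

# slerp(a, b, 0.0) ~ a and slerp(a, b, 1.0) ~ b, provided the inputs are
# not parallel (sin(omega) != 0).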
Example #11
    def _conv_layer(self,
                    bottom,
                    ksize,
                    n_filt,
                    is_training,
                    name,
                    stride=1,
                    bn=False,
                    relu=True,
                    pad='SAME',
                    norm='none',
                    reg=False,
                    orth=False,
                    w_norm='none'):

        with tf.variable_scope(name) as scope:
            n_input = bottom.get_shape().as_list()[3]
            shape = [ksize, ksize, n_input, n_filt]
            print("shape of filter %s: %s" % (name, str(shape)))

            filt = self.get_conv_filter(
                shape,
                reg,
                stddev=tf.sqrt(2.0 / tf.to_float(ksize * ksize * n_input)))
            conv = tf.nn.conv2d(bottom,
                                filt, [1, stride, stride, 1],
                                padding=pad)
            xnorm = self._get_input_norm(bottom, ksize, pad)
            wnorm = self._get_filter_norm(filt)

            if w_norm == 'linear':
                conv = conv / wnorm
                # -2/pi * acos(x) + 1 maps the angle linearly to [-1, 1] (0.63662 ~ 2/pi)
                conv = -0.63662 * tf.acos(conv) + 1
            elif w_norm == 'cosine':
                conv = conv / wnorm
            elif w_norm == 'sigmoid':
                k_value_w = 0.3
                constant_coeff_w = (1 + numpy.exp(
                    -numpy.pi /
                    (2 * k_value_w))) / (1 - numpy.exp(-numpy.pi /
                                                       (2 * k_value_w)))
                conv = conv / wnorm
                conv = constant_coeff_w * (1 - tf.exp(
                    tf.acos(conv) / k_value_w - numpy.pi /
                    (2 * k_value_w))) / (1 + tf.exp(
                        tf.acos(conv) / k_value_w - numpy.pi /
                        (2 * k_value_w)))
            elif w_norm == 'none':
                pass

            if norm == 'linear':
                conv = conv / xnorm
                conv = conv / wnorm
                conv = -0.63662 * tf.acos(conv) + 1  # 0.63662 ~ 2/pi
            elif norm == 'cosine':
                conv = conv / xnorm
                conv = conv / wnorm
            elif norm == 'sigmoid':
                k_value = 0.3
                constant_coeff = (1 + numpy.exp(-numpy.pi / (2 * k_value))) / (
                    1 - numpy.exp(-numpy.pi / (2 * k_value)))
                conv = conv / xnorm
                conv = conv / wnorm
                conv = constant_coeff * (
                    1 -
                    tf.exp(tf.acos(conv) / k_value - numpy.pi /
                           (2 * k_value))) / (1 + tf.exp(
                               tf.acos(conv) / k_value - numpy.pi /
                               (2 * k_value)))
            elif norm == 'lr_sigmoid':
                k_value_lr = tf.get_variable(
                    'k_value_lr',
                    n_filt,
                    initializer=tf.constant_initializer(0.7),
                    dtype=tf.float32)
                k_value_lr = tf.abs(k_value_lr) + 0.05
                constant_coeff = (1 + tf.exp(-numpy.pi / (2 * k_value_lr))) / (
                    1 - tf.exp(-numpy.pi / (2 * k_value_lr)))
                conv = conv / xnorm
                conv = conv / wnorm
                conv = constant_coeff * (1 - tf.exp(
                    tf.acos(conv) / k_value_lr - numpy.pi /
                    (2 * k_value_lr))) / (1 + tf.exp(
                        tf.acos(conv) / k_value_lr - numpy.pi /
                        (2 * k_value_lr)))
            elif norm == 'none':
                pass

            if orth:
                self._add_orthogonal_constraint(filt, n_filt)

            if bn:
                conv = self.batch_norm(conv, n_filt, is_training)

            if relu:
                return tf.nn.relu(conv)
            else:
                return conv
Example #12
    def testRenames(self):
        with self.test_session():
            self.assertAllClose(1.04719755, tf.acos(0.5).eval())
            self.assertAllClose(0.5, tf.rsqrt(4.0).eval())
Example #13
    def testRenames(self):
        self.assertAllClose(1.04719755, tf.acos(0.5))
        self.assertAllClose(0.5, tf.rsqrt(4.0))
Example #14
sts_input1 = tf.sparse_placeholder(tf.int64, shape=(None, None))
sts_input2 = tf.sparse_placeholder(tf.int64, shape=(None, None))

# For evaluation we use exactly normalized rather than
# approximately normalized.
sts_encode1 = tf.nn.l2_normalize(
    embed(inputs=dict(values=sts_input1.values,
                      indices=sts_input1.indices,
                      dense_shape=sts_input1.dense_shape)),
    axis=1)
sts_encode2 = tf.nn.l2_normalize(
    embed(inputs=dict(values=sts_input2.values,
                      indices=sts_input2.indices,
                      dense_shape=sts_input2.dense_shape)),
    axis=1)

sim_scores = -tf.acos(
    tf.reduce_sum(tf.multiply(sts_encode1, sts_encode2), axis=1))


#dataset = sts_test[11:16] #@param ["sts_dev", "sts_test"] {type:"raw"}

#dataset = pandas.DataFrame()
#dataset['sent_2'] = sts_test['sent_2'][11:16]
#dataset['sent_1'] = "A man is cutting an onion"
#dataset.reset_index(drop = True,inplace = True)
#dataset = dataset[['sent_1','sent_2']]
#dataset

QuestionBank = pd.read_excel(
    "C:\Girish\GDSAutomationCentral\EYGDSAC_AnalyticsInitiatives\GDSAnalytics\Infosec_Questionnaire\Colated_Questionnaire.xlsx",
    sheet_name="Q&A")
Example #15
File: rbm.py  Project: rongacmer/GWAS
    def __init__(
            self,
            n_visible,
            n_hidden,
            load=False,
            save=False,
            save_filename=None,
            save_name=None,
            learning_rate=0.01,
            momentum=0.95,
            xavier_const=1.0,
            err_function='mse',
            use_tqdm=False,
            # DEPRECATED:
            tqdm=None):

        tf.reset_default_graph()

        if not 0.0 <= momentum <= 1.0:
            raise ValueError('momentum should be in range [0, 1]')

        if err_function not in {'mse', 'cosine'}:
            raise ValueError(
                'err_function should be either \'mse\' or \'cosine\'')

        self._use_tqdm = use_tqdm
        self._tqdm = None

        if use_tqdm or tqdm is not None:
            from tqdm import tqdm
            self._tqdm = tqdm

        # Load or save weights
        self._load = load
        self._save = save
        self._save_filename = save_filename
        self._save_name = save_name

        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.learning_rate = learning_rate
        self.momentum = momentum

        self.x = tf.placeholder(tf.float32, [None, self.n_visible])
        self.y = tf.placeholder(tf.float32, [None, self.n_hidden])

        self.w = tf.Variable(tf_xavier_init(self.n_visible,
                                            self.n_hidden,
                                            const=xavier_const),
                             dtype=tf.float32)
        self.visible_bias = tf.Variable(tf.zeros([self.n_visible]),
                                        dtype=tf.float32)
        self.hidden_bias = tf.Variable(tf.zeros([self.n_hidden]),
                                       dtype=tf.float32)

        self.delta_w = tf.Variable(tf.zeros([self.n_visible, self.n_hidden]),
                                   dtype=tf.float32)
        self.delta_visible_bias = tf.Variable(tf.zeros([self.n_visible]),
                                              dtype=tf.float32)
        self.delta_hidden_bias = tf.Variable(tf.zeros([self.n_hidden]),
                                             dtype=tf.float32)

        self.update_weights = None
        self.update_deltas = None
        self.compute_hidden = None
        self.compute_visible = None
        self.compute_visible_from_hidden = None

        self._initialize_vars()

        assert self.update_weights is not None
        assert self.update_deltas is not None
        assert self.compute_hidden is not None
        assert self.compute_visible is not None
        assert self.compute_visible_from_hidden is not None

        if err_function == 'cosine':
            x1_norm = tf.nn.l2_normalize(self.x, 1)
            x2_norm = tf.nn.l2_normalize(self.compute_visible, 1)
            cos_val = tf.reduce_mean(tf.reduce_sum(tf.multiply(x1_norm, x2_norm),
                                                   1))  # tf.mul in TF < 1.0
            self.compute_err = tf.acos(cos_val) / tf.constant(np.pi)
        else:
            self.compute_err = tf.reduce_mean(
                tf.square(self.x - self.compute_visible))

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

        # Load weights
        if self._load:
            self.load_weights(self._save_filename, self._save_name)
Example #16
        groundtruth = ((z[j, :, :, :] / 255.0) - 0.5) * 2
        mask = y[j, :, :, 0]
        bmask = tf.cast(mask, tf.bool)

        total_pixels += tf.count_nonzero(y[j, :, :, 0])
        #tf.assign(bmask,tf.not_equal(bmask,0))

        a11 = tf.boolean_mask(tf.reduce_sum(prediction * prediction, axis=2),
                              bmask)
        a22 = tf.boolean_mask(tf.reduce_sum(norm * norm, axis=2), bmask)
        a12 = tf.boolean_mask(tf.reduce_sum(prediction * norm, axis=2), bmask)

        cos_dist = a12 / tf.sqrt(a11 * a22)
        #tf.assign(cos_dist[tf.is_nan(cos_dist)],-1)  # missing this in the evaluation
        cos_dist = tf.clip_by_value(cos_dist, -1, 1)
        angle_error = tf.acos(cos_dist)
        mean_angle_error += tf.reduce_sum(angle_error)

    cost = mean_angle_error / tf.cast(total_pixels, tf.float32)
    cost = tf.divide(cost, batch_size)

    opt = tf.train.AdamOptimizer(0.0001).minimize(cost)

# the driver
random.shuffle(data)
train, test = train_test_split(data, data_size // 20)

with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(1, epochs + 1):
        num_batches = 0
Example #17
def slerp(a, b, t):
    omega = tf.acos(tf.reduce_sum(a / tf.norm(a, axis=1, keepdims=True) * b / tf.norm(b, axis=1, keepdims=True), axis=1))
    omega = tf.expand_dims(omega, axis=1)
    res = (tf.sin((1.0 - t) * omega) / tf.sin(omega)) * a + (tf.sin(t * omega) / tf.sin(omega)) * b
    return res
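This batched slerp divides by sin(omega), which vanishes when a and b are parallel, so callers typically keep the endpoints away from that case. A usage sketch:

import tensorflow as tf

a = tf.constant([[1.0, 0.0]])
b = tf.constant([[0.0, 1.0]])
mid = slerp(a, b, 0.5)  # ~[[0.7071, 0.7071]]: halfway along the arc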
Example #18
def loss(pred_quater,
         gt_quater,
         pred_transl,
         gt_transl,
         pred_mask,
         pred_traj,
         gt_traj,
         pred_score,
         gt_score,
         pred_r,
         gt_r,
         pred_flow,
         gt_flow,
         batch_size,
         dim=3,
         h=240,
         w=320):
    obj_mask_origin = tf.greater(gt_traj[:, :, :, 2],
                                 tf.zeros_like(gt_traj[:, :, :, 2]))
    obj_mask_origin = tf.cast(obj_mask_origin, tf.float32)
    obj_mask_1 = tf.reshape(obj_mask_origin, [-1, h, w, 1])
    obj_mask_6 = tf.tile(obj_mask_1, [1, 1, 1, 6])
    obj_mask_3 = tf.tile(obj_mask_1, [1, 1, 1, 3])
    obj_tmp = tf.zeros_like(obj_mask_3)
    obj_final = tf.concat([obj_mask_3, obj_tmp], 3)

    loss_mask = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.cast(
            obj_mask_origin, dtype=tf.int32),
                                                       logits=pred_mask))
    score_weight = obj_mask_origin + 0.00001

    loss_score = tf.reduce_sum(
        (score_weight) * tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.cast(gt_score, dtype=tf.int32), logits=pred_score)) / (
                tf.reduce_sum(score_weight) + 0.000001)

    loss_elem3 = tf.reduce_sum(
        tf.squared_difference(pred_traj[:, :, :, 0:3], gt_traj[:, :, :, 0:3]) *
        obj_mask_3) / (tf.reduce_sum(obj_mask_1) + 0.000001)
    loss_elem6 = tf.reduce_sum(
        tf.squared_difference(pred_traj[:, :, :, 3:6], gt_traj[:, :, :, 3:6]) *
        obj_mask_3) / (tf.reduce_sum(obj_mask_1) + 0.000001)

    loss_boundary = tf.reduce_sum(
        tf.squared_difference(gt_r, pred_r) *
        obj_mask_1) / (tf.reduce_sum(obj_mask_1) + 0.000001)
    loss_flow = tf.reduce_sum(
        tf.squared_difference(pred_flow, gt_flow) *
        obj_mask_3) / (tf.reduce_sum(obj_mask_1) + 0.000001)
    loss_transl = tf.reduce_sum(
        tf.squared_difference(pred_transl, gt_transl) *
        obj_mask_3) / (tf.reduce_sum(obj_mask_1) + 0.000001)
    gt_angle = tf.acos(gt_quater[:, :, :, 0:1]) * 2
    gt_angle_axis = gt_quater[:, :, :, 1:4] / (tf.sin(gt_angle / 2) +
                                               0.000001) * gt_angle

    loss_quater = tf.reduce_sum(
        tf.squared_difference(gt_angle_axis, pred_quater) *
        obj_mask_3) / (tf.reduce_sum(obj_mask_1 + 0.000001))

    loss_variance = 0.0
    loss_violation = 0.0

    for b_i in range(batch_size):  # xrange in Python 2
        tmp = gt_traj[b_i, :, :, 2]
        tmp = tf.reshape(tmp, (-1, ))
        y, idx = tf.unique(tmp)
        idx = tf.reshape(idx, (h, w, 1))

        ins_tmp = tf.ones_like(idx)
        ones = tf.ones_like(gt_traj[b_i, :, :, 2])

        obj_mask = obj_mask_origin[b_i]

        def instance_variance_loss(z):
            idx_mask = tf.equal(gt_traj[b_i, :, :, 2], ones * z)
            idx_mask = tf.reshape(idx_mask, [h, w, 1])
            idx_mask = tf.cast(idx_mask, tf.float32)
            idx_mask_6d = tf.tile(idx_mask, [1, 1, 6])
            tmp_prd = idx_mask_6d * pred_traj[b_i]
            tmp_prd = tf.reshape(tmp_prd, (-1, 6))
            tmp_mean = tf.reduce_sum(
                tmp_prd, axis=0) / (tf.reduce_sum(idx_mask) + 0.000001)
            tmp_mean = tf.reshape(tmp_mean, (1, 1, 6))
            tmp_mean_final = tf.tile(tmp_mean, [h, w, 1])
            loss_variance_instance = tf.reduce_sum(
                idx_mask_6d *
                tf.squared_difference(tmp_mean_final, pred_traj[b_i])) / (
                    tf.reduce_sum(idx_mask) + 0.000001)
            return loss_variance_instance

        #loss_variance += tf.reduce_mean(tf.map_fn(instance_variance_loss,y))

        def instance_violation_loss(z):
            idx_mask = tf.equal(gt_traj[b_i, :, :, dim - 1], ones * z)
            idx_mask = tf.logical_and(idx_mask, tf.cast(obj_mask, tf.bool))
            idx_mask = tf.reshape(idx_mask, [h, w, 1])
            idx_mask = tf.cast(idx_mask, tf.float32)
            idx_mask_6d = tf.tile(idx_mask, [1, 1, 6])

            tmp_prd = idx_mask_6d * pred_traj[b_i, :, :, 0:6]
            tmp_prd = tf.reshape(tmp_prd, (-1, 6))

            tmp_r = idx_mask_6d[:, :, 0:1] * pred_r[b_i]
            tmp_r_mean = tf.reduce_sum(tmp_r) / (tf.reduce_sum(idx_mask) +
                                                 0.000001)
            r = tmp_r_mean * 0.5

            friend_mask = idx_mask_6d[:, :, 0]
            l2_error = tf.reduce_sum(
                tf.squared_difference(pred_traj[b_i], gt_traj[b_i]), 2)
            dist = tf.sqrt(l2_error)
            pull_mask = tf.less(r * ones, dist)
            pull_mask = tf.cast(pull_mask, tf.float32)
            pos = tf.reduce_sum(friend_mask * pull_mask * l2_error) / (
                tf.reduce_sum(friend_mask * pull_mask) + 0.000001)
            return pos

        #loss_violation += tf.reduce_mean(tf.map_fn(instance_violation_loss,y))

        def instance_sceneflow_loss(z):
            idx_mask = tf.equal(gt_traj[b_i, :, :, dim - 1], ones * z)
            idx_mask = tf.logical_and(idx_mask, tf.cast(obj_mask, tf.bool))
            idx_mask = tf.reshape(idx_mask, [h, w, 1])
            idx_mask = tf.cast(idx_mask, tf.float32)
            idx_mask_3d = tf.tile(idx_mask, [1, 1, 3])

            tmp_quater = idx_mask_3d * pred_quater[b_i, :, :, 0:3]
            tmp_quater = tf.reshape(tmp_quater, (-1, 3))
            tmp_quater = tf.reduce_sum(
                tmp_quater, axis=0) / (tf.reduce_sum(idx_mask) + 0.000001)

            tmp_transl = idx_mask_3d * pred_transl[b_i, :, :, 0:3]
            tmp_transl = tf.reshape(tmp_transl, (-1, 3))
            tmp_transl = tf.reduce_sum(
                tmp_transl, axis=0) / (tf.reduce_sum(idx_mask) + 0.000001)

            w1, x1, y1, z1 = tf.unstack(tmp_quater, axis=-1)
            x2, y2, z2 = tf.unstack(frame2_input_xyz[b_i], axis=-1)
            wm = -x1 * x2 - y1 * y2 - z1 * z2
            xm = w1 * x2 + y1 * z2 - z1 * y2
            ym = w1 * y2 + z1 * x2 - x1 * z2
            zm = w1 * z2 + x1 * y2 - y1 * x2

            x = -wm * x1 + xm * w1 - ym * z1 + zm * y1
            y = -wm * y1 + ym * w1 - zm * x1 + xm * z1
            z = -wm * z1 + zm * w1 - xm * y1 + ym * x1

            tmp_flow = tf.stack((x, y, z), axis=-1)
            tmp_flow = tmp_flow + tmp_transl - frame2_input_xyz[b_i]
            tmp_loss = tf.reduce_sum(idx_mask_3d * tf.squared_difference(
                tmp_flow, gt_flow[b_i])) / (tf.reduce_sum(idx_mask) + 0.000001)
            return tmp_loss

    return loss_quater, loss_transl, loss_boundary, loss_flow, loss_elem3, loss_elem6, loss_mask, loss_score
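The gt_angle / gt_angle_axis lines above are the standard unit-quaternion to angle-axis conversion (angle = 2 * acos(w), axis = v / sin(angle / 2)); as a standalone sketch:

import tensorflow as tf

def quaternion_to_angle_axis(q, eps=1e-6):
    # q = (w, x, y, z), assumed unit-norm; returns the axis scaled by the angle.
    w, v = q[..., 0:1], q[..., 1:4]
    angle = 2.0 * tf.acos(tf.clip_by_value(w, -1.0, 1.0))
    return v / (tf.sin(angle / 2.0) + eps) * angle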
Example #19
File: rbm.py  Project: meownoid/rbmae
    def __init__(self,
                 n_visible,
                 n_hidden,
                 learning_rate=0.01,
                 momentum=0.95,
                 xavier_const=1.0,
                 err_function='mse',
                 use_tqdm=False,
                 # DEPRECATED:
                 tqdm=None):
        if not 0.0 <= momentum <= 1.0:
            raise ValueError('momentum should be in range [0, 1]')

        if err_function not in {'mse', 'cosine'}:
            raise ValueError('err_function should be either \'mse\' or \'cosine\'')

        self._use_tqdm = use_tqdm
        self._tqdm = None

        if use_tqdm or tqdm is not None:
            from tqdm import tqdm
            self._tqdm = tqdm

        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.learning_rate = learning_rate
        self.momentum = momentum

        self.x = tf.placeholder(tf.float32, [None, self.n_visible])
        self.y = tf.placeholder(tf.float32, [None, self.n_hidden])

        self.w = tf.Variable(tf_xavier_init(self.n_visible, self.n_hidden, const=xavier_const), dtype=tf.float32)
        self.visible_bias = tf.Variable(tf.zeros([self.n_visible]), dtype=tf.float32)
        self.hidden_bias = tf.Variable(tf.zeros([self.n_hidden]), dtype=tf.float32)

        self.delta_w = tf.Variable(tf.zeros([self.n_visible, self.n_hidden]), dtype=tf.float32)
        self.delta_visible_bias = tf.Variable(tf.zeros([self.n_visible]), dtype=tf.float32)
        self.delta_hidden_bias = tf.Variable(tf.zeros([self.n_hidden]), dtype=tf.float32)

        self.update_weights = None
        self.update_deltas = None
        self.compute_hidden = None
        self.compute_visible = None
        self.compute_visible_from_hidden = None

        self._initialize_vars()

        assert self.update_weights is not None
        assert self.update_deltas is not None
        assert self.compute_hidden is not None
        assert self.compute_visible is not None
        assert self.compute_visible_from_hidden is not None

        if err_function == 'cosine':
            x1_norm = tf.nn.l2_normalize(self.x, 1)
            x2_norm = tf.nn.l2_normalize(self.compute_visible, 1)
            cos_val = tf.reduce_mean(tf.reduce_sum(tf.multiply(x1_norm, x2_norm), 1))  # tf.mul in TF < 1.0
            self.compute_err = tf.acos(cos_val) / tf.constant(np.pi)
        else:
            self.compute_err = tf.reduce_mean(tf.square(self.x - self.compute_visible))

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
Example #20
    def Prepare(self):
        """
        Build the required graph.
        Also build evaluations.
        """
        with tf.name_scope("Capsules"):
            self.xyzs = tf.placeholder(dtype=tf.float64,
                                       shape=(self.batch_size, self.MaxNAtom,
                                              3))
            self.frac_sphere = tf.placeholder(dtype=tf.float64)

            thetas = tf.acos(
                2.0 * tf.random_uniform([self.batch_size], dtype=tf.float64) -
                1)
            phis = tf.random_uniform([self.batch_size],
                                     dtype=tf.float64) * 2 * Pi
            psis = tf.random_uniform(
                [self.batch_size],
                dtype=tf.float64) * 2 * Pi * self.frac_sphere
            ts_in = tf.stack([thetas, phis, psis], axis=-1)
            ts_inv = tf.stack([thetas, phis, -1.0 * psis], axis=-1)
            matrices = TF_RotationBatch(thetas, phis, psis)

            self.xyzs -= self.xyzs[:, 0, :][:, tf.newaxis, :]
            self.xyzs_t = tf.einsum('ijk,ikl->ijl', self.xyzs, matrices)
            # Transform the XYZ's
            # The transformation is only WRT the first atom
            # both orig. and transformed system get embedded.

            # Each atom in xyzs gets transformed by ts_in to make t_xyzs
            dxyzs = tf.expand_dims(self.xyzs, axis=2) - tf.expand_dims(
                self.xyzs, axis=1)
            dxyzs_t = tf.expand_dims(self.xyzs_t, axis=2) - tf.expand_dims(
                self.xyzs_t, axis=1)
            dist_tensor = tf.norm(dxyzs + 1.e-36, axis=3)
            dist_tensor_t = tf.norm(dxyzs_t + 1.e-36, axis=3)
            self.Embedded = tf.reshape(
                tf.reduce_sum(tf_spherical_harmonics(dxyzs, dist_tensor,
                                                     self.lmax),
                              axis=2)[:, 0, :],
                [self.batch_size, (self.lmax + 1)**2])
            self.Embedded_t = tf.reshape(
                tf.reduce_sum(tf_spherical_harmonics(dxyzs_t, dist_tensor_t,
                                                     self.lmax),
                              axis=2)[:, 0, :],
                [self.batch_size, (self.lmax + 1)**2])
            self.EmbeddedShp = tf.shape(self.Embedded)[-1]

            self.Latent, self.angleOutput, self.Output = self.Capsules(
                self.Embedded, ts_in)
            self.tLatent, self.tangleOutput, self.tOutput = self.Capsules(
                self.Embedded_t, ts_inv)
            self.dLdR = tf.norm(tf.gradients(self.tLatent, psis))

            self.fwd_reconstruction = tf.losses.mean_squared_error(
                self.Embedded_t, self.Output)
            self.rev_reconstruction = tf.losses.mean_squared_error(
                self.Embedded, self.tOutput)
            self.invariance = tf.losses.mean_squared_error(
                self.tLatent, self.Latent)
            self.loss = self.fwd_reconstruction + self.rev_reconstruction + self.invariance

            tf.add_to_collection('losses', self.loss)
        with tf.name_scope("Adam_optimizer"):
            self.global_step = tf.Variable(0, trainable=False)
            optimizer = tf.train.AdamOptimizer(self.learning_rate)
            tvars = tf.trainable_variables()
            grads_and_vars = optimizer.compute_gradients(self.loss, tvars)
            #clipped = [(tf.clip_by_value(grad, -5, 5), tvar) for grad, tvar in grads_and_vars]
            self.train_op = optimizer.apply_gradients(
                grads_and_vars,
                global_step=self.global_step,
                name="minimize_cost")
        init = tf.global_variables_initializer()
        self.saver = tf.train.Saver(max_to_keep=10000)
        self.sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True))
        self.sess.run(init)
        self.summary_writer = tf.summary.FileWriter("./networks/",
                                                    self.sess.graph)
        return
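The thetas = tf.acos(2.0 * u - 1) line above is the standard inverse-CDF trick for drawing polar angles uniformly over the sphere: the area element is sin(theta) dtheta dphi, so cos(theta) must be uniform on [-1, 1]. A standalone NumPy check:

import numpy as np

rng = np.random.default_rng(0)
u = rng.uniform(size=100000)
theta = np.arccos(2.0 * u - 1.0)               # polar angle
phi = 2.0 * np.pi * rng.uniform(size=100000)   # azimuth
assert abs(np.mean(np.cos(theta))) < 0.01      # cos(theta) is uniform on [-1, 1]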