Code example #1
File: memory.py Project: tsingcoo/models
  def make_update_op(self, upd_idxs, upd_keys, upd_vals,
                     batch_size, use_recent_idx, intended_output):
    """Function that creates all the update ops."""
    base_update_op = super(LSHMemory, self).make_update_op(
        upd_idxs, upd_keys, upd_vals,
        batch_size, use_recent_idx, intended_output)

    # compute hash slots to be updated
    hash_slot_idxs = self.get_hash_slots(upd_keys)

    # make updates
    update_ops = []
    with tf.control_dependencies([base_update_op]):
      for i, slot_idxs in enumerate(hash_slot_idxs):
        # for each slot, choose which entry to replace
        entry_idx = tf.random_uniform([batch_size],
                                      maxval=self.num_per_hash_slot,
                                      dtype=tf.int32)
        entry_mul = 1 - tf.one_hot(entry_idx, self.num_per_hash_slot,
                                   dtype=tf.int32)
        entry_add = (tf.expand_dims(upd_idxs, 1) *
                     tf.one_hot(entry_idx, self.num_per_hash_slot,
                                dtype=tf.int32))

        mul_op = tf.scatter_mul(self.hash_slots[i], slot_idxs, entry_mul)
        with tf.control_dependencies([mul_op]):
          add_op = tf.scatter_add(self.hash_slots[i], slot_idxs, entry_add)
          update_ops.append(add_op)

    return tf.group(*update_ops)
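
For reference, a minimal runnable sketch of the replace-one-entry pattern used above: tf.scatter_mul zeroes the chosen entry with a one-hot complement, then tf.scatter_add writes the new value into the cleared position. This assumes TF 1.x; the names and values are illustrative, not from the project.

import tensorflow as tf

num_per_slot = 4
slots = tf.Variable([[10, 11, 12, 13],
                     [20, 21, 22, 23]], dtype=tf.int32)

slot_idxs = tf.constant([0, 1])   # one hash slot per batch element
entry_idx = tf.constant([2, 0])   # entry to replace within each slot
new_vals = tf.constant([99, 77])  # replacement memory indices

# Zero the chosen entry, then add the new value into the cleared position.
entry_mul = 1 - tf.one_hot(entry_idx, num_per_slot, dtype=tf.int32)
entry_add = tf.expand_dims(new_vals, 1) * tf.one_hot(entry_idx, num_per_slot,
                                                     dtype=tf.int32)

mul_op = tf.scatter_mul(slots, slot_idxs, entry_mul)
with tf.control_dependencies([mul_op]):
    add_op = tf.scatter_add(slots, slot_idxs, entry_add)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(add_op)
    print(sess.run(slots))  # [[10 11 99 13] [77 21 22 23]]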
Code example #2
File: metrics.py Project: zadiq/competitions
def protocol_1(y_true, y_pred, array=False):
    diff = calc_diff(y_true, y_pred)
    diff /= tf.cast(tf.reduce_prod(tf.shape(y_true)[1:]), tf.float32)
    tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'dadff')
    v = tf.Variable(diff,
                    name='dadff',
                    dtype='float32',
                    validate_shape=False)
    diff = tf.scatter_mul(tf.identity(v), tf.where(diff < 0), neg_weight)
    # diff = tf.scatter_mul(diff, tf.where(diff < 0), neg_weight)
    diff = tf.scatter_mul(K.variable(diff), tf.where(diff > 0),
                          pos_weight)
    # diff = tf.scatter_mul(diff, tf.where(diff > 0), pos_weight)
    diff = diff**2
    if array:
        return diff
    return K.mean(diff)
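
Note that tf.scatter_mul expects a mutable variable ref and 1-D row indices, while tf.where called with a single argument returns [N, rank] coordinates, so the calls above are fragile. A minimal TF 1.x sketch of the same per-sign weighting using plain element-wise ops (neg_weight and pos_weight are hypothetical stand-ins for the example's module-level constants):

import tensorflow as tf

neg_weight, pos_weight = 2.0, 0.5
diff = tf.constant([-1.0, 2.0, -3.0])

# Weight negative and positive residuals differently, element-wise.
weighted = tf.where(diff < 0, diff * neg_weight, diff * pos_weight)

with tf.Session() as sess:
    print(sess.run(weighted))  # [-2.  1. -6.]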
Code example #3
    def test_builder_to_backend_programmatic(
        self, use_cpu_only, backend, rankData_rankIndices, accumulate_mode
    ):
        data_rank, indices_rank = rankData_rankIndices
        data_shape = np.random.randint(low=2, high=5, size=data_rank)
        indices_shape = np.random.randint(low=2, high=5, size=indices_rank)
        updates_shape = list(indices_shape) + list(data_shape[1:])

        data = np.random.rand(*data_shape).astype(np.float32)
        updates = np.random.rand(*updates_shape).astype(np.float32)
        indices = np.random.randint(0, data_shape[0], size=indices_shape).astype(
            np.int32
        )

        def build(data, indices, updates):
            return mb.scatter(
                data=data, indices=indices, updates=updates, mode=accumulate_mode
            )

        with tf.Graph().as_default(), tf.Session() as sess:
            tf_output = tf.Variable(data)
            sess.run(tf.global_variables_initializer())
            if accumulate_mode == "update":
                sess.run(tf.scatter_update(tf_output, indices, updates))
            if accumulate_mode == "add":
                sess.run(tf.scatter_add(tf_output, indices, updates))
            if accumulate_mode == "sub":
                sess.run(tf.scatter_sub(tf_output, indices, updates))
            if accumulate_mode == "mul":
                sess.run(tf.scatter_mul(tf_output, indices, updates))
            if accumulate_mode == "div":
                sess.run(tf.scatter_div(tf_output, indices, updates))
            if accumulate_mode == "max":
                sess.run(tf.scatter_max(tf_output, indices, updates))
            if accumulate_mode == "min":
                sess.run(tf.scatter_min(tf_output, indices, updates))
            expected_output = sess.run(tf_output)

        input_placeholders = {
            "data": mb.placeholder(shape=data.shape),
            "indices": mb.placeholder(shape=indices.shape, dtype=types.int32),
            "updates": mb.placeholder(shape=updates.shape),
        }

        input_values = {"data": data, "indices": indices, "updates": updates}

        expected_output_types = tuple(data_shape[:]) + (types.fp32,)
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_output,
            use_cpu_only=use_cpu_only,
            frontend_only=False,
            backend=backend,
        )
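
For reference, a tiny TF 1.x sketch of two of the scatter modes exercised above (values chosen arbitrarily for illustration):

import numpy as np
import tensorflow as tf

with tf.Graph().as_default(), tf.Session() as sess:
    v = tf.Variable(np.ones(4, dtype=np.float32))
    sess.run(tf.global_variables_initializer())
    sess.run(tf.scatter_add(v, [0, 1], [5.0, 5.0]))   # v -> [6, 6, 1, 1]
    sess.run(tf.scatter_update(v, [1], [0.0]))        # v -> [6, 0, 1, 1]
    print(sess.run(v))  # [6. 0. 1. 1.]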
Code example #4
File: ARU.py Project: pratham16cse/ARU
  def adapt(self, inputs_arg, labels, tsIndices, mask, valid=None, scope=None):
    """
    :param inputs: [Batch,time,depth]
    :param state:  tuple*batch
    :param labels: [batch,time, num_y]
    :param valid: [batch,time]
    :return:
    """
    # print("in adapt state",tf.shape(state))
    
    #if state is None:
    #  state = self.zero_state(tf.shape(labels)[0])

    inputs_arg = tf.stop_gradient(inputs_arg)
    #sxxt, sxy, spx, spx2, sy, spxy, count = state
    # padding a constant to introduce a bias term.
    inputs = tf.concat([inputs_arg, tf.ones([tf.shape(inputs_arg)[0], tf.shape(inputs_arg)[1], 1])], axis=2)
    if valid is not None:
      # `valid` is a [batch, time] mask; zero out invalid timesteps
      valid_f = tf.cast(valid, tf.float32)
      inputs = inputs * tf.expand_dims(valid_f, -1)
      count_op = tf.scatter_add(self.count, tsIndices, tf.reduce_sum(valid_f, axis=1))#*tf.squeeze(mask))
    else:
      count_op = tf.scatter_add(self.count, tsIndices, tf.to_float(tf.shape(labels)[1]))#*tf.squeeze(mask))

    if self._full_linear:
      xxt = tf.einsum('bti,btj->bij', inputs, inputs)# * tf.expand_dims(mask, axis=1)
      xy = tf.einsum('bti,bty->biy', inputs, labels)# * tf.expand_dims(mask, axis=1)
      if self._alpha is None:
        sxxt_op = tf.scatter_add(self.sxxt, tsIndices, xxt)
        sxy_op = tf.scatter_add(self.sxy, tsIndices, xy)
      else:
        sxxt_mul = tf.scatter_mul(self.sxxt, tsIndices, tf.broadcast_to(self._alpha, [tf.shape(tsIndices)[0], self._input_dims+1, self._input_dims+1, self._num_alpha]))
        sxxt_op = tf.scatter_add(sxxt_mul, tsIndices, tf.tile(tf.expand_dims(xxt, axis=-1), [1, 1, 1, self._num_alpha]))
        sxy_mul = tf.scatter_mul(self.sxy, tsIndices, tf.broadcast_to(self._alpha, [tf.shape(tsIndices)[0], self._input_dims+1, self._num_y, self._num_alpha]))
        sxy_op = tf.scatter_add(sxy_mul, tsIndices, tf.tile(tf.expand_dims(xy, axis=-1), [1, 1, 1, self._num_alpha]))
    else:
      # no full linear statistics tracked; carry the existing values through
      sxxt_op = self.sxxt
      sxy_op = self.sxy


#    if self._nprojs > 0:
#      with tf.variable_scope(scope or type(self).__name__):
#          if self._proj_vecs is None:
#              self._project_vectors = tf.Variable(tf.truncated_normal(stddev=1.0, dtype=tf.float32, shape=[self._input_dims, self._nprojs]), name="proj", trainable=False)
#          else:
#              self._project_vectors = tf.get_variable("proj", initializer=tf.constant_initializer(self._proj_vecs), trainable=False)

    if self._nprojs > 0:
      projected_inputs = tf.einsum('bti,ip->btp', inputs_arg, self._project_vectors)
      if self._alpha is None:
        spx_op = tf.scatter_add(self.spx, tsIndices, tf.reduce_sum(projected_inputs, 1))# * mask)
        spx2_op = tf.scatter_add(self.spx2, tsIndices, tf.reduce_sum(projected_inputs*projected_inputs, 1))# * mask)
        spxy_op = tf.scatter_add(self.spxy, tsIndices, tf.einsum('btp,bty->bpy', projected_inputs, labels))# * tf.expand_dims(mask, axis=1))
        sy_op = tf.scatter_add(self.sy, tsIndices, tf.reduce_sum(labels, 1))# * mask)
      else:
        spx_mul = tf.scatter_mul(self.spx, tsIndices, tf.ones((tf.shape(tsIndices)[0], max(1, self._nprojs)))*self._alpha)
        spx_op = tf.scatter_add(spx_mul, tsIndices, tf.reduce_sum(projected_inputs, 1))#*mask)
        spx2_mul = tf.scatter_mul(self.spx2, tsIndices, tf.ones((tf.shape(tsIndices)[0], max(1, self._nprojs)))*self._alpha)
        spx2_op = tf.scatter_add(spx2_mul, tsIndices, tf.reduce_sum(projected_inputs*projected_inputs, 1))#*mask)
        spxy_mul = tf.scatter_mul(self.spxy, tsIndices, tf.ones((tf.shape(tsIndices)[0], max(1, self._nprojs), self._num_y))*self._alpha)
        spxy_op = tf.scatter_add(spxy_mul, tsIndices, tf.einsum('btp,bty->bpy', projected_inputs, labels))#*tf.expand_dims(mask, axis=1))
        sy_mul = tf.scatter_mul(self.sy, tsIndices, tf.ones((tf.shape(tsIndices)[0], self._num_y))*self._alpha)
        sy_op = tf.scatter_add(sy_mul, tsIndices, tf.reduce_sum(labels, 1))#*mask)

    else:
        spx_op = self.spx
        spx2_op = self.spx2
        spxy_op = self.spxy
        sy_op = self.sy

    return ARUStateTuple(sxxt_op, sxy_op, spx_op, spx2_op, sy_op, spxy_op, count_op)
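
When self._alpha is set, each branch above first decays the stored per-series statistics with tf.scatter_mul and then accumulates the new sufficient statistics with tf.scatter_add. A minimal TF 1.x sketch of that decay-then-accumulate pattern (toy shapes and values, not the project's):

import tensorflow as tf

alpha = 0.9
stats = tf.Variable([1.0, 1.0, 1.0])   # one running statistic per series
idx = tf.constant([0, 2])              # series updated in this batch
new_vals = tf.constant([5.0, 7.0])

# stats[i] = stats[i] * alpha + new_vals, applied only at the scattered rows.
decayed = tf.scatter_mul(stats, idx, tf.fill(tf.shape(idx), alpha))
with tf.control_dependencies([decayed]):
    updated = tf.scatter_add(stats, idx, new_vals)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(updated)
    print(sess.run(stats))  # [5.9 1.  7.9]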
Code example #5
    def optimize_graph(self, loss, freeze_vars=None, train_vars=None):
        """Build the graph to optimize the loss function."""

        # Global step
        self.global_step = tf.Variable(0, name="global_step", trainable=False)

        lr = self.learning_rate * tf.exp(
            -tf.cast(self.global_step, tf.float32) * self.lr_decay)

        # Instead of running optimizer.minimize directly, call compute gradients
        # and process returned gradients
        optimizer = tf.train.AdagradOptimizer(lr)
        grads_and_vars = optimizer.compute_gradients(loss)

        # Remove frozen indices from gradients
        processed_grads_and_vars = []
        for (g, v) in grads_and_vars:
            if freeze_vars and (v in freeze_vars):
                freeze_indices = freeze_vars[v]

                # Remove all gradients for this variable
                if freeze_indices is True:
                    g = None

                # Process dense gradients
                elif isinstance(g, tf.Tensor):
                    print("Freezing {} indicies of variable '{}' [D]".format(
                        len(freeze_indices), v.name))

                    update_shape = [len(freeze_indices)] + list(
                        g.get_shape()[1:])
                    gradient_mask = tf.zeros(update_shape, dtype=g.dtype)
                    g = tf.scatter_mul(g, freeze_indices, gradient_mask)

                # Process sparse gradients
                elif isinstance(g, tf.IndexedSlices):
                    print("Freezing {} indicies of variable '{}' [S]".format(
                        len(freeze_indices), v.name))

                    # Remove frozen indices from gradient
                    g = tf.sparse_mask(g, freeze_indices)

            if train_vars and (v in train_vars):
                trainable_indices = train_vars[v]

                # Process dense gradients
                if isinstance(g, tf.Tensor):
                    print("Training only on {} indicies of variable '{}' [D]".
                          format(len(freeze_indices), v.name))

                    gradient_mask = tf.scatter_nd(
                        tf.reshape(trainable_indices, [-1, 1]),
                        tf.ones(tf.get_shape(trainable_indices)),
                        [g.get_shape()[0], 1])
                    g = tf.multiply(g, gradient_mask)

                # Process sparse gradients
                elif isinstance(g, tf.IndexedSlices):
                    print("Training only on {} indicies of variable '{}' [S]".
                          format(len(freeze_indices), v.name))
                    raise RuntimeError

            processed_grads_and_vars.append((g, v))

        train = optimizer.apply_gradients(processed_grads_and_vars,
                                          global_step=self.global_step,
                                          name="train")

        tf.summary.scalar("Learning rate", lr)
        return train
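
A minimal TF 1.x sketch of the tf.sparse_mask call used in the frozen-index branch above: it drops the listed rows from an IndexedSlices gradient (values here are illustrative):

import tensorflow as tf

grad = tf.IndexedSlices(values=tf.constant([[1.0], [2.0], [3.0]]),
                        indices=tf.constant([0, 5, 9], dtype=tf.int64))
masked = tf.sparse_mask(grad, mask_indices=[5])  # freeze row 5

with tf.Session() as sess:
    print(sess.run(masked.indices))  # [0 9]
    print(sess.run(masked.values))   # [[1.] [3.]]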
Code example #6
File: reduce_ops.py Project: KonduitAI/ImportTests
def execute_scatter_mul(self):
    # Create an intermediate variable - otherwise the scatter op will modify
    # the variable content in-place, and hence we'd save the input
    # post-modification rather than pre-modification.
    intermediate = tf.Variable(tf.zeros(self.shapes[0]), dtype=tf.float32)
    intermediate = tf.assign(intermediate, self.vars[0])
    return [tf.scatter_mul(ref=intermediate, indices=self.vars[1],
                           updates=self.vars[2],
                           name="scatter_mul-" + str(self.node_num))]
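
A minimal TF 1.x sketch of the in-place behavior the comment above guards against: scatter ops mutate the variable they are handed, not a copy.

import tensorflow as tf

v = tf.Variable([1.0, 2.0, 3.0])
out = tf.scatter_mul(v, [0, 2], [10.0, 10.0])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(out)
    print(sess.run(v))  # [10.  2. 30.] -- the original variable was modified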
Code example #7
title_tmp_indices = tf.cast(title_tmp_indices, dtype=tf.int32)

title_r_update = tf.reshape(tf.gather(title_r, title_tmp_indices), [-1])
title_g_update = tf.reshape(tf.gather(title_g, title_tmp_indices), [-1])
title_b_update = tf.reshape(tf.gather(title_b, title_tmp_indices), [-1])
title_a_update = tf.reshape(tf.gather(title_a, title_tmp_indices), [-1])

title_color_indices = tf.where(tf.not_equal(title_alpha, min_color_val))
title_color_indices = tf.cast(title_color_indices, dtype=tf.int32)
title_color_indices = title_color_indices + tf.constant([title_y, title_x, 0], dtype=tf.int32)
title_color_indices1, title_color_indices2, title_color_indices3 = tf.split(title_color_indices, 3, axis=1)
output_indices = title_color_indices1 * output_width + title_color_indices2

output_indices = tf.reshape(output_indices, [-1])

output_r = tf.scatter_mul(output_r, output_indices, title_a_update)
output_g = tf.scatter_mul(output_g, output_indices, title_a_update)
output_b = tf.scatter_mul(output_b, output_indices, title_a_update)

output_r = tf.scatter_add(output_r, output_indices, title_r_update)
output_g = tf.scatter_add(output_g, output_indices, title_g_update)
output_b = tf.scatter_add(output_b, output_indices, title_b_update)

# lay credit over bg!
credit_r, credit_g, credit_b, credit_alpha = tf.split(credit_img, 4, 2)

credit_alpha = (credit_alpha - tf.reduce_min(credit_alpha)) / (tf.reduce_max(credit_alpha) - tf.reduce_min(credit_alpha))

credit_r = tf.reshape(credit_r * credit_alpha, [-1])
credit_g = tf.reshape(credit_g * credit_alpha, [-1])
credit_b = tf.reshape(credit_b * credit_alpha, [-1])
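
The title overlay above follows a multiply-then-add scatter pattern: at the selected flat pixel indices, output = output * alpha + premultiplied color. A minimal TF 1.x sketch of that pattern on a single channel (toy values, not this project's data):

import tensorflow as tf

output_r = tf.Variable([0.5, 0.5, 0.5, 0.5])  # one flattened color channel
idx = tf.constant([1, 3])                     # pixels covered by the overlay
alpha = tf.constant([0.25, 0.75])             # per-pixel overlay alpha
color = tf.constant([0.9, 0.9])               # premultiplied overlay color

mul = tf.scatter_mul(output_r, idx, alpha)
with tf.control_dependencies([mul]):
    blend = tf.scatter_add(output_r, idx, color)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(blend)
    print(sess.run(output_r))  # [0.5 1.025 0.5 1.275]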