Example 1
    def _compile(self):
        """
        compile the tensorflow function "self._objective"
        """
        self.make_tf_array(self._free_vars)
        with self.tf_mode():
            f = self.build_likelihood() + self.build_prior()
            g, = tf.gradients(f, self._free_vars)

        minusF = tf.neg(f, name='objective')
        minusG = tf.neg(g, name='grad_objective')

        #initialize variables. I confess I don't understand what this does - JH
        init = tf.initialize_all_variables()
        self._session.run(init)

        #build tensorflow functions for computing the likelihood and predictions
        print("compiling tensorflow function...")
        sys.stdout.flush()

        def obj(x):
            return self._session.run([minusF, minusG],
                                     feed_dict={self._free_vars: x})

        self._objective = obj
        print("done")
        sys.stdout.flush()
        self._needs_recompile = False
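Note: the self._objective built here returns an (objective, gradient) pair, the calling convention that gradient-based SciPy optimizers expect. A minimal sketch of how such an objective is consumed (the quadratic stand-in is hypothetical, not from the example above):

import numpy as np
from scipy.optimize import minimize

def objective(x):
    # stand-in for model._objective: returns (f(x), grad f(x))
    return np.sum(x ** 2), 2.0 * x

result = minimize(objective, np.random.randn(5), jac=True, method='L-BFGS-B')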
Example 2
    def _compile(self):
        """
        compile the tensorflow function "self._objective"
        """
        self.make_tf_array(self._free_vars)
        with self.tf_mode():
            f = self.build_likelihood() + self.build_prior()
            g, = tf.gradients(f, self._free_vars)

        minusF = tf.neg(f, name='objective')
        minusG = tf.neg(g, name='grad_objective')

        #initialize variables. I confess I don't understand what this does - JH
        init = tf.initialize_all_variables()
        self._session.run(init)

        #build tensorflow functions for computing the likelihood and predictions
        print("compiling tensorflow function...")
        sys.stdout.flush()
        def obj(x):
            return self._session.run([minusF,minusG], feed_dict={self._free_vars: x})
        self._objective = obj
        print("done")
        sys.stdout.flush()
        self._needs_recompile = False
Example 3
 def build_energy_op(self):
     with self.graph.as_default(), tf.device(self.device):
         # [1, nbatch]
         e_x_0 = tf.neg((self.state_pl[0, :] ** 2) / (self.scale ** 2), name='E_x_0')
         # [ndims - 1, nbatch]
         e_x_k = tf.neg((self.state_pl[1:, :] ** 2) / tf.exp(self.state_pl[0, :]), name='E_x_k')
         # [nbatch]
         self.energy_op = tf.reduce_sum(tf.add(e_x_0, e_x_k), 0, name='energy_op')
Example 4
    def _compile(self, optimizer=None):
        """
        compile the tensorflow function "self._objective"
        """
        # Make float32 hack
        float32_hack = False
        if optimizer is not None:
            if (tf.float64 not in optimizer._valid_dtypes() and
                    tf.float32 in optimizer._valid_dtypes()):
                print("Using float32 hack for Tensorflow optimizers...")
                float32_hack = True

        self._free_vars = tf.Variable(self.get_free_state())
        if float32_hack:
            self._free_vars32 = tf.Variable(self.get_free_state().astype(
                np.float32))
            self._free_vars = tf.cast(self._free_vars32, tf.float64)

        self.make_tf_array(self._free_vars)
        with self.tf_mode():
            f = self.build_likelihood() + self.build_prior()
            g, = tf.gradients(f, self._free_vars)

        self._minusF = tf.neg(f, name='objective')
        self._minusG = tf.neg(g, name='grad_objective')

        # The optimiser needs to be part of the computational graph, and needs
        # to be initialised before tf.initialize_all_variables() is called.
        if optimizer is None:
            opt_step = None
        else:
            if float32_hack:
                opt_step = optimizer.minimize(tf.cast(self._minusF,
                                                      tf.float32),
                                              var_list=[self._free_vars32])
            else:
                opt_step = optimizer.minimize(self._minusF,
                                              var_list=[self._free_vars])
        init = tf.initialize_all_variables()
        self._session.run(init)

        #build tensorflow functions for computing the likelihood and predictions
        print("compiling tensorflow function...")
        sys.stdout.flush()

        def obj(x):
            return self._session.run([self._minusF, self._minusG],
                                     feed_dict={self._free_vars: x})

        self._objective = obj
        print("done")
        sys.stdout.flush()
        self._needs_recompile = False

        return opt_step
Example 5
 def energy_function(self, input, w_test, v_test, h_test, sigma_test):
     wh = tf.matmul(tf.div(input, sigma_test**2), w_test) + h_test
     step1 = tf.reduce_sum(tf.div(tf.square(input - v_test), 2*tf.square(sigma_test)), reduction_indices=[1])
     wh_plus = tf.maximum(wh, 0)
     step2 = step1 - tf.reduce_sum(tf.log(tf.exp(tf.neg(wh_plus))+tf.exp(wh - wh_plus)) + wh_plus,
                                   reduction_indices=[1])
     step3 = tf.transpose(step2)
     e = tf.reduce_mean(step3)
     e_sum = tf.reduce_sum(tf.neg(step3))
     tf.scalar_summary('energy_val', e_sum)
     return step3, e, e_sum
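Note: the log term in step2 is a numerically stable softplus. With wh_plus = max(wh, 0), the identity log(exp(-wh_plus) + exp(wh - wh_plus)) + wh_plus = log(1 + exp(wh)) holds, but the left-hand side never exponentiates a large positive number. A standalone check (hypothetical, using the same pre-1.0 TensorFlow API as these examples):

import tensorflow as tf

wh = tf.constant([-100.0, 0.0, 100.0])
wh_plus = tf.maximum(wh, 0)
stable = tf.log(tf.exp(tf.neg(wh_plus)) + tf.exp(wh - wh_plus)) + wh_plus
naive = tf.log(1.0 + tf.exp(wh))  # exp(100.) overflows float32 to inf
with tf.Session() as sess:
    print(sess.run([stable, naive]))  # stable: [0., 0.693, 100.]; naive ends in inf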
Example 6
def twins_relu(x):
    """
    return [relu(x), -relu(-x)]
    """
    from tflearn import utils
    #from tflearn.layers.conv import max_pool_2d
    input_shape = get_incoming_shape(x)
    res = tf.concat(len(input_shape)-1, [tf.nn.relu(x),  tf.neg(tf.nn.relu(tf.neg(x)))])
    
    #print(tf.shape(res))
    
    return res
Example 7
def BiBU(x):
    """
    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182)
    Binary Branch Units, binarizing twins_relu
    This does not work well; should continue experimenting with new binary methods for twins_relu.
    """
    input_shape = get_incoming_shape(x)
    binary_x = tf.sign(x)
    forward = tf.concat(len(input_shape)-1, [tf.nn.relu(binary_x),  tf.neg(tf.nn.relu(tf.neg(binary_x)))])
    backward = tf.concat(len(input_shape)-1, [tf.nn.relu(x),  tf.neg(tf.nn.relu(tf.neg(x)))])
    
    return backward + tf.stop_gradient(forward - backward)
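Note: the return line is the straight-through estimator pattern from the linked Stack Overflow answer: the graph evaluates to forward, while gradients flow as if the op were backward. A minimal standalone sketch of the trick (hypothetical, pre-1.0 TF API):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])
forward = tf.sign(x)        # what the forward pass computes (zero gradient a.e.)
backward = tf.identity(x)   # what backprop sees instead (identity gradient)
y = backward + tf.stop_gradient(forward - backward)
grad, = tf.gradients(y, x)  # all ones, not the (zero) gradient of sign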
Example 8
 def testArithmeticRenames(self):
     with self.test_session() as s:
         stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
         vals = s.run(stuff)
         self.assertAllEqual(vals, [[[1, 2], [4, 5]], [[3, 4], [6, 7]]])
         self.assertAllEqual(tf.neg(tf.mul(tf.add(1, 2), tf.sub(5, 3))).eval(), -6)
         self.assertAllEqual(s.run(tf.listdiff([1, 2, 3], [3, 3, 4]))[0], [1, 2])
         self.assertAllEqual(tf.list_diff([1, 2, 3], [3, 3, 4])[0].eval(), [1, 2])
         a = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
         foo = np.where(np.less(a, 2), np.negative(a), a)
         self.assertAllEqual(tf.select(tf.less(a, 2), tf.neg(a), a).eval(), foo)
         self.assertAllEqual(tf.complex_abs(tf.constant(3 + 4.0j)).eval(), 5)
Example 9
 def build_energy_op(self):
     with self.graph.as_default(), tf.device(self.energy_device):
         # [1, nbatch]
         e_x_0 = tf.neg((self.state_pl[0, :]**2) / (self.scale**2),
                        name='E_x_0')
         # [ndims - 1, nbatch]
         e_x_k = tf.neg(
             (self.state_pl[1:, :]**2) / tf.exp(self.state_pl[0, :]),
             name='E_x_k')
         # [nbatch]
         self.energy_op = tf.reduce_sum(tf.add(e_x_0, e_x_k),
                                        0,
                                        name='energy_op')
Example 10
    def _compile(self, optimizer=None):
        """
        compile the tensorflow function "self._objective"
        """
        self._graph = tf.Graph()
        self._session = tf.Session(graph=self._graph)
        with self._graph.as_default():
            self._free_vars = tf.Variable(self.get_free_state())

            self.make_tf_array(self._free_vars)
            with self.tf_mode():
                f = self.build_likelihood() + self.build_prior()
                g, = tf.gradients(f, self._free_vars)

            self._minusF = tf.neg(f, name='objective')
            self._minusG = tf.neg(g, name='grad_objective')

            # The optimiser needs to be part of the computational graph, and needs
            # to be initialised before tf.initialize_all_variables() is called.
            if optimizer is None:
                opt_step = None
            else:
                opt_step = optimizer.minimize(self._minusF,
                                              var_list=[self._free_vars])
            init = tf.initialize_all_variables()
        self._session.run(init)

        # build tensorflow functions for computing the likelihood
        if settings.verbosity.tf_compile_verb:
            print("compiling tensorflow function...")
        sys.stdout.flush()

        self._feed_dict_keys = self.get_feed_dict_keys()

        def obj(x):
            feed_dict = {self._free_vars: x}
            self.update_feed_dict(self._feed_dict_keys, feed_dict)
            f, g = self._session.run([self._minusF, self._minusG],
                                     feed_dict=feed_dict)
            return f.astype(np.float64), g.astype(np.float64)

        self._objective = obj
        if settings.verbosity.tf_compile_verb:
            print("done")
        sys.stdout.flush()
        self._needs_recompile = False

        return opt_step
Example 11
    def testInitializerFunction(self):
        value = [[-42], [133.7]]
        shape = [2, 1]
        with self.test_session():
            initializer = lambda: tf.constant(value)
            with self.assertRaises(ValueError):
                # Checks that dtype must be specified.
                tf.Variable(initializer)

            v1 = tf.Variable(initializer, dtype=tf.float32)
            self.assertEqual(shape, v1.get_shape())
            self.assertAllClose(value, v1.initial_value.eval())
            with self.assertRaises(tf.errors.FailedPreconditionError):
                v1.eval()

            v2 = tf.Variable(tf.neg(v1.initialized_value()), dtype=tf.float32)
            self.assertEqual(v1.get_shape(), v2.get_shape())
            self.assertAllClose(np.negative(value), v2.initial_value.eval())

            # Once v2.initial_value.eval() has been called, v1 has effectively been
            # initialized.
            self.assertAllClose(value, v1.eval())

            with self.assertRaises(tf.errors.FailedPreconditionError):
                v2.eval()
            tf.initialize_all_variables().run()
            self.assertAllClose(np.negative(value), v2.eval())
Example 12
    def test_cwise_unary_grad(self):
        """
        Ensure that all component-wise unary functions in the math op library yield an identical gradient to tensorflow
        """
        with tf.Session() as s:
            arg_np = np.random.random(100)
            grad_above = tf.constant(np.random.random(100))

            arg = tf.constant(arg_np)

            def test_grad(fcn, tf_fcn):
                ovl_out = as_tensorflow(fcn(arg))
                tf_out = tf_fcn(arg)

                ovl_grad = tf.gradients(ovl_out, arg, grad_above)[0]
                tf_grad = tf.gradients(tf_out, arg, grad_above)[0]
                ovl_out, tf_out, ovl_grad, tf_grad = s.run(
                    [ovl_out, tf_out, ovl_grad, tf_grad])

                assert np.allclose(ovl_out, tf_out)
                assert np.allclose(ovl_grad, tf_grad)

            test_grad(lambda x: neg(x), lambda x: tf.neg(x))
            test_grad(lambda x: tanh(x), lambda x: tf.tanh(x))
            test_grad(lambda x: sigmoid(x), lambda x: tf.sigmoid(x))
Example 13
    def get_layers(self):
        input_vars, predict_op = self.get_predict_op()

        action = tf.placeholder(tf.int32, shape=(None,), name='action')
        reward = tf.placeholder(tf.float32, shape=(None,), name='reward')
        credit = tf.placeholder(tf.float32, shape=(None,), name='credit')
        label_vars = [action, reward, credit]

        if self.options.pg_normalize:
            reward_mean, reward_variance = tfutils.moments(reward)
            normalized = tf.nn.batch_normalization(reward, reward_mean, reward_variance,
                                                   scale=1.0, offset=0.0, variance_epsilon=1e-4)
        else:
            normalized = reward
        opt = tf.train.RMSPropOptimizer(learning_rate=self.options.learning_rate)
        logp = tf.neg(tf.nn.sparse_softmax_cross_entropy_with_logits(predict_op, action),
                      name='action_log_prob')
        signal = tf.mul(logp, normalized * credit, name='signal')
        signal_down = tf.reduce_sum(tf.slice(tf.reshape(signal, [-1, 10]),
                                             [0, 4], [-1, 1]),
                                    [0], name='signal_down')
        if self.options.verbosity >= 5:
            print_node = tf.Print(signal, [signal_down], message='signal_down: ', summarize=10)
            with tf.control_dependencies([print_node]):
                signal = tf.identity(signal)
        loss = tf.reduce_mean(-signal, name='loss')
        var_list = tf.trainable_variables()
        print('Trainable variables:')
        for var in var_list:
            print(var.name)
        train_op = minimize_with_grad_clip(opt, self.options.pg_grad_clip,
                                           loss, var_list=var_list)

        return input_vars, label_vars, train_op, predict_op
Example 14
def hard_resp(pw_matrix, k):
    '''
    hard_resp
        Calculates the hard KNN responsibility vector
    '''
    #We need to index the closest values
    ref_matrix = tf.neg(tf.transpose(pw_matrix))
    values, indices = tf.nn.top_k(ref_matrix, k, sorted=False)
    #Generate the indices from top_k (adapted liberally from Stack Overflow)
    range_repeated = tf.tile(
        tf.expand_dims(tf.range(0,
                                tf.shape(indices)[0]), 1), [1, k])
    # Time to update
    full_indices = tf.reshape(
        tf.concat(
            2, [tf.expand_dims(range_repeated, 2),
                tf.expand_dims(indices, 2)]), [-1, 2])
    update = tf.mul(
        tf.truediv(tf.constant(1.0, dtype=tf.float64), tf.cast(k, tf.float64)),
        tf.ones(tf.shape(values), dtype=tf.float64))
    return tf.sparse_to_dense(full_indices,
                              tf.shape(ref_matrix),
                              tf.reshape(update, [-1]),
                              default_value=0.,
                              validate_indices=False)
Example 15
def step(model, current, dt):
    with tf.name_scope('neurons'):
        v_rest = tf.constant([model.v_rest] * model.size, name='v_rest')
        r_m = tf.constant([model.r_m] * model.size, name='r_m')
        c_m = tf.constant([model.c_m] * model.size, name='c_m')
        tau_m = tf.mul(r_m, c_m, name='tau_m')
        tau_ref = tf.constant([model.tau_ref] * model.size, name='tau_ref')
        v_spike = tf.constant([model.v_spike] * model.size, name='v_spike')

        v_m = tf.Variable(tf.zeros(model.size), trainable=True, name='v_m')
        tf.scalar_summary(['v_m'] * model.size, v_m)

        t_rest = tf.Variable(tf.zeros(model.size),
                             trainable=True,
                             name='t_rest')

        v_m_ = (tf.neg(v_m) + current * r_m) / tau_m * dt

        # neurons = tf.dynamic_partition(t_rest, tf.greater(t_rest, 0.0), 2)
        # neurons[0]
        def resting_op():
            return tf.tuple((t_rest.assign_sub(dt), v_m.assign(v_rest)))

        def spiking_op():
            return tf.tuple(
                (t_rest.assign_add(tau_ref), v_m.assign_add(v_spike)))

        def responding_op():
            return tf.tuple((t_rest, v_m.assign_add(v_m_)))

        _step = tf.case(
            ((tf.reshape(tf.greater(t_rest, 0), []), resting_op),
             (tf.reshape(tf.greater(v_m, v_spike), []), spiking_op)),
            responding_op)
    return _step
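Note: v_m_ above is one forward-Euler step of the leaky integrate-and-fire membrane equation tau_m * dV/dt = -V + I * R_m, i.e. delta_V = (-V + I * R_m) / tau_m * dt; the tf.case then bypasses this integration while the neuron is refractory (resting_op) or has crossed threshold (spiking_op).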
Example 16
  def testInitializerFunction(self):
    value = [[-42], [133.7]]
    shape = [2, 1]
    with self.test_session():
      initializer = lambda: tf.constant(value)
      with self.assertRaises(ValueError):
        # Checks that dtype must be specified.
        tf.Variable(initializer)

      v1 = tf.Variable(initializer, dtype=tf.float32)
      self.assertEqual(shape, v1.get_shape())
      self.assertAllClose(value, v1.initial_value.eval())
      with self.assertRaises(tf.errors.FailedPreconditionError):
        v1.eval()

      v2 = tf.Variable(tf.neg(v1.initialized_value()), dtype=tf.float32)
      self.assertEqual(v1.get_shape(), v2.get_shape())
      self.assertAllClose(np.negative(value), v2.initial_value.eval())

      # Once v2.initial_value.eval() has been called, v1 has effectively been
      # initialized.
      self.assertAllClose(value, v1.eval())

      with self.assertRaises(tf.errors.FailedPreconditionError):
        v2.eval()
      tf.initialize_all_variables().run()
      self.assertAllClose(np.negative(value), v2.eval())
Example 17
def build_graph_tf():
    a = tf.constant(2)
    b = tf.constant(3)
    c = tf.add(a, b)
    c2 = tf.mul(a, b)
    d = tf.neg(c)
    return a, b, c, c2, d
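Hypothetical usage of build_graph_tf(), evaluating the three derived nodes:

import tensorflow as tf

a, b, c, c2, d = build_graph_tf()
with tf.Session() as sess:
    print(sess.run([c, c2, d]))  # [5, 6, -5]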
Example 18
    def test_cwise_unary_grad(self):
        """
        Ensure that all component-wise unary functions in the math op library yield an identical gradient to tensorflow
        """
        test_config = tf.ConfigProto(allow_soft_placement=False)
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            arg_np = np.random.random(100)
            grad_above = tf.constant(np.random.random(100))

            arg = tf.constant(arg_np)

            def test_grad(fcn, tf_fcn):
                ovl_out = as_tensorflow(fcn(arg))
                tf_out = tf_fcn(arg)

                ovl_grad = tf.gradients(ovl_out, arg, grad_above)[0]
                tf_grad = tf.gradients(tf_out, arg, grad_above)[0]
                ovl_out, tf_out, ovl_grad, tf_grad = s.run([ovl_out, tf_out, ovl_grad, tf_grad])

                assert np.allclose(ovl_out, tf_out)
                assert np.allclose(ovl_grad, tf_grad)

            test_grad(lambda x: neg(x), lambda x: tf.neg(x))
            test_grad(lambda x: tanh(x), lambda x: tf.tanh(x))
            test_grad(lambda x: sin(x), lambda x: tf.sin(x))
            test_grad(lambda x: cos(x), lambda x: tf.cos(x))
            test_grad(lambda x: tan(x), lambda x: tf.tan(x))
            test_grad(lambda x: sigmoid(x), lambda x: tf.sigmoid(x))
Example 19
def gauss(mean, stddev, ksize):
    """Uses Tensorflow to compute a Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        x = tf.linspace(-3.0, 3.0, ksize)
        z = (
            tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                          (2.0 * tf.pow(stddev, 2.0)))) *
            (1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
        return z.eval()
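Hypothetical usage: the 1-D kernel from gauss() can be turned into a 2-D smoothing kernel with an outer product, much as the gabor() example further below does with tf.matmul:

import numpy as np

kernel_1d = gauss(0.0, 1.0, 16)             # 16-tap Gaussian
kernel_2d = np.outer(kernel_1d, kernel_1d)  # 16 x 16 smoothing kernel
kernel_2d /= kernel_2d.sum()                # normalize to unit mass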
Example 20
def tf_neg_masked_scaled_root_mean_square_errors(y_est, y_masked, y_std, do_neg_rmdse=False):
    with tf.name_scope("MSE"):
        y = y_masked[:, :, 0]
        y_mask = y_masked[:, :, 1]
        y_est.get_shape().assert_is_compatible_with(y.get_shape())
        y_diff_masked = tf.multiply(
            tf.subtract(y_est, y),
            y_mask,
            name="y_diff_masked")
        y_mask_count = tf.reduce_sum(
            y_mask,
            name = "y_mask_count")
        mse = tf.div(
            tf.reduce_sum(tf.square(y_diff_masked)),
            y_mask_count,
            name="MSE")
        rmse = tf.sqrt(
            tf.div(
                tf.reduce_sum(
                    tf.square(
                        tf.multiply(
                            y_diff_masked,
                            y_std))),
                y_mask_count),
            name="RMSE")
        if do_neg_rmdse:
            rmse = tf.neg(rmse, name="-RMSE")
        return mse, rmse
Example 21
def CreateRegressionNetwork(input_d, output_d, num_hidden=20, 
              learning_rate=0.01, correlation_loss=False):
  g = tf.Graph()
  with g.as_default():
    x1 = tf.placeholder(tf.float32, shape=(None, input_d), name='x1') # Will be batch_size x input_d
    W1 = tf.Variable(tf.random_uniform([input_d,num_hidden], -1.0, 1.0), name='W1')  # input_d x num_hidden
    b1 = tf.Variable(tf.zeros([num_hidden]), name='bias1')
    y1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(x1,W1), b1), name='y1') # batch_size x num_hidden
  
    W2 = tf.Variable(tf.random_uniform([num_hidden,output_d], -1.0, 1.0), name='W2')
    b2 = tf.Variable(tf.zeros([output_d]), name='b2')
    y2 = tf.nn.bias_add(tf.matmul(y1,W2), b2, name='y2') # Will be batch_size x output_d
    ytrue = tf.placeholder(tf.float32, shape=(None, output_d), name='ytrue') # num_batch x output_d
  
    if correlation_loss:
      # Compute the correlation
      r = PearsonCorrelationTF(ytrue, y2)
      tf.scalar_summary('correlation', r)
      loss = tf.neg(r, name='loss_pearson')
    else:
      # Minimize the mean squared errors.
      loss = tf.reduce_mean(tf.square(y2 - ytrue), name='loss_euclidean')
      tf.scalar_summary('loss', loss)
  
    # https://www.quora.com/Which-optimizer-in-TensorFlow-is-best-suited-for-learning-regression
    # optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train = optimizer.minimize(loss)
  
    # Before starting, initialize the variables.  We will 'run' this first.
    init = tf.initialize_all_variables()
    saver = tf.train.Saver()
    merged_summaries = tf.merge_all_summaries()
  return g, train, loss, init, x1, y2, ytrue, merged_summaries, saver
Example 22
def cross_entropy_loss(y, yhat):
  """
  Compute the cross entropy loss in tensorflow.

  y is a one-hot tensor of shape (n_samples, n_classes) and yhat is a tensor
  of shape (n_samples, n_classes). y should be of dtype tf.int32, and yhat should
  be of dtype tf.float32.

  The functions tf.to_float, tf.reduce_sum, and tf.log might prove useful. (Many
  solutions are possible, so you may not need to use all of these functions).

  Note: You are NOT allowed to use the tensorflow built-in cross-entropy
        functions.

  Args:
    y:    tf.Tensor with shape (n_samples, n_classes). One-hot encoded.
    yhat: tf.Tensor with shape (n_samples, n_classes). Each row encodes a
          probability distribution and should sum to 1.
  Returns:
    out:  tf.Tensor with shape (1,) (Scalar output). You need to construct this
          tensor in the problem.
  """
  ### YOUR CODE HERE
  score = tf.neg(tf.mul(tf.to_float(y), tf.log(yhat))) 
  out = tf.reduce_sum(score) 
  ### END YOUR CODE
  return out
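A quick numeric check of cross_entropy_loss (hypothetical, pre-1.0 TF API): with the one-hot y below, only the log-probabilities 0.8 and 0.6 survive the mask, so the summed loss is -(log 0.8 + log 0.6), roughly 0.734:

import tensorflow as tf

y = tf.constant([[0, 1], [1, 0]], dtype=tf.int32)
yhat = tf.constant([[0.2, 0.8], [0.6, 0.4]], dtype=tf.float32)
with tf.Session() as sess:
    print(sess.run(cross_entropy_loss(y, yhat)))  # ~0.734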
Example 23
    def get_mapping(self, max_iterations=None):
        """ find the best mapping according to the given emission and entries matrix
        Return:
            a mapping between PDTB relations and RST relations. Mapping[i, j] = P(RST_j|PDTB_i)
        """
        if max_iterations is None:
            max_iterations = 50000
            
        sdc = Lexconn.__one_hot_encoding(self.__entries[:, 0], self.__dc_cnt)
        src = Lexconn.__one_hot_encoding(self.__entries[:, 1], self.__rst_cnt)

        sdc_e = np.dot(sdc, self.__emission) #select emission row for each entry in LEXCONN
        init_value = np.random.rand(self.__pdtb_cnt, self.__rst_cnt).astype(np.float32)
        m_logits = tf.Variable(init_value)  #parameters of mapping probabilities
        m = tf.nn.softmax(m_logits)     #Mapping probabilities
        src_m = tf.matmul(m, src.T)     #select mapping rows for each entry in LEXCONN
        score = tf.matmul(sdc_e, src_m) #calculate the probabilities of all possible combination of connectives and relations
        log_score = tf.log(score)
        sum_log_score = tf.trace(log_score) #we only need those that have the same index in LEXCONN
        optimizer = tf.train.AdamOptimizer(0.01).minimize(tf.neg(sum_log_score))
        init = tf.initialize_all_variables()
        with tf.Session() as sess:
            sess.run(init)
            #print(sess.run(m))
            print(sess.run(sum_log_score))

            # Fit the line.
            np.set_printoptions(precision=4)
            np.set_printoptions(suppress=True)
            for step in range(max_iterations):
                sess.run(optimizer)
                if step % 1000 == 0:
                    print(step, sess.run(sum_log_score))
            return sess.run(m)
Example 24
def model(train, label, test):
    dist = tf.reduce_sum(tf.abs(tf.add(train, tf.neg(test))),
                         reduction_indices=1)
    min_index = tf.argmin(dist, 0)
    min_index = tf.cast(min_index, tf.int32)
    pred = label[min_index]
    return pred
Example 25
def build_graph_tf():
	a=tf.constant(2)
	b=tf.constant(3)
	c=tf.add(a,b)
	c2=tf.mul(a,b)
	d=tf.neg(c)
	return a,b,c,c2,d
Example 26
def cross_entropy_loss(y, yhat):
    """
    Compute the cross entropy loss in tensorflow.
    The loss should be summed over the current minibatch.

    y is a one-hot tensor of shape (n_samples, n_classes) and yhat is a tensor
    of shape (n_samples, n_classes). y should be of dtype tf.int32, and yhat should
    be of dtype tf.float32.

    The functions tf.to_float, tf.reduce_sum, and tf.log might prove useful. (Many
    solutions are possible, so you may not need to use all of these functions).

    Note: You are NOT allowed to use the tensorflow built-in cross-entropy
                functions.

    Args:
        y:    tf.Tensor with shape (n_samples, n_classes). One-hot encoded.
        yhat: tf.Tensor with shape (n_samples, n_classes). Each row encodes a
                    probability distribution and should sum to 1.
    Returns:
        out:  tf.Tensor with shape (1,) (Scalar output). You need to construct this
                    tensor in the problem.
    """

    ### YOUR CODE HERE
    log_y = tf.log(yhat)
    temp = log_y * tf.to_float(y)
    out = tf.neg(tf.reduce_sum(temp))
    ### END YOUR CODE

    return out
Example 27
 def initializeKnn(self):        
     if self.qualitative_outputs:            
         n_input = self.input_end_column - self.input_start_column + 1            
         self.tf_in = tf.placeholder("float", [None, n_input])
         self.tf_testing = tf.placeholder("float", [n_input])
         
         # Calculate L1 Distance
         self.distance = tf.reduce_sum(tf.abs(tf.add(self.tf_in, tf.neg(self.tf_testing))), reduction_indices=1)
         # Predict: Get min distance index (Nearest neighbor)
         self.prediction = tf.arg_min(self.distance, 0)
         
         init = tf.initialize_all_variables()
         self.sess = tf.Session()
         self.sess.run(init)
         accuracy = 0
         #output part
         for i in range(len(self.testing_data)):
             # Get nearest neighbor
             nn_index = self.sess.run(self.prediction, feed_dict={self.tf_in: self.training_data, self.tf_testing: self.testing_data[i,:]})
             # Calculate accuracy
             if np.argmax(self.training_outputs[nn_index]) == np.argmax(self.testing_outputs[i]):
                 accuracy += 1./len(self.testing_data)
         self.accuracy = accuracy
         self.epochs_for_accuracy = "N/A"
         self.best_accuracy = "N/A"
         self.epochs_for_best_accuracy = "N/A"
         self.trained = True
     else:
         raise ValueError("NOT IMPLEMENTED")
Example 28
  def testSideEffect(self):
    a = tf.constant(1)
    b = tf.constant(1)
    c = tf.add(a, b)
    with tf.control_dependencies([c]):
      d = tf.constant(42)
    n = tf.neg(c)

    shared = []

    def sub(t):
      shared.append(t)
      return t

    c = subscribe.subscribe(c, lambda t: tf.py_func(sub, [t], [t.dtype]))

    with self.test_session() as sess:
      c_out = sess.run([c])
      n_out = sess.run([n])
      d_out = sess.run([d])

    self.assertEquals(n_out, [-2])
    self.assertEquals(c_out, [2])
    self.assertEquals(d_out, [42])
    self.assertEquals(shared, [2, 2, 2])
Example 29
    def test_cwise_unary_grad(self):
        """
        Ensure that all component-wise unary functions in the math op library yield an identical gradient to tensorflow
        """
        test_config = tf.ConfigProto(allow_soft_placement=False)
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            arg_np = np.random.random(100)
            grad_above = tf.constant(np.random.random(100))

            arg = tf.constant(arg_np)

            def test_grad(fcn, tf_fcn):
                ovl_out = as_tensorflow(fcn(arg))
                tf_out = tf_fcn(arg)

                ovl_grad = tf.gradients(ovl_out, arg, grad_above)[0]
                tf_grad = tf.gradients(tf_out, arg, grad_above)[0]
                ovl_out, tf_out, ovl_grad, tf_grad = s.run(
                    [ovl_out, tf_out, ovl_grad, tf_grad])

                assert np.allclose(ovl_out, tf_out)
                assert np.allclose(ovl_grad, tf_grad)

            test_grad(lambda x: neg(x), lambda x: tf.neg(x))
            test_grad(lambda x: tanh(x), lambda x: tf.tanh(x))
            test_grad(lambda x: sin(x), lambda x: tf.sin(x))
            test_grad(lambda x: cos(x), lambda x: tf.cos(x))
            test_grad(lambda x: tan(x), lambda x: tf.tan(x))
            test_grad(lambda x: sigmoid(x), lambda x: tf.sigmoid(x))
Example 30
def w(input_data, cu, kappas_t_1, config):
	
	batch_size = config.batch_size
	mixture_size = config.mixture_size
	vocab_length = config.vocab_length

	# split along dim of mixture size * 3
	hat_alphas_t, hat_betas_t, hat_kappas_t = tf.split(1, 3, input_data)

	alphas_t = tf.exp(hat_alphas_t)
	betas_t = tf.exp(hat_betas_t)
	kappas_t = tf.add(kappas_t_1, tf.exp(hat_kappas_t))

	speech_length = tf.shape(cu)[1]

	u = tf.linspace(1.0, tf.cast(speech_length,tf.float32) , speech_length)
	u = tf.expand_dims(u, 0)
	u = tf.expand_dims(u, 0)
	u = tf.tile(u, [batch_size, mixture_size, 1])

	alphas_t_expanded = tf.tile(tf.expand_dims(alphas_t, -1), [1, 1, speech_length])
	betas_t_expanded = tf.tile(tf.expand_dims(betas_t, -1), [1, 1, speech_length])
	kappas_t_expanded = tf.tile(tf.expand_dims(kappas_t, -1), [1, 1, speech_length])

	calc = tf.square(tf.sub(kappas_t_expanded, u))
	calc = tf.mul(calc, tf.neg(betas_t_expanded))
	calc = tf.exp(calc)
	calc = tf.mul(calc, alphas_t_expanded)

	phi_t = tf.expand_dims(tf.reduce_sum(calc, 1), 1)

	output = tf.squeeze(tf.batch_matmul(phi_t, cu), [1])

	return output, kappas_t, phi_t
Example 31
 def compile(self,
             optimizer=tf.train.AdamOptimizer(),
             collection=graph_key.VARIABLES,
             global_step=None):
     """
     Create self.method_op and self.optimize_op.
     args:
     - optimizer: instance of tf.train.optimizer.
     - collection: variable collection that will be optimized.
     - global_step: If want to decrease learning rate, global_step can be
                     passed.
     """
     print('compiling...')
     var_list = self.model.get_tf_variables(collection)
     with self.model.tf_mode():
         self.method_op = self.likelihood_method(self.model)
         self.optimize_op = optimizer.minimize(tf.neg(self.method_op),
                                               var_list=var_list,
                                               global_step=global_step)
     # manual initialization.
     self.model.initialize()
     # initialize un-initialized variable
     self.model._session.run(
         tf.initialize_variables([
             v for v in tf.all_variables()
             if not self.model._session.run(tf.is_variable_initialized(v))
         ]))
     # make validation
     self.model.validate()
Example 32
 def testArithmeticRenames(self):
     with self.cached_session() as s:
         stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
         vals = s.run(stuff)
         self.assertAllEqual(vals, [[[1, 2], [4, 5]], [[3, 4], [6, 7]]])
         self.assertAllEqual(
             tf.neg(tf.mul(tf.add(1, 2), tf.sub(5, 3))).eval(), -6)
         self.assertAllEqual(
             s.run(tf.listdiff([1, 2, 3], [3, 3, 4]))[0], [1, 2])
         self.assertAllEqual(
             tf.list_diff([1, 2, 3], [3, 3, 4])[0].eval(), [1, 2])
         a = [[1., 2., 3.], [4., 5., 6.]]
         foo = np.where(np.less(a, 2), np.negative(a), a)
         self.assertAllEqual(
             tf.select(tf.less(a, 2), tf.neg(a), a).eval(), foo)
         self.assertAllEqual(tf.complex_abs(tf.constant(3 + 4.j)).eval(), 5)
Example 33
    def testSideEffect(self):
        a = tf.constant(1)
        b = tf.constant(1)
        c = tf.add(a, b)
        with tf.control_dependencies([c]):
            d = tf.constant(42)
        n = tf.neg(c)

        shared = []

        def sub(t):
            shared.append(t)
            return t

        c = subscribe.subscribe(c, lambda t: tf.py_func(sub, [t], [t.dtype]))

        with self.test_session() as sess:
            c_out = sess.run([c])
            n_out = sess.run([n])
            d_out = sess.run([d])

        self.assertEquals(n_out, [-2])
        self.assertEquals(c_out, [2])
        self.assertEquals(d_out, [42])
        self.assertEquals(shared, [2, 2, 2])
Example 34
    def __build_graph(self):
        self.__graph = tf.Graph()
        with self.__graph.as_default(), self.__graph.device(_device_for_node):
            count_max = tf.constant([self.cooccurrence_cap], dtype=tf.float32,
                                    name='max_cooccurrence_count')
            scaling_factor = tf.constant([self.scaling_factor], dtype=tf.float32,
                                         name="scaling_factor")

            self.__focal_input = tf.placeholder(tf.int32, shape=[self.batch_size],
                                                name="focal_words")
            self.__context_input = tf.placeholder(tf.int32, shape=[self.batch_size],
                                                  name="context_words")
            self.__cooccurrence_count = tf.placeholder(tf.float32, shape=[self.batch_size],
                                                       name="cooccurrence_count")

            focal_embeddings = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], 1.0, -1.0),
                name="focal_embeddings")
            context_embeddings = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], 1.0, -1.0),
                name="context_embeddings")

            focal_biases = tf.Variable(tf.random_uniform([self.vocab_size], 1.0, -1.0),
                                       name='focal_biases')
            context_biases = tf.Variable(tf.random_uniform([self.vocab_size], 1.0, -1.0),
                                         name="context_biases")

            focal_embedding = tf.nn.embedding_lookup([focal_embeddings], self.__focal_input)
            context_embedding = tf.nn.embedding_lookup([context_embeddings], self.__context_input)
            focal_bias = tf.nn.embedding_lookup([focal_biases], self.__focal_input)
            context_bias = tf.nn.embedding_lookup([context_biases], self.__context_input)

            weighting_factor = tf.minimum(
                1.0,
                tf.pow(
                    tf.div(self.__cooccurrence_count, count_max),
                    scaling_factor))
            # weighting_factor = 1

            embedding_product = tf.reduce_sum(tf.mul(focal_embedding, context_embedding), 1)

            log_cooccurrences = tf.log(tf.to_float(self.__cooccurrence_count))
            # log_cooccurrences = self.__cooccurrence_count


            distance_expr = tf.square(tf.add_n([
                embedding_product,
                focal_bias,
                context_bias,
                tf.neg(log_cooccurrences)]))

            single_losses = tf.mul(weighting_factor, distance_expr)
            self.__total_loss = tf.reduce_sum(single_losses)
            tf.scalar_summary("GloVe loss", self.__total_loss)
            self.__optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(
                self.__total_loss)
            self.__summary = tf.merge_all_summaries()

            self.__combined_embeddings = tf.add(focal_embeddings, context_embeddings,
                                                name="combined_embeddings")
Example 35
def sqk_gpr(input1, input2, l):
    '''
    sqk_gpr
        Calculates the squared exponential kernel
    '''
    #We need to index the closest values
    return tf.exp(l * tf.neg(get_distance_matrix(input1, input2)))
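get_distance_matrix is not shown on this page; a common definition consistent with this usage is the pairwise squared Euclidean distance (a hypothetical sketch, pre-1.0 TF API):

import tensorflow as tf

def get_distance_matrix(input1, input2):
    # input1: [n, d], input2: [m, d] -> [n, m] squared distances
    sq1 = tf.reduce_sum(tf.square(input1), 1, keep_dims=True)  # [n, 1]
    sq2 = tf.reduce_sum(tf.square(input2), 1, keep_dims=True)  # [m, 1]
    return sq1 - 2.0 * tf.matmul(input1, input2, transpose_b=True) + tf.transpose(sq2)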
Example 36
    def _compile(self, optimizer=None):
        """
        compile the tensorflow function "self._objective"
        """
        # Make float32 hack
        float32_hack = False
        if optimizer is not None:
            if tf.float64 not in optimizer._valid_dtypes() and tf.float32 in optimizer._valid_dtypes():
                print("Using float32 hack for Tensorflow optimizers...")
                float32_hack = True

        self._free_vars = tf.Variable(self.get_free_state())
        if float32_hack:
            self._free_vars32 = tf.Variable(self.get_free_state().astype(np.float32))
            self._free_vars = tf.cast(self._free_vars32, tf.float64)

        self.make_tf_array(self._free_vars)
        with self.tf_mode():
            f = self.build_likelihood() + self.build_prior()
            g, = tf.gradients(f, self._free_vars)

        self._minusF = tf.neg(f, name='objective')
        self._minusG = tf.neg(g, name='grad_objective')

        # The optimiser needs to be part of the computational graph, and needs
        # to be initialised before tf.initialize_all_variables() is called.
        if optimizer is None:
            opt_step = None
        else:
            if float32_hack:
                opt_step = optimizer.minimize(tf.cast(self._minusF, tf.float32), var_list=[self._free_vars32])
            else:
                opt_step = optimizer.minimize(self._minusF, var_list=[self._free_vars])
        init = tf.initialize_all_variables()
        self._session.run(init)

        #build tensorflow functions for computing the likelihood and predictions
        print("compiling tensorflow function...")
        sys.stdout.flush()
        def obj(x):
            return self._session.run([self._minusF, self._minusG], feed_dict={self._free_vars: x})
        self._objective = obj
        print("done")
        sys.stdout.flush()
        self._needs_recompile = False

        return opt_step
Example 37
    def _compile(self, optimizer=None):
        """
        compile the tensorflow function "self._objective"
        """
        self._graph = tf.Graph()
        self._session = tf.Session(graph=self._graph)
        with self._graph.as_default():
            self._free_vars = tf.Variable(self.get_free_state())

            self.make_tf_array(self._free_vars)
            with self.tf_mode():
                f = self.build_likelihood() + self.build_prior()
                g, = tf.gradients(f, self._free_vars)

            self._minusF = tf.neg(f, name='objective')
            self._minusG = tf.neg(g, name='grad_objective')

            # The optimiser needs to be part of the computational graph, and needs
            # to be initialised before tf.initialize_all_variables() is called.
            if optimizer is None:
                opt_step = None
            else:
                opt_step = optimizer.minimize(self._minusF,
                                              var_list=[self._free_vars])
            init = tf.initialize_all_variables()
        self._session.run(init)

        # build tensorflow functions for computing the likelihood
        if settings.verbosity.tf_compile_verb:
            print("compiling tensorflow function...")
        sys.stdout.flush()

        self._feed_dict_keys = self.get_feed_dict_keys()
        def obj(x):
            feed_dict = {self._free_vars: x}
            self.update_feed_dict(self._feed_dict_keys, feed_dict)
            f, g = self._session.run([self._minusF, self._minusG],
                                     feed_dict=feed_dict)
            return f.astype(np.float64), g.astype(np.float64)

        self._objective = obj
        if settings.verbosity.tf_compile_verb:
            print("done")
        sys.stdout.flush()
        self._needs_recompile = False

        return opt_step
Example 38
    def test_neg(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(20, 30))
        neg_a = tf.neg(a)

        # test
        feed_dict = {a: np.random.rand(*tf_to_shape_tuple(a))}
        self.run(neg_a, tf_feed_dict=feed_dict)
Example 39
 def calculate_loss(self, predictions, labels, **unused_params):
     with tf.name_scope("loss_xent"):
         epsilon = 10e-8
         float_labels = tf.cast(labels, tf.float32)
         cross_entropy_loss = float_labels * tf.log(
             predictions + epsilon) + (
                 1 - float_labels) * tf.log(1 - predictions + epsilon)
         cross_entropy_loss = tf.neg(cross_entropy_loss)
         return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
Example 40
 def _compute_constant(self):
     """
     Computes constant term in lml.
     """
     self.constant = tf.reshape(tf.neg(
         (tf.cast(self.n_tf, dtype=tf.float32) / 2.0) *
         tf.log(2 * math.pi)),
                                shape=[1, 1],
                                name='constant')
Example 41
    def create_network(self):
        networks = {}

        with tf.variable_scope('q_net'):

            # Input parameters
            x = networks['x'] = tf.placeholder(tf.float32, \
                            shape=[None, self.states], name='states')
            u = networks['u'] = tf.placeholder(tf.float32, \
                            shape=[None, self.actions], name='actions')

            # hidden layers
            init = 1. / self.hidden_nodes / self.actions

            hid = tf.concat(1, [x, u])
            hid = fully_connected(hid, self.hidden_nodes, \
                weights_initializer=tf.random_normal_initializer(init, init/5), \
                biases_initializer=tf.random_normal_initializer(init, init/5), \
                activation_fn=tf.tanh)

            for i in xrange(self.hidden_layers - 1):
                hid = fully_connected(hid, self.hidden_nodes, \
                    weights_initializer=tf.random_normal_initializer(init, init/5), \
                    biases_initializer=tf.random_normal_initializer(init, init/5), \
                    activation_fn=tf.nn.relu)

            # Output parameters
            pos_layer = fully_connected(hid, 1, \
                weights_initializer=tf.random_normal_initializer(1./self.actions, 0.1), \
                biases_initializer=tf.random_normal_initializer(1./self.actions, 0.1))
            neg_layer = tf.neg(fully_connected(hid, 1, \
                weights_initializer=tf.random_normal_initializer(1./self.actions, 0.1), \
                biases_initializer=tf.random_normal_initializer(1./self.actions, 0.1)))

            Q = networks['Q'] = pos_layer + neg_layer

            # Describe loss functions.
            y_ = networks['y_'] = tf.placeholder(tf.float32, [None, 1],
                                                 name='y_i')

            # Tensor outputs to calculate y_i values
            networks['reward'] = tf.placeholder(tf.float32, [None, 1],
                                                name='reward')
            networks['y_calc'] = tf.add(networks['reward'],
                                        tf.mul(Q, self.gamma))

            networks['mse'] = tf.reduce_mean(tf.squared_difference(y_, \
                            Q), name='mse')
            networks['cross_entropy'] = -tf.reduce_sum(y_ * tf.log(Q),
                                                       name='cross_entropy')

            networks['optimize'] = tf.train.AdamOptimizer(\
                        learning_rate=self.alpha) \
                        .minimize(networks['mse'])

        self.tensors = networks
        return
Example 42
def psnr_loss(inference_tensor, reference_tensor, name="loss_layer"):
    with tf.name_scope(name) as scope:
        l2 = tf.square(inference_tensor - reference_tensor,
                       name='l2_difference')
        MSE = tf.reduce_mean(l2, name='MSE')
        # MSE = tf.nn.l2_loss(inference_tensor - reference_tensor, name='MSE')
        loss = tf.neg(tf.log(tf.inv(tf.sqrt(MSE + FLAGS.eps))), name='psnr')
        tf.add_to_collection('losses', loss)
    return loss
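For reference, the loss simplifies algebraically: -log(1 / sqrt(MSE + eps)) = 0.5 * log(MSE + eps). Since PSNR is proportional to -log(MSE) for a fixed peak value, minimizing this loss maximizes PSNR.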
Example 43
    def __build_graph(self):
        self.__graph = tf.Graph()
        with self.__graph.as_default(), self.__graph.device(_device_for_node):
            count_max = tf.constant([self.cooccurrence_cap], dtype=tf.float32,
                                    name='max_cooccurrence_count')
            scaling_factor = tf.constant([self.scaling_factor], dtype=tf.float32,
                                         name="scaling_factor")

            self.__focal_input = tf.placeholder(tf.int32, shape=[self.batch_size],
                                                name="focal_words")
            self.__context_input = tf.placeholder(tf.int32, shape=[self.batch_size],
                                                  name="context_words")
            self.__cooccurrence_count = tf.placeholder(tf.float32, shape=[self.batch_size],
                                                       name="cooccurrence_count")

            focal_embeddings = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], 1.0, -1.0),
                name="focal_embeddings")
            context_embeddings = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], 1.0, -1.0),
                name="context_embeddings")

            focal_biases = tf.Variable(tf.random_uniform([self.vocab_size], 1.0, -1.0),
                                       name='focal_biases')
            context_biases = tf.Variable(tf.random_uniform([self.vocab_size], 1.0, -1.0),
                                         name="context_biases")

            focal_embedding = tf.nn.embedding_lookup([focal_embeddings], self.__focal_input)
            context_embedding = tf.nn.embedding_lookup([context_embeddings], self.__context_input)
            focal_bias = tf.nn.embedding_lookup([focal_biases], self.__focal_input)
            context_bias = tf.nn.embedding_lookup([context_biases], self.__context_input)

            weighting_factor = tf.minimum(
                1.0,
                tf.pow(
                    tf.div(self.__cooccurrence_count, count_max),
                    scaling_factor))

            embedding_product = tf.reduce_sum(tf.mul(focal_embedding, context_embedding), 1)

            log_cooccurrences = tf.log(tf.to_float(self.__cooccurrence_count))

            distance_expr = tf.square(tf.add_n([
                embedding_product,
                focal_bias,
                context_bias,
                tf.neg(log_cooccurrences)]))

            single_losses = tf.mul(weighting_factor, distance_expr)
            self.__total_loss = tf.reduce_sum(single_losses)
            tf.scalar_summary("GloVe loss", self.__total_loss)
            self.__optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(
                self.__total_loss)
            self.__summary = tf.merge_all_summaries()

            self.__combined_embeddings = tf.add(focal_embeddings, context_embeddings,
                                                name="combined_embeddings")
Example 44
    def loss_function(self):
        pos_diff = self.anchor - self.positive
        neg_diff = self.anchor - self.negative

        pos_dist = tf.reduce_sum(tf.mul(pos_diff, pos_diff), 1)
        neg_dist = tf.reduce_sum(tf.mul(neg_diff, neg_diff), 1)

        triplet = tf.add(self.ALPHA, tf.add(pos_dist, tf.neg(neg_dist)))
        return tf.reduce_sum(tf.nn.relu(triplet))
Example 45
def binary_cross_entropy(prediction, target):
    """
    let o=prediction, t=target
    -(t*log(o) + (1-t)*log(1-o))
    
    Adds a small (1e-12) value to the logarithms to avoid log(0)
    """
    op1 = tf.mul(target, tf.log(prediction + 1e-12))
    op2 = tf.mul(tf.sub(1., target), tf.log(tf.sub(1., prediction) + 1e-12))
    return tf.neg(tf.add(op1, op2))
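A quick element-wise check of binary_cross_entropy (hypothetical, pre-1.0 TF API):

import tensorflow as tf

prediction = tf.constant([0.9, 0.2])
target = tf.constant([1.0, 0.0])
with tf.Session() as sess:
    print(sess.run(binary_cross_entropy(prediction, target)))
    # ~[0.105, 0.223], i.e. [-log(0.9), -log(0.8)]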
Example 46
def gaussian_kernel(tensor_a, a_inputs, tensor_b, b_inputs, gamma):
    """Returns the Gaussian kernel matrix of two matrices of vectors
    element-wise."""
    cross = cross_matrices(tensor_a, a_inputs, tensor_b, b_inputs)

    kernel = tf.exp(tf.mul(tf.reduce_sum(tf.square(
        tf.sub(cross[0], cross[1])), reduction_indices=2),
        tf.neg(tf.constant(gamma, dtype=tf.float32))))

    return kernel
Example 47
def activation(type, synapse):
    """Chooses the activation function to use."""
    if type == "sigmoid":
        return tf.sigmoid(synapse)
    elif type == "linear":
        return synapse
    elif type == "tanh":
        return tf.tanh(synapse)
    elif type == "radial":
        return tf.sqrt(tf.exp(tf.neg(tf.square(synapse))))
Example 48
def gabor(n_values=32, sigma=1.0, mean=0.0):
    x = tf.linspace(-3.0, 3.0, n_values)
    z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                       (2.0 * tf.pow(sigma, 2.0)))) *
         (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
    gauss_kernel = tf.matmul(
        tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
    x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
    y = tf.reshape(tf.ones_like(x), [1, n_values])
    gabor_kernel = tf.mul(tf.matmul(x, y), gauss_kernel)
    return gabor_kernel
Example 49
	def test_1(self):
		"""
		ht->tf
		"""
		a=tf.constant(2)
		b=tf.constant(3)
		c=tdb.python_op(myadd,inputs=[a,b],outputs=[tf.placeholder(tf.int32)]) # a+b
		d=tf.neg(c)
		status,result=tdb.debug([d], feed_dict=None, breakpoints=None, break_immediately=False)	
		self.assertEqual(status, tdb.FINISHED)
		self.assertEqual(result[0],-5)
Example 50
def loss(output, target):
    output = tf.squeeze(output,squeeze_dims=[3])
    all_true_probability = output
    all_false_probability = tf.sub(tf.constant(1,dtype=tf.float32),output)
    target = tf.squeeze(target, squeeze_dims=[0])
    actual_probability = tf.select(target,all_true_probability,all_false_probability)
    log_probability = tf.log(actual_probability)
    total_log_prob = tf.reduce_sum(log_probability,name='log_loss')
    total_log_loss = tf.neg(total_log_prob)
    
    return total_log_loss
Example 51
    def _compile(self, optimizer=None, verbose=False):
        """
        compile the tensorflow function "self._objective"
        """
        self._free_vars = tf.Variable(self.get_free_state())

        self.make_tf_array(self._free_vars)
        with self.tf_mode():
            f = self.build_likelihood() + self.build_prior()
            g, = tf.gradients(f, self._free_vars)

        self._minusF = tf.neg(f, name='objective')
        self._minusG = tf.neg(g, name='grad_objective')

        # The optimiser needs to be part of the computational graph, and needs
        # to be initialised before tf.initialize_all_variables() is called.
        if optimizer is None:
            opt_step = None
        else:
            opt_step = optimizer.minimize(self._minusF,
                                          var_list=[self._free_vars])
        init = tf.initialize_all_variables()
        self._session.run(init)

        # build tensorflow functions for computing the likelihood
        if verbose:
            print("compiling tensorflow function...")
            
        sys.stdout.flush()

        def obj(x):
            return self._session.run([self._minusF, self._minusG],
                                     feed_dict={self._free_vars: x})

        self._objective = obj
        if verbose:
            print("done")
        sys.stdout.flush()
        self._needs_recompile = False

        return opt_step
Example 52
    def __init__(self, num_words):
        # Define the hyperparameters
        self.dim = embed_dim  # The size of the learned embeddings
        self.alpha = glove_alpha
        self.xmax = glove_xmax
        self.learning_rate = glove_learning_rate
        self.batch_size = glove_batch_size
        self.training_epochs = glove_training_epochs
        self.display_epoch_freq = 10  # How often to test and print out statistics
        self.num_words = num_words  # The number of vectors to learn
        self.embeddings_cache = None  # To be set later

        #self.inter_sess = tf.InteractiveSession();

        # Define the inputs to the model
        self.Wi_input = tf.placeholder(tf.int32, None)
        self.Wj_input = tf.placeholder(tf.int32, None)
        self.coocur_input = tf.placeholder(tf.float32, None)

        # Define the trainable parameters of the model
        self.embeddings = tf.Variable(tf.random_uniform([len(vocabulary), self.dim], 1.0, -1.0))
        self.bias = tf.Variable(tf.zeros([len(vocabulary)]))

        # Define the forward computation of the model
        self.Wi_embeddings = tf.reshape(tf.nn.embedding_lookup([self.embeddings], self.Wi_input),
                                        [self.batch_size, 1, self.dim ])
        # print(tf.shape(self.Wi_embeddings))
        self.Wj_embeddings = tf.reshape(tf.nn.embedding_lookup([self.embeddings], self.Wj_input),
                                        [self.batch_size, self.dim, 1])
        self.bi = tf.reshape(tf.nn.embedding_lookup([self.bias], self.Wi_input), [self.batch_size, 1])
        self.bj = tf.reshape(tf.nn.embedding_lookup([self.bias], self.Wj_input), [self.batch_size, 1])
        self.dot_products = tf.reshape(tf.batch_matmul(self.Wi_embeddings, self.Wj_embeddings), [self.batch_size, 1])
        # tf.Print(self.dot_products, [self.dot_products], message="This is a: ")
        self.square_term = tf.square(tf.add_n([self.dot_products, self.bi, self.bj,
                                               tf.neg(tf.log(tf.to_float(self.coocur_input)))]))
        self.weight_factor = tf.minimum(1.0, tf.pow(tf.div(self.coocur_input, self.xmax), self.alpha))
        self.example_cost = tf.mul(self.weight_factor, self.square_term)

        # Define the cost function
        self.total_cost = tf.reduce_mean(self.example_cost)

        # Train
        self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.total_cost)

        # Initialize variables
        self.init = tf.initialize_all_variables()

        # Initialize the model
        self.sess = tf.Session()
        self.sess.run(self.init)
Example 53
def logsigmoid(input_tensor, name='logsigmoid'):
    '''Compute the log-sigmoid of an input tensor:

        ``f(x) = - log(1 + exp(-x))``

    Parameters
    ----------
    input_tensor : tf.Tensor
        The input tensor to scale

    Returns
    -------
    logsigmoid : tf.Operator
        The log-sigmoid operator
    '''
    with tf.name_scope(name):
        output = tf.neg(tf.nn.softplus(-input_tensor))
    return output
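The identity behind this: log(sigmoid(x)) = -log(1 + exp(-x)) = -softplus(-x), so the softplus form avoids computing sigmoid(x) first. A sanity check (hypothetical, pre-1.0 TF API):

import tensorflow as tf

x = tf.constant([-5.0, 0.0, 3.0])
with tf.Session() as sess:
    a, b = sess.run([logsigmoid(x), tf.log(tf.sigmoid(x))])
    # a ~= b ~= [-5.0067, -0.6931, -0.0486]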
Example 54
 def __init__(self, label, domain, layers=default_layers, defined=None, type_idx=None):
     self.label = label
     self.type_idx = type_idx
     self.defined = defined
     self.domain = domain
     self.number_of_layers = layers
     self.W = tf.Variable(tf.zeros([layers,
                                           self.domain.columns,
                                           self.domain.columns]),
                          name="W"+label)
     self.V = tf.Variable(tf.zeros([layers,
                                            self.domain.columns]),
                          name="V"+label)
     self.b = tf.Variable(tf.neg(tf.ones([1,layers])),
                          name="b"+label)
     self.u = tf.Variable(tf.ones([layers,1]),
                          name="u"+label)
     self.parameters = [self.W, self.V, self.b, self.u]
Example 55
    def fit(self):
        self.init = tf.initialize_all_variables()

        distance = tf.reduce_sum(tf.abs(tf.add(self.xtr, tf.neg(self.xte))), reduction_indices=1)
        self.pred = tf.arg_min(distance, 0)
        accuracy = 0.

        if self.x_test is None:
            return

        with tf.Session() as sess:
            sess.run(self.init)

            for i in range(len(self.x_test)):
                nn_index = sess.run(self.pred, feed_dict={self.xtr: self.x_train, self.xte: self.x_test[i, :]})
                # print "Test", i, "Prediction:", np.argmax(self.y_train[nn_index]), \
                #     "True Class:", np.argmax(self.y_test[i])
                if np.argmax(self.y_train[nn_index]) == np.argmax(self.y_test[i]):
                    accuracy += 1. / len(self.x_test)
            print "Done!"
            print "Accuracy:", accuracy
            return accuracy
Example 56
def gauss(mean, stddev, ksize):
    """Use Tensorflow to compute a Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        x = tf.linspace(-3.0, 3.0, ksize)
        z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                           (2.0 * tf.pow(stddev, 2.0)))) *
             (1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
        return z.eval()
Example 57
def gaussian2d(x, y, cx, cy, a, b, dtype = tf.float32):
    """
    This function calculates the sum of n 2D Gaussian probability density
    functions at m points
    y, x : m x n 2D tensor. Position of calculation points 
      m is number of calculation points
      n is number of Gaussian functions
    cx, cy, a, b : m x n 2D tensor.
      Parameters of Gaussian function
      cx and cy are center position
      a and b are the width in x and y direction
    """
    # A = 1/(2*pi*a*b)
    A = tf.inv(tf.mul(tf.constant(2.0*np.pi, dtype), tf.mul(a, b)))
    # powerX = (x-xc)^2 / (2*a^2)
    powerX = tf.truediv(tf.pow(tf.sub(x, cx) , tf.constant(2.0, dtype)),
      tf.mul(tf.constant(2.0, dtype),tf.pow(a, tf.constant(2.0, dtype))))
    # powerY = (y-yc)^2 / (2*b^2)
    powerY = tf.truediv(tf.pow(tf.sub(y, cy) , tf.constant(2.0, dtype)),
      tf.mul(tf.constant(2.0, dtype),tf.pow(b, tf.constant(2.0, dtype))))
    # p = A*exp(- powerX - powerY)    standard 2D Gaussian distribution
    probability = tf.reduce_sum(
      tf.mul(A, tf.exp(tf.neg(tf.add(powerX, powerY)))), 1)
    return probability
Example 58
  def setUp(self):
    self.a = tf.Variable(2.0, name="a")
    self.b = tf.Variable(3.0, name="b")

    self.c = tf.mul(self.a, self.b, name="c")  # Should be 6.0.
    self.d = tf.mul(self.a, self.a, name="d")  # Should be 4.0.

    self.e = tf.mul(self.d, self.c, name="e")  # Should be 24.0.

    self.f_y = tf.constant(0.30, name="f_y")
    self.f = tf.div(self.b, self.f_y, name="f")  # Should be 10.0.

    # The three nodes x, y and z form a graph with "cross-links" in it. I.e., x
    # and y are both direct inputs to z, but x is also a direct input to y.
    self.x = tf.Variable(2.0, name="x")  # Should be 2.0
    self.y = tf.neg(self.x, name="y")  # Should be -2.0.

    self.z = tf.mul(self.x, self.y, name="z")  # Should be -4.0.

    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())

Example 59
x.eval(session=sess)
# x.eval() does not work, as it requires a session!

# %% We can setup an interactive session if we don't
# want to keep passing the session around:
sess.close()
sess = tf.InteractiveSession()

# %% Now this will work!
x.eval()

# %% Now a tf.Operation
# We'll use our values from [-3, 3] to create a Gaussian Distribution
sigma = 1.0
mean = 0.0
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                   (2.0 * tf.pow(sigma, 2.0)))) *
     (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))

# %% By default, new operations are added to the default Graph
assert z.graph is tf.get_default_graph()

# %% Execute the graph and plot the result
plt.plot(z.eval())

# %% We can find out the shape of a tensor like so:
print(z.get_shape())

# %% Or in a more friendly format
print(z.get_shape().as_list())

# %% Sometimes we may not know the shape of a tensor