Example #1
    def _autoencoder(self):

        updates = []
        layers = self._layers[:-1]
        for i, layer in enumerate(layers):
            a = layer.output  # [3000, 784]
            u = self._layers[i + 1].output
            kernel = layer.kernel
            temporary_shape = utility.transpose_shape(kernel)  # [1000, 784]
            if layer.use_bias:
                temporary_shape[0] += 1
                kernel = tf.concat((kernel, layer.bias[None, ...]), axis=0)
            temporary_kernel = tf.get_variable(
                'temporal_{}'.format(i),
                temporary_shape,
                dtype=tf.float64,
                initializer=tf.contrib.layers.xavier_initializer(),
                trainable=False)
            u, _ = mf.semi_nmf(
                a=a,
                u=u,
                v=temporary_kernel,
                use_tf=True,
                use_bias=layer.use_bias,
                num_iters=1,
                first_nneg=True,
            )

            # No activation: plain semi-NMF update.
            if not layer.activation:
                _, v = mf.semi_nmf(
                    a=u,
                    u=a,
                    v=kernel,
                    use_tf=True,
                    use_bias=layer.use_bias,
                    num_iters=1,
                    first_nneg=True,
                )
            # ReLU activation: nonlinear semi-NMF update.
            else:
                _, v = mf.nonlin_semi_nmf(
                    a=u,
                    u=a,
                    v=kernel,
                    use_tf=True,
                    use_bias=layer.use_bias,
                    num_calc_v=0,
                    num_calc_u=1,
                    first_nneg=True,
                )
            if layer.use_bias:
                v, bias = utility.split_v_bias(v)
                updates.append(layer.bias.assign(bias))
            updates.append(layer.kernel.assign(v))
        return tf.group(*updates)
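The utility helpers used by _autoencoder are not reproduced on this page. As a rough guide, minimal sketches of what they plausibly do are below, assuming the bias is stacked as the last row of v (matching the tf.concat((kernel, layer.bias[None, ...]), axis=0) call above); these bodies are reconstructions, not the repository's actual code.

    def split_v_bias(v):
        """Split a stacked [kernel; bias] matrix back into kernel and bias."""
        return v[:-1, :], v[-1, :]

    def transpose_shape(kernel):
        """Static shape of the transposed kernel, as a mutable list."""
        return kernel.shape.as_list()[::-1]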
Example #2
    def test_tf_vanilla_semi_nmf(self):
        a = np.random.uniform(-1., 1., size=(100, 100))
        u = np.random.uniform(0., 1., size=(100, 300))
        v = np.random.uniform(-1., 1., size=(300, 100))
        old_loss = np_frobenius_norm(a, u @ v)

        # [100, 100]
        a_ph = tf.placeholder(tf.float64, shape=a.shape)
        # [100, 300]
        u_ph = tf.placeholder(tf.float64, shape=u.shape)
        # [300, 100]
        v_ph = tf.placeholder(tf.float64, shape=v.shape)
        tf_u, tf_v = semi_nmf(a_ph, u_ph, v_ph, use_tf=True, use_bias=False)
        tf_loss = frobenius_norm(a_ph, tf.matmul(tf_u, tf_v))

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()

            start_time = time.time()
            _u, _v, new_loss = sess.run([tf_u, tf_v, tf_loss], feed_dict={a_ph: a, u_ph: u, v_ph: v})
            assert np.min(_u) > 0, np.min(_u)
            end_time = time.time()

        duration = end_time - start_time
        assert a.shape == (_u @ _v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print('\n[TensorFlow]Solve semi-NMF\n\t'
              'old loss {0}\n\t'
              'new loss {1}\n\t'
              'process duration {2}'.format(old_loss, new_loss, duration))
        print_format('TensorFlow', 'semi-NMF', a, u, v, old_loss, new_loss, duration)
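The np_frobenius_norm helper that every test here compares losses with is also not shown. A plausible minimal definition is the Frobenius norm of the reconstruction residual:

    import numpy as np

    def np_frobenius_norm(a, b):
        """Frobenius norm of the reconstruction error a - b (assumed definition)."""
        return np.linalg.norm(a - b, ord='fro')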
Example #3
    def test_tf_biased_semi_nmf(self):
        auv = sio.loadmat(mat_file)
        a, u, v = auv['a'], auv['u'], auv['v']
        bias_v = np.vstack((v, np.ones((1, v.shape[1]))))
        old_loss = np_frobenius_norm(a, u @ v)

        a_ph = tf.placeholder(tf.float64, shape=a.shape)
        u_ph = tf.placeholder(tf.float64, shape=u.shape)
        bias_v_ph = tf.placeholder(tf.float64, shape=bias_v.shape)

        tf_bias_u, tf_v = semi_nmf(a_ph, u_ph, bias_v_ph, use_bias=True, use_tf=True)

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()

            start_time = time.time()
            _u, _bias_v = sess.run([tf_bias_u, tf_v], feed_dict={a_ph: a, u_ph: u, bias_v_ph: bias_v})
            end_time = time.time()

        duration = end_time - start_time
        _bias_u = np.hstack((_u, np.ones((_u.shape[0], 1))))
        new_loss = np_frobenius_norm(a, _bias_u @ _bias_v)
        assert a.shape == (_bias_u @ _bias_v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print_format('TensorFlow', 'biased semi-NMF', a, _bias_u, _bias_v, old_loss, new_loss, duration)
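The ones column/row trick in the biased tests works because appending a ones column to u and a bias row to v turns the single matmul into an affine map. A small self-contained check, with shapes chosen arbitrarily:

    import numpy as np

    m, k, n = 4, 3, 5
    u = np.random.rand(m, k)
    v = np.random.randn(k, n)
    bias = np.random.randn(1, n)

    bias_u = np.hstack((u, np.ones((m, 1))))  # [m, k+1]
    bias_v = np.vstack((v, bias))             # [k+1, n]

    # The stacked product is exactly u @ v plus a broadcast bias row.
    assert np.allclose(bias_u @ bias_v, u @ v + bias)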
Example #4
    def test_simplest_factorize(self):
        print()
        model = benchmark_model.build_tf_model()
        ops = utility.get_train_ops()
        layers = utility.zip_layer(inputs=model.inputs, ops=ops)

        hidden = layers[-1].output
        last_weights = layers[-1].kernel
        tf_u, tf_v = semi_nmf(model.labels,
                              hidden,
                              last_weights,
                              use_tf=True,
                              use_bias=False,
                              num_iters=3)
        _old_local_loss = losses.frobenius_norm(model.labels,
                                                hidden @ last_weights)
        _new_local_loss = losses.frobenius_norm(model.labels, tf_u @ tf_v)

        x, y = benchmark_model.build_data(batch_size, label_size)
        init = tf.global_variables_initializer()
        with self.test_session() as sess:
            sess.run(init)
            old_local_loss, new_local_loss = sess.run(
                [_old_local_loss, _new_local_loss],
                feed_dict={
                    model.inputs: x,
                    model.labels: y,
                })
            self.assertGreater(old_local_loss, new_local_loss)
            print("old {} new {}".format(old_local_loss, new_local_loss))
Example #5
    def test_tf_vanilla_semi_nmf(self):
        auv = sio.loadmat(mat_file)
        a, u, v = auv['a'], auv['u'], auv['v']
        old_loss = np_frobenius_norm(a, u @ v)

        a_ph = tf.placeholder(tf.float64, shape=a.shape)
        u_ph = tf.placeholder(tf.float64, shape=u.shape)
        v_ph = tf.placeholder(tf.float64, shape=v.shape)
        tf_u, tf_v = semi_nmf(a_ph, u_ph, v_ph, use_tf=True, use_bias=False)
        tf_loss = frobenius_norm(a_ph, tf.matmul(tf_u, tf_v))

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()

            start_time = time.time()
            _u, _v, new_loss = sess.run([tf_u, tf_v, tf_loss],
                                        feed_dict={
                                            a_ph: a,
                                            u_ph: u,
                                            v_ph: v
                                        })
            end_time = time.time()

        duration = end_time - start_time
        assert a.shape == (_u @ _v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print('\n[TensorFlow]Solve semi-NMF\n\t'
              'old loss {0}\n\t'
              'new loss {1}\n\t'
              'process duration {2}'.format(old_loss, new_loss, duration))
Example #6
    def minimize(self, loss=None):
        """Construct the control dependencies that optimize the network weights.

        Returns:
            A grouped op (tf.group) that applies the NMF weight updates.
        """
        self._init(loss)

        if self._use_autoencoder:
            self._autoencoder()

        a = self.labels
        updates = []
        # Iterate over the layers in reverse (output layer first).
        layers = self._layers[::-1]
        for i, layer in enumerate(layers):
            _u = layer.output
            v = layer.kernel

            # Outputs with rank >= 3 are recurrent sequences; keep only
            # the last time step.
            if _u.shape.ndims >= 3 and not isinstance(layer.recurrent, tf.Variable):
                u = _u[:, -1, :]
            else:
                u = _u

            if isinstance(layer.recurrent, tf.Variable):
                v = tf.concat((layer.kernel, layer.recurrent), axis=0)

            if layer.use_bias:
                v = tf.concat((v, layer.bias[None, ...]), axis=0)

            # No activation: plain semi-NMF update.
            if not layer.activation:
                u, v = mf.semi_nmf(a=a, u=u, v=v,
                                   use_tf=True,
                                   use_bias=layer.use_bias,
                                   num_iters=1,
                                   first_nneg=True,
                                   )
            # ReLU activation: nonlinear semi-NMF update.
            else:
                u, v = mf.nonlin_semi_nmf(a=a, u=u, v=v,
                                          use_tf=True,
                                          use_bias=layer.use_bias,
                                          num_calc_v=1,
                                          num_calc_u=1,
                                          first_nneg=True,
                                          )
            if layer.use_bias:
                v, bias = utility.split_v_bias(v)
                updates.append(layer.bias.assign(bias))
            updates.append(layer.kernel.assign(v))
            a = tf.identity(_u)

        return tf.group(*updates)
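The recurrent branch above concatenates the input kernel and the recurrent kernel along axis 0. Assuming the usual pre-activation x @ W + h @ R + b, stacking [W; R; b] against [x, h, 1] collapses the recurrence into one factorizable matmul. A small NumPy check (all names here are hypothetical):

    import numpy as np

    batch, n_in, n_hid = 2, 4, 3
    x = np.random.randn(batch, n_in)     # layer input
    h = np.random.randn(batch, n_hid)    # previous hidden state
    W = np.random.randn(n_in, n_hid)     # input kernel
    R = np.random.randn(n_hid, n_hid)    # recurrent kernel
    b = np.random.randn(n_hid)           # bias

    v = np.vstack((W, R, b[None, :]))            # [n_in + n_hid + 1, n_hid]
    u = np.hstack((x, h, np.ones((batch, 1))))   # [batch, n_in + n_hid + 1]

    # One matmul over the stacked factors reproduces the recurrent pre-activation.
    assert np.allclose(u @ v, x @ W + h @ R + b)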
Example #7
    def test_check_shape(self):
        print()
        a = tf.random_uniform((100, 50))
        u = tf.random_uniform((100, 25))
        v = tf.random_uniform((25, 50))
        tf_u, tf_v = mf.semi_nmf(a, u, v, use_tf=True)
        print(tf_u, tf_v)
        assert u.shape == tf_u.shape
        assert v.shape == tf_v.shape
Example #8
    def minimize(self, loss=None):
        """Construct the control dependencies for calculating neural net optimized.
        
        Returns:
            tf.no_op.
            The import
        """
        self._init(loss)

        if self._use_autoencoder:
            self._autoencoder()

        a = self.labels
        updates = []
        # Iterate over the layers in reverse (output layer first).
        layers = self._layers[::-1]
        for i, layer in enumerate(layers):
            u = layer.output
            v = layer.kernel
            if layer.use_bias:
                v = tf.concat((v, layer.bias[None, ...]), axis=0)

            # No activation: plain semi-NMF update.
            if not layer.activation:
                u, v = mf.semi_nmf(
                    a=a,
                    u=u,
                    v=v,
                    use_tf=True,
                    use_bias=layer.use_bias,
                    num_iters=1,
                    first_nneg=True,
                )
            # ReLU activation: nonlinear semi-NMF update.
            else:
                u, v = mf.nonlin_semi_nmf(
                    a=a,
                    u=u,
                    v=v,
                    use_tf=True,
                    use_bias=layer.use_bias,
                    num_calc_v=1,
                    num_calc_u=1,
                    first_nneg=True,
                )
            if layer.use_bias:
                v, bias = utility.split_v_bias(v)
                updates.append(layer.bias.assign(bias))
            updates.append(layer.kernel.assign(v))
            a = tf.identity(u)

        return tf.group(*updates)
Example #9
    def test_np_u_neg_matlab_semi_nmf(self):
        a = np.random.uniform(size=(100, 100))
        u = np.random.uniform(-1., 1., size=(100, 50))
        v = np.random.uniform(0., 1., size=(50, 100))
        old_loss = np_frobenius_norm(a, u @ v)

        start_time = time.time()

        u, v = semi_nmf(a, u, v, use_bias=False, data_format=False)
        assert np.min(u) < 0, np.min(u)
        assert np.min(v) > 0, np.min(v)

        end_time = time.time()
        duration = end_time - start_time

        new_loss = np_frobenius_norm(a, u @ v)
        assert a.shape == (u @ v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print_format('Numpy', 'semi-NMF', a, u, v, old_loss, new_loss, duration)
Example #10
    def test_np_vanilla_semi_nmf(self):
        auv = sio.loadmat(mat_file)
        a, u, v = auv['a'], auv['u'], auv['v']
        old_loss = np_frobenius_norm(a, u @ v)

        start_time = time.time()

        u, v = semi_nmf(a, u, v, use_bias=False)

        end_time = time.time()
        duration = end_time - start_time

        new_loss = np_frobenius_norm(a, u @ v)
        assert a.shape == (u @ v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print('\n[Numpy]Solve semi-NMF\n\t'
              'old loss {0}\n\t'
              'new loss {1}\n\t'
              'process duration {2}'.format(old_loss, new_loss, duration))
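The semi_nmf solver itself is not shown on this page. For orientation, the classic semi-NMF updates of Ding, Li and Jordan (2010), rewritten for the a ≈ u @ v convention these tests use (u nonnegative by default, v unconstrained), look roughly like the sketch below; the real solver adds the bias handling, iteration counts, and TensorFlow path seen in the examples above.

    import numpy as np

    def _pos(m):
        return (np.abs(m) + m) / 2.0  # elementwise positive part

    def _neg(m):
        return (np.abs(m) - m) / 2.0  # elementwise negative part

    def semi_nmf_step(a, u, v, eps=1e-9):
        """One semi-NMF iteration for a ~= u @ v with u >= 0 (after Ding et al., 2010)."""
        # Unconstrained factor: exact least-squares solution.
        v = np.linalg.pinv(u) @ a
        # Nonnegative factor: multiplicative update that preserves nonnegativity.
        avt, vvt = a @ v.T, v @ v.T
        u = u * np.sqrt((_pos(avt) + u @ _neg(vvt)) /
                        (_neg(avt) + u @ _pos(vvt) + eps))
        return u, v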
Example #11
    def test_np_biased_semi_nmf(self):
        a = np.random.uniform(-1., 1., size=(100, 100))
        u = np.random.uniform(0., 1., size=(100, 300))
        v = np.random.uniform(-1., 1., size=(300, 100))
        old_loss = np_frobenius_norm(a, u @ v)

        bias_v = np.vstack((v, np.ones((1, v.shape[1]))))
        start_time = time.time()

        u, bias_v = semi_nmf(a, u, bias_v, use_bias=True)
        assert np.min(u) > 0, np.min(u)

        end_time = time.time()
        duration = end_time - start_time

        bias_u = np.hstack((u, np.ones((u.shape[0], 1))))

        new_loss = np_frobenius_norm(a, bias_u @ bias_v)
        assert a.shape == (bias_u @ bias_v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print_format('Numpy', 'biased semi-NMF', a, bias_u, bias_v, old_loss, new_loss, duration)
Example #12
    def test_np_biased_semi_nmf(self):
        auv = sio.loadmat(mat_file)
        a, u, v = auv['a'], auv['u'], auv['v']
        old_loss = np_frobenius_norm(a, u @ v)

        u = np.hstack((u, np.ones((u.shape[0], 1))))
        start_time = time.time()

        u, v = semi_nmf(a, u, v, use_bias=True)

        end_time = time.time()
        duration = end_time - start_time

        bias_v = np.vstack((v, np.ones((1, v.shape[1]))))

        new_loss = np_frobenius_norm(a, u @ bias_v)
        assert a.shape == (u @ bias_v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print('\n[Numpy]Solve biased semi-NMF\n\t'
              'old loss {0}\n\t'
              'new loss {1}\n\t'
              'process duration {2}'.format(old_loss, new_loss, duration))
Example #13
    def minimize(self, loss=None, pretrain=False):
        """Construct the control dependencies for calculating neural net optimized.
        
        Returns:
            tf.no_op.
            The import
        """
        self._init(loss)
        # Pre-train with the autoencoder.
        pretrain_op = self._autoencoder() if pretrain else tf.no_op()

        a = self.labels
        updates = []
        # Iterate over the layers in reverse (output layer first).
        layers = self._layers[::-1]
        for i, layer in enumerate(layers):
            u = layer.output
            v = layer.kernel
            if layer.use_bias:
                v = tf.concat((v, layer.bias[None, ...]), axis=0)

            # No activation: plain semi-NMF update.
            if not layer.activation:
                u, v = mf.semi_nmf(
                    a=a,
                    u=u,
                    v=v,
                    use_tf=True,
                    use_bias=layer.use_bias,
                    num_iters=1,
                    first_nneg=True,
                )
            # ReLU activation: nonlinear semi-NMF update.
            elif utility.get_op_name(layer.activation) == 'Relu':
                u, v = mf.nonlin_semi_nmf(
                    a=a,
                    u=u,
                    v=v,
                    use_tf=True,
                    use_bias=layer.use_bias,
                    num_calc_v=1,
                    num_calc_u=1,
                    first_nneg=True,
                )
            # Softmax activation: softmax-NMF update.
            elif utility.get_op_name(layer.activation) == 'Softmax':
                print('used softmax!!')
                u, v = mf.softmax_nmf(
                    a=a,
                    u=u,
                    v=v,
                    use_tf=True,
                    use_bias=layer.use_bias,
                )
            if layer.use_bias:
                v, bias = utility.split_v_bias(v)
                updates.append(layer.bias.assign(bias))
            updates.append(layer.kernel.assign(v))
            a = tf.identity(u)

        return AttrDict(ae=pretrain_op, nmf=tf.group(*updates))
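A hypothetical usage of the AttrDict this minimize() returns: run the ae pre-training op once, then the nmf update op per step. Here optimizer, model, x, y, and num_steps are placeholder names, not part of the example above.

    train_ops = optimizer.minimize(loss, pretrain=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # One-shot autoencoder pre-training, then repeated NMF weight updates.
        sess.run(train_ops.ae, feed_dict={model.inputs: x, model.labels: y})
        for _ in range(num_steps):
            sess.run(train_ops.nmf, feed_dict={model.inputs: x, model.labels: y})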