Example 1
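    # Sanity test: factorizing the last layer with three semi-NMF iterations
    # should reduce the local Frobenius loss (old > new).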
    def test_simplest_factorize(self):
        print()
        model = benchmark_model.build_tf_model()
        ops = utility.get_train_ops()
        layers = utility.zip_layer(inputs=model.inputs, ops=ops)

        hidden = layers[-1].output
        last_weights = layers[-1].kernel
        tf_u, tf_v = semi_nmf(model.labels,
                              hidden,
                              last_weights,
                              use_tf=True,
                              use_bias=False,
                              num_iters=3)
        _old_local_loss = losses.frobenius_norm(model.labels,
                                                hidden @ last_weights)
        _new_local_loss = losses.frobenius_norm(model.labels, tf_u @ tf_v)

        x, y = benchmark_model.build_data(batch_size, label_size)  # constants defined elsewhere in the test module
        init = tf.global_variables_initializer()
        with self.test_session() as sess:
            sess.run(init)
            old_local_loss, new_local_loss = sess.run(
                [_old_local_loss, _new_local_loss],
                feed_dict={
                    model.inputs: x,
                    model.labels: y,
                })
            self.assertGreater(old_local_loss, new_local_loss)
            print("old {} new {}".format(old_local_loss, new_local_loss))
Example 2
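# Simple two-hidden-layer model (1000 -> 500) trained to minimize the
# Frobenius norm between labels and outputs; also returns a scaled "mse".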
def build_tf_hitachi_simple_model(batch_size,
                                  feature_size=3,
                                  label_size=4,
                                  use_bias=False,
                                  activation=None):
    inputs = tf.placeholder(tf.float64, (batch_size, feature_size),
                            name='inputs')
    labels = tf.placeholder(tf.float64, (batch_size, label_size),
                            name='labels')
    x = tf.layers.dense(inputs, 1000, activation=activation, use_bias=use_bias)
    x = tf.layers.dense(x, 500, activation=activation, use_bias=use_bias)
    outputs = tf.layers.dense(x,
                              label_size,
                              activation=None,
                              use_bias=use_bias)
    losses = frobenius_norm(labels, outputs)
    loss = tf.reduce_mean(losses)
    mse = loss / tf.sqrt(tf.constant(batch_size,
                                     tf.float64))  # scaling taught by SAKURAI
    # mse_losses = losses / tf.sqrt(tf.constant(batch_size, tf.float64))  # scaling taught by SAKURAI
    mse_losses = losses
    return agents.tools.AttrDict(
        inputs=inputs,
        outputs=outputs,
        labels=labels,
        loss=loss,
        mse=mse,
        mse_losses=mse_losses,
    )
Example 3
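# Deeper variant: seven 128-unit dense layers before the linear output layer,
# with the same Frobenius-norm objective.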
def build_tf_hitachi_model(batch_size,
                           feature_size=3,
                           label_size=4,
                           use_bias=False,
                           activation=None):
    num_layers = 7
    units = 128
    inputs = tf.placeholder(tf.float64, (batch_size, feature_size),
                            name='inputs')
    labels = tf.placeholder(tf.float64, (batch_size, label_size),
                            name='labels')
    x = inputs
    for i in range(num_layers):
        x = tf.layers.dense(x,
                            units=units,
                            activation=activation,
                            use_bias=use_bias)
    outputs = tf.layers.dense(x,
                              label_size,
                              activation=None,
                              use_bias=use_bias)
    losses = frobenius_norm(labels, outputs)
    loss = tf.reduce_mean(losses)
    return agents.tools.AttrDict(
        inputs=inputs,
        outputs=outputs,
        labels=labels,
        loss=loss,
    )
Example 4
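# One-hot classifier for `shape`-feature inputs with 10 classes; exposes both
# the Frobenius-norm and softmax cross-entropy losses. Note: the use_softmax
# argument is accepted but unused in this snippet.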
def build_tf_one_hot_model(batch_size,
                           shape=784,
                           use_bias=False,
                           activation=None,
                           use_softmax=False):
    inputs = tf.placeholder(tf.float64, (batch_size, shape), name='inputs')
    labels = tf.placeholder(tf.float64, (batch_size, 10), name='labels')

    x = tf.layers.dense(inputs, 1000, activation=activation, use_bias=use_bias)
    x = tf.layers.dense(x, 500, activation=activation, use_bias=use_bias)
    outputs = tf.layers.dense(x, 10, activation=None, use_bias=use_bias)

    losses = frobenius_norm(labels, outputs)
    frob_norm = tf.reduce_mean(losses)
    other_losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                              logits=outputs)
    cross_entropy = tf.reduce_mean(other_losses)

    correct_prediction = tf.equal(tf.argmax(labels, 1), tf.argmax(outputs, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) * 100.

    return agents.tools.AttrDict(
        inputs=inputs,
        outputs=outputs,
        labels=labels,
        frob_norm=frob_norm,
        cross_entropy=cross_entropy,
        accuracy=accuracy,
    )
Example 5
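    # Loads the u_neg.mat fixture and runs the Python nonlin_semi_nmf solver
    # through tf.py_func, asserting the Frobenius loss decreases.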
    def test_u_neg_nonlin_semi_nmf(self):
        mat_file = python_path.joinpath('./u_neg.mat').as_posix()
        auv = sio.loadmat(mat_file)
        a, u, v = auv['a'], auv['u'], auv['v']

        old_loss = np_frobenius_norm(a, u @ v)

        a_ph = tf.placeholder(tf.float64, shape=a.shape)
        u_ph = tf.placeholder(tf.float64, shape=u.shape)
        v_ph = tf.placeholder(tf.float64, shape=v.shape)
        tf_u, tf_v = tf.py_func(nonlin_semi_nmf, [a_ph, u_ph, v_ph],
                                [tf.float64, tf.float64])
        tf_loss = frobenius_norm(a_ph, tf.nn.relu(tf_u @ tf_v))

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()

            start_time = time.time()
            _u, _v, new_loss = sess.run([tf_u, tf_v, tf_loss],
                                        feed_dict={
                                            a_ph: a,
                                            u_ph: u,
                                            v_ph: v
                                        })
            end_time = time.time()

        duration = end_time - start_time
        assert a.shape == (_u @ _v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print('solve Nonlinear semi-NMF\n\t'
              'old loss {0}\n\t'
              'new loss {1}\n\t'
              'process duration {2}'.format(old_loss, new_loss, duration))
Example 6
    # Same fixture workflow, but only the u factor is updated (num_calc_v=0);
    # the loss should still drop.
    def test_tf_not_calc_v_nonlin_semi_nmf(self):
        auv = sio.loadmat(mat_file)  # mat_file: fixture path defined elsewhere in the test module
        a, u, v = auv['a'], auv['u'], auv['v']
        old_loss = np_frobenius_norm(a, u @ v)

        # [1000, 500]
        a_ph = tf.placeholder(tf.float64, shape=a.shape)
        # [1000, 201]
        u_ph = tf.placeholder(tf.float64, shape=u.shape)
        # [200, 500]
        v_ph = tf.placeholder(tf.float64, shape=v.shape)
        tf_u, tf_v = nonlin_semi_nmf(a_ph, u_ph, v_ph,
                                     use_tf=True, use_bias=False,
                                     num_calc_v=0, num_calc_u=1)
        tf_loss = frobenius_norm(a_ph, tf.nn.relu(tf.matmul(tf_u, tf_v)))

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()

            start_time = time.time()
            _u, _v, new_loss = sess.run([tf_u, tf_v, tf_loss],
                                        feed_dict={a_ph: a, u_ph: u, v_ph: v})
            end_time = time.time()

        duration = end_time - start_time
        assert a.shape == (_u @ _v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print_format('TensorFlow', 'Nonlinear semi-NMF (NOT CALCULATING v)',
                     a, u, v, old_loss, new_loss, duration)
Example 7
    # Vanilla semi-NMF on random matrices; also checks the returned u stays
    # strictly positive.
    def test_tf_vanilla_semi_nmf(self):
        a = np.random.uniform(-1., 1., size=(100, 100))
        u = np.random.uniform(0., 1., size=(100, 300))
        v = np.random.uniform(-1., 1., size=(300, 100))
        old_loss = np_frobenius_norm(a, u @ v)

        # [100, 100]
        a_ph = tf.placeholder(tf.float64, shape=a.shape)
        # [100, 300]
        u_ph = tf.placeholder(tf.float64, shape=u.shape)
        # [300, 100]
        v_ph = tf.placeholder(tf.float64, shape=v.shape)
        tf_u, tf_v = semi_nmf(a_ph, u_ph, v_ph, use_tf=True, use_bias=False)
        tf_loss = frobenius_norm(a_ph, tf.matmul(tf_u, tf_v))

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()

            start_time = time.time()
            _u, _v, new_loss = sess.run([tf_u, tf_v, tf_loss],
                                        feed_dict={a_ph: a, u_ph: u, v_ph: v})
            assert np.min(_u) > 0, np.min(_u)
            end_time = time.time()

        duration = end_time - start_time
        assert a.shape == (_u @ _v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print('\n[TensorFlow]Solve semi-NMF\n\t'
              'old loss {0}\n\t'
              'new loss {1}\n\t'
              'process duration {2}'.format(old_loss, new_loss, duration))
        print_format('TensorFlow', 'semi-NMF', a, u, v, old_loss, new_loss, duration)
Example 8
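# RNN variant for MNIST: each image is fed as 28 time steps of 28 features;
# only the last step of the SimpleRNN output feeds the dense classifier.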
def build_rnn_mnist(batch_size, use_bias=False, activation=None):
    time_steps = 28
    num_features = 28
    inputs = tf.keras.layers.Input((time_steps, num_features), batch_size=batch_size, dtype=tf.float64, name='inputs')
    labels = tf.placeholder(tf.float64, (batch_size, 10), name='labels')
    
    x = tf.keras.layers.SimpleRNN(100, use_bias=use_bias, activation=activation, return_sequences=True)(inputs)
    x = x[:, -1, :]
    # x = tf.keras.layers.SimpleRNN(100, use_bias=use_bias, activation=activation)(inputs)
    outputs = tf.layers.dense(x, 10, activation=None, use_bias=use_bias)
    
    losses = frobenius_norm(labels, outputs)
    frob_norm = tf.reduce_mean(losses)
    other_losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=outputs)
    cross_entropy = tf.reduce_mean(other_losses)
    
    correct_prediction = tf.equal(tf.argmax(labels, 1), tf.argmax(outputs, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) * 100.
    
    return agents.tools.AttrDict(inputs=inputs,
                                 outputs=outputs,
                                 labels=labels,
                                 frob_norm=frob_norm,
                                 cross_entropy=cross_entropy,
                                 accuracy=accuracy,
                                 )
Example 9
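    # Vanilla semi-NMF on the a/u/v matrices from the shared .mat fixture;
    # asserts the factorization lowers the Frobenius loss.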
    def test_tf_vanilla_semi_nmf(self):
        auv = sio.loadmat(mat_file)  # mat_file: fixture path defined elsewhere in the test module
        a, u, v = auv['a'], auv['u'], auv['v']
        old_loss = np_frobenius_norm(a, u @ v)

        # [1000, 500]
        a_ph = tf.placeholder(tf.float64, shape=a.shape)
        # [1000, 201]
        u_ph = tf.placeholder(tf.float64, shape=u.shape)
        # [200, 500]
        v_ph = tf.placeholder(tf.float64, shape=v.shape)
        tf_u, tf_v = semi_nmf(a_ph, u_ph, v_ph, use_tf=True, use_bias=False)
        tf_loss = frobenius_norm(a_ph, tf.matmul(tf_u, tf_v))

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()

            start_time = time.time()
            _u, _v, new_loss = sess.run([tf_u, tf_v, tf_loss],
                                        feed_dict={
                                            a_ph: a,
                                            u_ph: u,
                                            v_ph: v
                                        })
            end_time = time.time()

        duration = end_time - start_time
        assert a.shape == (_u @ _v).shape
        assert new_loss < old_loss, "new loss should be less than old loss."
        print('\n[TensorFlow]Solve semi-NMF\n\t'
              'old loss {0}\n\t'
              'new loss {1}\n\t'
              'process duration {2}'.format(old_loss, new_loss, duration))
Example 10
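# Fixed-size benchmark model (cf. benchmark_model.build_tf_model in Example 1):
# 784 -> 100 -> 50 -> label_size with ReLU hidden layers, Frobenius-norm loss,
# and an accuracy metric based on an integer cast of the outputs.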
def build_tf_model():
    batch_size = 3000
    label_size = 1
    inputs = tf.placeholder(tf.float64, (batch_size, 784), name='inputs')
    labels = tf.placeholder(tf.float64, (batch_size, label_size),
                            name='labels')
    x = tf.layers.dense(inputs, 100, activation=tf.nn.relu, use_bias=True)
    x = tf.layers.dense(x, 50, use_bias=False, activation=tf.nn.relu)
    outputs = tf.layers.dense(x, label_size, activation=None, use_bias=True)
    losses = frobenius_norm(labels, outputs)
    loss = tf.reduce_mean(losses)
    correct_prediction = tf.equal(tf.cast(labels, tf.int32),
                                  tf.cast(outputs, tf.int32))  # truncating int cast: exact match only
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) * 100.
    tf.summary.scalar('accuracy', accuracy)

    return agents.tools.AttrDict(
        inputs=inputs,
        outputs=outputs,
        labels=labels,
        loss=loss,
        accuracy=accuracy,
    )