Example #1
def mnist_mlp(args):
    # write tensorflow models
    x = tf.placeholder(tf.float32, [args.batch_size, 784])
    t = tf.placeholder(tf.float32, [args.batch_size, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, w) + b
    cost = tf.reduce_mean(-tf.reduce_sum(
        t * tf.log(tf.nn.softmax(y)), reduction_indices=[1]))
    init = tf.global_variables_initializer()

    # import graph_def
    importer = TFImporter()
    importer.import_graph_def(tf.get_default_graph().as_graph_def())

    # get handle of ngraph ops
    x_ng, t_ng, cost_ng, init_op_ng = importer.get_op_handle([x, t, cost, init])

    # transformer and computations
    with ExecutorFactory() as ex:
        updates = CommonSGDOptimizer(args.lrate).minimize(cost_ng, cost_ng.variables())
        train_comp = ex.executor(ng.sequential([updates, cost_ng]), x_ng, t_ng)
        init_comp = ex.executor(init_op_ng)
        ex.transformer.initialize()

        # train
        if args.random_data is not None:
            mnist = args.random_data
            mnist.reset(0)
        else:
            mnist = input_data.read_data_sets(args.data_dir, one_hot=True)

        init_comp()
        ng_cost_vals = []
        for idx in range(args.max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
            cost_val = train_comp(batch_xs, batch_ys)
            ng_cost_vals.append(float(cost_val))
            print("[Iter %s] Cost = %s" % (idx, cost_val))

    # train in tensorflow as comparison
    with tf.Session() as sess:
        # train in tensorflow
        train_step = tf.train.GradientDescentOptimizer(args.lrate).minimize(cost)
        sess.run(init)
        if args.random_data is not None:
            mnist = args.random_data
            mnist.reset(0)
        else:
            mnist = input_data.read_data_sets(args.data_dir, one_hot=True)
        tf_cost_vals = []
        for idx in range(args.max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
            cost_val, _ = sess.run([cost, train_step],
                                   feed_dict={x: batch_xs, t: batch_ys})
            tf_cost_vals.append(float(cost_val))
            print("[Iter %s] Cost = %s" % (idx, cost_val))

    return ng_cost_vals, tf_cost_vals
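
A minimal invocation sketch, not part of the original example; the argument values are assumptions (batch size, learning rate and iteration count mirror the ones used in Example #7):

from types import SimpleNamespace

args = SimpleNamespace(batch_size=128, lrate=0.1, max_iter=10,
                       data_dir='/tmp/data', random_data=None)
ng_costs, tf_costs = mnist_mlp(args)
# the two cost curves are expected to track each other closely
# (Example #7 checks the analogous values with np.allclose)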
Example #2
def logistic_regression(args):
    # setup -> xs: (N, C), ys: (N, 1)
    xs_np = np.array([[0.52, 1.12, 0.77], [0.88, -1.08, 0.15],
                      [0.52, 0.06, -1.30], [0.74, -2.49, 1.39]])
    ys_np = np.array([[1], [1], [0], [1]])

    # placeholders
    x = tf.placeholder(tf.float32, shape=(4, 3))
    t = tf.placeholder(tf.float32, shape=(4, 1))
    w = tf.Variable(tf.zeros([3, 1]))
    y = tf.nn.sigmoid(tf.matmul(x, w))
    log_likelihoods = tf.log(y) * t + tf.log(1 - y) * (1 - t)
    cost = -tf.reduce_sum(log_likelihoods)
    init_op = tf.global_variables_initializer()

    # import graph_def
    importer = TFImporter()
    importer.import_graph_def(tf.get_default_graph().as_graph_def())

    # get handle of ngraph ops
    x_ng, t_ng, cost_ng, init_op_ng = importer.get_op_handle(
        [x, t, cost, init_op])

    # transformer and computations
    transformer = ngt.make_transformer()
    updates = util.CommonSGDOptimizer(args.lrate).minimize(
        cost_ng, cost_ng.variables())

    train_comp = transformer.computation([cost_ng, updates], x_ng, t_ng)
    init_comp = transformer.computation(init_op_ng)
    transformer.initialize()

    # train
    init_comp()
    ng_cost_vals = []
    for idx in range(args.max_iter):
        cost_val, _ = train_comp(xs_np, ys_np)
        ng_cost_vals.append(float(cost_val))
        print("[Iter %s] Cost = %s" % (idx, cost_val))

    transformer.close()

    # tensorflow for comparison
    with tf.Session() as sess:
        train_step = tf.train.GradientDescentOptimizer(
            args.lrate).minimize(cost)
        sess.run(init_op)
        tf_cost_vals = []
        for idx in range(args.max_iter):
            cost_val, _ = sess.run([cost, train_step],
                                   feed_dict={
                                       x: xs_np,
                                       t: ys_np
                                   })
            tf_cost_vals.append(float(cost_val))
            print("[Iter %s] Cost = %s" % (idx, cost_val))

    return ng_cost_vals, tf_cost_vals
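
A minimal invocation sketch, not part of the original snippet; the hyperparameter values are assumptions, matching the standalone variant in Example #10:

from types import SimpleNamespace

args = SimpleNamespace(lrate=0.1, max_iter=10)
ng_costs, tf_costs = logistic_regression(args)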
Example #3
def train_mnist(args):
    # write tensorflow models
    x = tf.placeholder(tf.float32, [args.batch_size, 784])
    t = tf.placeholder(tf.float32, [args.batch_size, 10])
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, w) + b
    cost = tf.reduce_mean(
        -tf.reduce_sum(t * tf.log(tf.nn.softmax(y)), reduction_indices=[1]))
    init = tf.initialize_all_variables()

    # import graph_def
    with tf.Session() as sess:
        graph_def = sess.graph_def
    importer = TFImporter()
    importer.parse_graph_def(graph_def)

    # get handle of ngraph ops
    x_ng, t_ng, cost_ng, init_op_ng = importer.get_op_handle(
        [x, t, cost, init])

    # transformer and computations
    transformer = ngt.make_transformer()
    updates = SGDOptimizer(args.lrate).minimize(cost_ng)
    train_comp = transformer.computation([cost_ng, updates], x_ng, t_ng)
    init_comp = transformer.computation(init_op_ng)
    transformer.initialize()

    # train
    mnist = input_data.read_data_sets(args.data_dir, one_hot=True)
    init_comp()
    for idx in range(args.max_iter):
        batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
        cost_val, _ = train_comp(batch_xs, batch_ys)
        print("[Iter %s] Cost = %s" % (idx, cost_val))

    # train in tensorflow as comparison
    with tf.Session() as sess:
        # train in tensorflow
        train_step = tf.train.GradientDescentOptimizer(
            args.lrate).minimize(cost)
        sess.run(init)

        mnist = input_data.read_data_sets(args.data_dir, one_hot=True)
        for idx in range(args.max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
            cost_val, _ = sess.run([cost, train_step],
                                   feed_dict={
                                       x: batch_xs,
                                       t: batch_ys
                                   })
            print("[Iter %s] Cost = %s" % (idx, cost_val))
Example #4
def ng_retrain_mnist(args):
    """
    Load meta_graph / checkpoint and retrain in ngraph for max_iter iterations

    Args:
        args: command line arguments
    """

    # load the dataset and advance it by max_iter batches (offset)
    if args.random_data is not None:
        mnist = args.random_data
        mnist.reset(0)
    else:
        mnist = input_data.read_data_sets(args.data_dir, one_hot=True)

    for _ in range(args.max_iter):
        mnist.train.next_batch(args.batch_size)

    # init importer
    importer = TFImporter()

    # parse meta-graph and model checkpoint file
    importer.import_meta_graph(args.checkpoint_path + '.meta',
                               checkpoint_path=args.checkpoint_path)

    # get collection handles; these must have been registered with
    # `tf.add_to_collection` before the checkpoint was saved
    x_ng, t_ng, cost_ng, init_op_ng = importer.get_collection_handle(
        ['x', 't', 'cost', 'init_op'])

    # get variable restore op
    restore_op_ng = importer.get_restore_op()

    # transformer and computations
    transformer = ngt.make_transformer()
    updates = util.CommonSGDOptimizer(args.lrate).minimize(
        cost_ng, cost_ng.variables())
    train_comp = transformer.computation([cost_ng, updates], x_ng, t_ng)
    init_comp = transformer.computation(init_op_ng)
    restore_comp = transformer.computation(restore_op_ng)
    transformer.initialize()

    # train in ngraph
    init_comp()
    restore_comp()
    ng_cost_vals = []
    for idx in range(args.max_iter):
        batch_xs, batch_ys = mnist.train.next_batch(args.batch_size)
        cost_val, _ = train_comp(batch_xs, batch_ys)
        ng_cost_vals.append(float(cost_val))
        print("[Iter %s] Cost = %s" % (idx, cost_val))

    transformer.close()

    return ng_cost_vals
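
For context, a hedged sketch (not part of the original example) of the TensorFlow-side save step that `ng_retrain_mnist` presupposes: the tensors are registered under the collection names 'x', 't', 'cost' and 'init_op' before `tf.train.Saver` writes the checkpoint and the accompanying `.meta` file. The model definition, batch size and checkpoint path below are illustrative.

import tensorflow as tf

# illustrative model, mirroring the MNIST softmax graph used elsewhere on this page
batch_size = 128  # assumed value
x = tf.placeholder(tf.float32, [batch_size, 784])
t = tf.placeholder(tf.float32, [batch_size, 10])
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
cost = tf.reduce_mean(-tf.reduce_sum(
    t * tf.log(tf.nn.softmax(tf.matmul(x, w) + b)), reduction_indices=[1]))
init_op = tf.global_variables_initializer()

# register the handles that get_collection_handle(['x', 't', 'cost', 'init_op']) looks up
for name, op in [('x', x), ('t', t), ('cost', cost), ('init_op', init_op)]:
    tf.add_to_collection(name, op)

checkpoint_path = '/tmp/mnist_model'  # illustrative path
with tf.Session() as sess:
    sess.run(init_op)
    # Saver.save() writes the checkpoint plus '<checkpoint_path>.meta',
    # which import_meta_graph() above reads back
    tf.train.Saver().save(sess, checkpoint_path)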
Example #5
    def ng_run(self,
               tf_target_node,
               tf_feed_dict=None,
               print_ng_result=False,
               verbose=False):
        """
        Run and get ngraph results
        Args:
            tf_target_node: target node in tf
            tf_feed_dict: feed_dict in tf
            print_ng_result: prints ng_result if set to True
            verbose: prints tf's node_def if set to True

        Returns:
            ng_result
        """
        # init importer, transformer
        importer = TFImporter()
        importer.import_protobuf(self.pb_txt_path, verbose=verbose)
        transformer = ngt.make_transformer()

        # set target node
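        # (a tensor name like "<op>:0" ends with its output index; the [:-2]
        # slice drops that ":0" suffix to recover the plain op name)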
        ng_target_node = importer.get_op_handle_by_name(
            tf_target_node.name[:-2])

        # evaluate ngraph
        if tf_feed_dict is not None:
            # get targeting nodes for ng, convert tf's feed dict to list
            tf_placeholder_nodes = [node for (node, _) in tf_feed_dict.items()]
            tf_placeholder_names = [node.name for node in tf_placeholder_nodes]
            ng_placeholder_nodes = [
                importer.get_op_handle_by_name(name[:-2])
                for name in tf_placeholder_names
            ]
            ng_placeholder_vals = [val for (_, val) in tf_feed_dict.items()]

            # evaluate ngraph result
            ng_result_comp = transformer.computation([ng_target_node],
                                                     *ng_placeholder_nodes)
            if importer.init_ops:
                init_comp = transformer.computation(importer.init_ops)
                init_comp()

            ng_result = ng_result_comp(*ng_placeholder_vals)[0]
        else:
            ng_result_comp = transformer.computation([ng_target_node])
            if importer.init_ops:
                init_comp = transformer.computation(importer.init_ops)
                init_comp()
            ng_result = ng_result_comp()[0]
        if print_ng_result:
            print(ng_result)

        return ng_result
Example #6
    def ng_run(self,
               tf_target_node,
               tf_init_op=None,
               tf_feed_dict={},
               print_ng_result=False,
               verbose=False):
        """
        Run and get ngraph results
        Args:
            tf_target_node: target node in tf
            tf_init_op: init op in tf, run before evaluation if given
            tf_feed_dict: feed_dict in tf
            print_ng_result: prints ng_result if set to True
            verbose: prints tf's node_def if set to True

        Returns:
            ng_result
        """
        # init importer, transformer
        importer = TFImporter()
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(self.graph_string)
        importer.import_graph_def(graph_def, verbose=verbose)

        # set target node
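        # (the [:-2] slice drops the ":0" output suffix from the tensor name,
        # both here and for the feed-dict keys built below)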
        ng_target_node = importer.get_op_handle_by_name(
            tf_target_node.name[:-2])

        # get targeting nodes for ng, convert tf's feed dict to list
        ng_feed_dict = {
            importer.get_op_handle_by_name(node.name[:-2]): val
            for (node, val) in tf_feed_dict.items()
        }

        # evaluate ngraph
        with ExecutorFactory() as ex:
            ng_result_comp = ex.transformer.computation(
                ng_target_node, *ng_feed_dict.keys())

            if tf_init_op:
                ex.transformer.computation(
                    importer.get_op_handle(tf_init_op))()

            ng_result = ng_result_comp(feed_dict=ng_feed_dict)

        return ng_result
Example #7
    def test_mnist_softmax(self):
        # parameters
        max_iter = 10
        lrate = 0.1
        bsz = 128

        # write tensorflow models
        x = tf.placeholder(tf.float32, [bsz, 784])
        t = tf.placeholder(tf.float32, [bsz, 10])
        W = tf.Variable(tf.zeros([784, 10]))
        b = tf.Variable(tf.zeros([10]))
        y = tf.matmul(x, W) + b
        cost = tf.reduce_mean(-tf.reduce_sum(
            t * tf.log(tf.nn.softmax(y)), reduction_indices=[1]))
        init_op = tf.initialize_all_variables()

        # import graph_def
        with tf.Session() as sess:
            graph_def = sess.graph_def
        importer = TFImporter()
        importer.parse_graph_def(graph_def)

        # get handle of ngraph ops
        x_ng, t_ng, cost_ng, init_op_ng = importer.get_op_handle(
            [x, t, cost, init_op])

        # transformer and computations
        transformer = ngt.make_transformer()
        updates = SGDOptimizer(lrate).minimize(cost_ng)
        train_comp = transformer.computation([cost_ng, updates], x_ng, t_ng)
        init_comp = transformer.computation(init_op_ng)
        transformer.initialize()

        # train
        mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
        init_comp()
        ng_costs = []
        for idx in range(max_iter):
            batch_xs, batch_ys = mnist.train.next_batch(bsz)
            cost_val, _ = train_comp(batch_xs, batch_ys)
            print("[Iter %s] Cost = %s" % (idx, cost_val))
            ng_costs.append(float(cost_val))

        # train in tensorflow as comparison
        with tf.Session() as sess:
            # train in tensorflow
            train_step = tf.train.GradientDescentOptimizer(lrate).minimize(
                cost)
            sess.run(init_op)

            mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
            tf_costs = []
            for idx in range(max_iter):
                batch_xs, batch_ys = mnist.train.next_batch(bsz)
                cost_val, _ = sess.run([cost, train_step],
                                       feed_dict={x: batch_xs,
                                                  t: batch_ys})
                print("[Iter %s] Cost = %s" % (idx, cost_val))
                tf_costs.append(cost_val)

        # check results
        assert np.allclose(
            np.asarray(tf_costs).astype(np.float32),
            np.asarray(ng_costs).astype(np.float32))
Example #8
max_iter = 10
lrate = 0.1

# placeholders
x = tf.placeholder(tf.float32, shape=(4, 3))
t = tf.placeholder(tf.float32, shape=(4, 1))
w = tf.Variable(tf.zeros([3, 1]))
y = tf.nn.sigmoid(tf.matmul(x, w))
log_likelihoods = tf.log(y) * t + tf.log(1 - y) * (1 - t)
cost = -tf.reduce_sum(log_likelihoods)
init_op = tf.initialize_all_variables()

# import graph_def
with tf.Session() as sess:
    graph_def = sess.graph_def
importer = TFImporter()
importer.parse_graph_def(graph_def)

# get handle of ngraph ops
x_ng, t_ng, cost_ng, init_op_ng = importer.get_op_handle([x, t, cost, init_op])

# transformer and computations
transformer = ngt.make_transformer()
updates = SGDOptimizer(lrate).minimize(cost_ng)
train_comp = transformer.computation([cost_ng, updates], x_ng, t_ng)
init_comp = transformer.computation(init_op_ng)
transformer.initialize()

# train
init_comp()
for idx in range(max_iter):
    # xs_np / ys_np: the (4, 3) inputs and (4, 1) targets fed to the placeholders
    # above; their definition is cut off in this snippet (see Examples #2 and #10)
    cost_val, _ = train_comp(xs_np, ys_np)
    print("[Iter %s] Cost = %s" % (idx, cost_val))
Example #9
from __future__ import print_function
import tensorflow as tf
from ngraph.testing import ExecutorFactory
from ngraph.frontends.tensorflow.tf_importer.importer import TFImporter

# tensorflow ops
x = tf.constant(1.)
y = tf.constant(2.)
f = x + y

# import
importer = TFImporter()
importer.import_graph_def(tf.get_default_graph().as_graph_def())

# get handle
f_ng = importer.get_op_handle(f)

# execute
with ExecutorFactory() as ex:
    f_result = ex.executor(f_ng)()
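# the imported graph computes 1. + 2., so f_result should be 3.0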
print(f_result)
Example #10
xs_np = np.array([[0.52, 1.12, 0.77], [0.88, -1.08, 0.15],
                  [0.52, 0.06, -1.30], [0.74, -2.49, 1.39]])
ys_np = np.array([[1], [1], [0], [1]])
max_iter = 10
lrate = 0.1

# placeholders
x = tf.placeholder(tf.float32, shape=(4, 3))
t = tf.placeholder(tf.float32, shape=(4, 1))
w = tf.Variable(tf.zeros([3, 1]))
y = tf.nn.sigmoid(tf.matmul(x, w))
log_likelihoods = tf.log(y) * t + tf.log(1 - y) * (1 - t)
cost = -tf.reduce_sum(log_likelihoods)
init_op = tf.initialize_all_variables()

# import graph_def
importer = TFImporter()
importer.import_graph_def(tf.get_default_graph().as_graph_def())

# get handle of ngraph ops
x_ng, t_ng, cost_ng, init_op_ng = importer.get_op_handle([x, t, cost, init_op])

# transformer and computations
transformer = ngt.make_transformer()
updates = SGDOptimizer(lrate).minimize(cost_ng)
train_comp = transformer.computation([cost_ng, updates], x_ng, t_ng)
init_comp = transformer.computation(init_op_ng)
transformer.initialize()

# train
init_comp()
for idx in range(max_iter):
    cost_val, _ = train_comp(xs_np, ys_np)
    print("[Iter %s] Cost = %s" % (idx, cost_val))
Example #11
from __future__ import print_function
from ngraph.frontends.tensorflow.tf_importer.importer import TFImporter
import tensorflow as tf
import ngraph.transformers as ngt

# tensorflow ops
x = tf.constant(1.)
y = tf.constant(2.)
f = x + y

# import
importer = TFImporter()
importer.parse_graph_def(tf.Session().graph_def)

# get handle
f_ng = importer.get_op_handle(f)

# execute
f_result = ngt.make_transformer().computation(f_ng)()
print(f_result)