def main(argv):
    """Run distributed NMF on a random 1000x1000 matrix.

    argv[1] must be the Mesos master address. Spawns 2 ps and 2 worker
    tasks, factorizes the matrix for a fixed number of iterations, then
    reports the element-wise squared reconstruction error.
    """
    iterations = 100
    data = np.random.random((1000, 1000))
    # Cluster layout: two parameter servers plus two workers.
    jobs_def = [
        {"name": "ps", "num": 2},
        {"name": "worker", "num": 2},
    ]
    mesos_master = argv[1]
    nmf = NMF(data, 200)
    with cluster(jobs_def, master=mesos_master) as c:
        # Drive the graph from worker task 1.
        with tf.Session(c.targets['/job:worker/task:1']) as session:
            session.run(tf.global_variables_initializer())
            for step in range(iterations):
                mat_w, mat_h, loss = nmf.run(session)
                print("loss#%d: %s" % (step, loss))

    # Reconstruction error, computed locally after the cluster is torn
    # down: element-wise (M - W.H)^2.
    err = np.power(data - np.matmul(mat_w, mat_h), 2)
    print("err mean: %s" % err.mean())
    print("loss: %s" % loss)
# Example 2
def main(argv):
    """Run distributed NMF on a random 1000x1000 matrix.

    argv[1] must be the Mesos master address. Unlike the first variant,
    the NMF object here owns the session (it is passed at construction
    and ``run()`` takes no arguments).
    """
    max_iter = 100
    matrix = np.random.random((1000, 1000))
    # Cluster layout: two parameter servers plus two workers.
    jobs_def = [
        {
            "name": "ps",
            "num": 2
        },
        {
            "name": "worker",
            "num": 2
        },
    ]
    mesos_master = argv[1]
    with cluster(jobs_def, master=mesos_master) as targets:
        with tf.Session(targets['/job:worker/task:1']) as session:
            nmf = NMF(session, matrix, 200)
            for i in range(max_iter):
                mat_w, mat_h, loss = nmf.run()
                # Fixed: was a Python 2 print statement; the rest of the
                # listing uses the Python 3 print() function.
                print("loss#%d: %s" % (i, loss))

    # Element-wise squared reconstruction error ||M - W.H||^2.
    err = np.power(matrix - np.matmul(mat_w, mat_h), 2)
    print("err mean: %s" % err.mean())
    print("loss: %s" % loss)
# Example 3
def main(argv):
    """Add two constants placed on separate ps tasks, evaluated on a worker.

    The Mesos master address is read from sys.argv[1] (the ``argv``
    parameter is unused, matching the original).
    """
    # Cluster layout: two parameter servers plus two workers.
    jobs_def = [
        {"name": "ps", "num": 2},
        {"name": "worker", "num": 2},
    ]
    mesos_master = sys.argv[1]
    with cluster(jobs_def, master=mesos_master, quiet=False) as targets:
        # Pin one constant to each parameter-server task.
        with tf.device('/job:ps/task:0'):
            a = tf.constant(10)
        with tf.device('/job:ps/task:1'):
            b = tf.constant(32)

        # The sum is computed on worker 1 ...
        with tf.device("/job:worker/task:1"):
            op = a + b

        # ... while the graph is driven from worker 0.
        with tf.Session(targets['/job:worker/task:0']) as sess:
            print(sess.run(op))
# Example 4
def main(argv):
    """Add two constants placed on separate ps tasks, evaluated on a worker.

    Same computation as the previous example, but the context manager
    yields a cluster object whose ``targets`` attribute maps device
    names to session targets. sys.argv[1] is the Mesos master address
    (the ``argv`` parameter is unused, matching the original).
    """
    # Cluster layout: two parameter servers plus two workers.
    jobs_def = [
        {"name": "ps", "num": 2},
        {"name": "worker", "num": 2},
    ]
    mesos_master = sys.argv[1]
    with cluster(jobs_def, master=mesos_master, quiet=False) as c:
        # Pin one constant to each parameter-server task.
        with tf.device('/job:ps/task:0'):
            a = tf.constant(10)
        with tf.device('/job:ps/task:1'):
            b = tf.constant(32)

        # The sum is computed on worker 1 ...
        with tf.device("/job:worker/task:1"):
            op = a + b

        # ... while the graph is driven from worker 0.
        with tf.Session(c.targets['/job:worker/task:0']) as sess:
            print(sess.run(op))
# Example 5
# Cluster layout: nserver parameter servers plus nworker workers, each
# worker optionally pinned to GPUs from the CLI arguments.
jobs_def = [
    {
        "name": "ps",
        "num": nserver
    },
    {
        "name": "worker",
        "num": nworker,
        "gpus": args.worker_gpus,
    },
]

# NOTE(review): _lock is created but never used in this visible chunk —
# presumably it guards shared state further down the script; confirm.
_lock = RLock()
mnist = read_data_sets("MNIST_data/", one_hot=True)
with cluster(jobs_def, master=master, quiet=False, **extra_kw) as c:
    graph = tf.Graph()
    with graph.as_default():
        # replica_device_setter shards the variables below across the
        # parameter-server tasks round-robin.
        with tf.device(tf.train.replica_device_setter(ps_tasks=nserver)):
            # Single-layer softmax regression over flattened 28x28
            # MNIST images: 784 inputs -> 10 classes.
            W = tf.Variable(tf.zeros([784, 10]))
            b = tf.Variable(tf.zeros([10]))
            global_step = tf.Variable(0)
            x = tf.placeholder(tf.float32, [None, 784])
            y = tf.nn.softmax(tf.matmul(x, W) + b)
            y_ = tf.placeholder(tf.float32, [None, 10])
            # Cross-entropy loss summed over the batch.
            cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

            # One training op per worker task; this chunk is truncated
            # mid-call by the scrape.
            steps = []
            for i in range(nworker):
                with tf.device('/job:worker/task:%d' % i):
                    steps.append(
# Example 6
# Cluster layout: nserver parameter servers plus nworker workers, each
# worker optionally pinned to GPUs from the CLI arguments.
jobs_def = [
    {
        "name": "ps",
        "num": nserver
    },
    {
        "name": "worker",
        "num": nworker,
        "gpus": args.worker_gpus,
    },
]

# NOTE(review): _lock is created but never used in this visible chunk —
# presumably it guards shared state further down the script; confirm.
_lock = RLock()
mnist = read_data_sets("MNIST_data/", one_hot=True)
with cluster(jobs_def, master=master, quiet=False) as targets:
    graph = tf.Graph()
    with graph.as_default():
        # replica_device_setter shards the variables below across the
        # parameter-server tasks round-robin.
        with tf.device(tf.train.replica_device_setter(ps_tasks=nserver)):
            # Single-layer softmax regression over flattened 28x28
            # MNIST images: 784 inputs -> 10 classes.
            W = tf.Variable(tf.zeros([784, 10]))
            b = tf.Variable(tf.zeros([10]))
            global_step = tf.Variable(0)
            x = tf.placeholder(tf.float32, [None, 784])
            y = tf.nn.softmax(tf.matmul(x, W) + b)
            y_ = tf.placeholder(tf.float32, [None, 10])
            # Cross-entropy loss summed over the batch.
            cross_entropy = -tf.reduce_sum(y_*tf.log(y))

            # One SGD training op per worker task, all sharing the same
            # global_step counter.
            steps = []
            for i in range(nworker):
                with tf.device('/job:worker/task:%d' % i):
                    steps.append(tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy, global_step=global_step))
# Example 7
# Cluster layout: nserver parameter servers plus nworker workers, each
# worker optionally pinned to GPUs from the CLI arguments.
jobs_def = [
    {
        "name": "ps",
        "num": nserver
    },
    {
        "name": "worker",
        "num": nworker,
        "gpus": args.worker_gpus,
    },
]

# NOTE(review): _lock is created but never used in this visible chunk —
# presumably it guards shared state further down the script; confirm.
_lock = RLock()
mnist = read_data_sets("MNIST_data/", one_hot=True)
with cluster(jobs_def, master=master, quiet=False) as targets:
    graph = tf.Graph()
    with graph.as_default():
        # replica_device_setter shards the variables below across the
        # parameter-server tasks round-robin.
        with tf.device(tf.train.replica_device_setter(ps_tasks=nserver)):
            # Single-layer softmax regression over flattened 28x28
            # MNIST images: 784 inputs -> 10 classes.
            W = tf.Variable(tf.zeros([784, 10]))
            b = tf.Variable(tf.zeros([10]))
            global_step = tf.Variable(0)
            x = tf.placeholder(tf.float32, [None, 784])
            y = tf.nn.softmax(tf.matmul(x, W) + b)
            y_ = tf.placeholder(tf.float32, [None, 10])
            # Cross-entropy loss summed over the batch.
            cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

            # One training op per worker task; this chunk is truncated
            # mid-call by the scrape.
            # NOTE(review): xrange is Python 2 only — the sibling
            # examples use range(); this variant will not run on
            # Python 3 as written.
            steps = []
            for i in xrange(nworker):
                with tf.device('/job:worker/task:%d' % i):
                    steps.append(