Code Example #1
 def run(self, feeds=None):
     """
     Run tensors and dump to summary file.
     """
     self.make()
     result = ThisSession.run(self.summary_op, feeds)
     self.file_writer.add_summary(result, ThisSession.run(GlobalStep()))
Code Example #2
File: dist2.py Project: tech-pi/dxlearn
def main_basic(job, task):
    cfg = {"worker": ["localhost:2222",
                      "localhost:2223"]}
    make_distribute_host(cfg, job, task, None, 'worker', 0)
    master_host = Master.master_host()
    this_host = ThisHost.host()
    host1 = Host(job, 1)
    hmi = DistributeGraphInfo(None, None, None, master_host)
    with tf.variable_scope('scope_test'):
        t0 = TensorVariable(VariableInfo(None, [1], tf.float32),
                            hmi.update(name='v0'))
        aop = tf.assign(t0.data, tf.constant([3.]))
        t1 = TensorNumpyNDArray([1.0], None,
                                hmi.update(name='v1'))
        t1c = t1.copy_to(host1)
        t1p = Tensor(t1c.data + 1, t1c.data_info,
                     t1c.graph_info.update(name='t1_plus'))
    make_distribute_session()
    if task == 0:
        ptensor(t1)
        Server.join()
    if task == 1:
        ptensor(t1)
        ptensor(t1c)
        ptensor(t1p)
        ptensor(t0)
        ThisSession.run(aop)
        ptensor(t0)
Code Example #3
File: osem.py Project: tech-pi/SRF
    def bind_local_data_splitted(self, lors):
        step = 10000
        nb_osem = 10
        for i in range(nb_osem):
            self.worker_graphs[self.task_index].tensors['osem_{}'.format(
                i)] = self.tensor('lorx').assign(lors[i * step:(i + 1) * step,
                                                      ...])

        # Executed later, not here: run the assign op for the chosen subset
        # (see the sketch below for a possible driver loop).
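The assign ops bound above are keyed 'osem_0' through 'osem_9'. A possible driver loop at run time, shown here as a hypothetical sketch that is not part of the original file, would execute them one subset at a time, in the spirit of the iteration loop in Code Example #6:

# Hypothetical driver loop; the keys match 'osem_{}'.format(i) used above.
nb_osem = 10
for i in range(nb_osem):
    # Load the i-th LOR subset into the 'lorx' tensor of this worker graph.
    ThisSession.run(self.worker_graphs[self.task_index].tensors['osem_{}'.format(i)])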
Code Example #4
File: network.py Project: alex10151/dxlearn
    def train(self, name=None, feeds=None):
        if name is None:
            name = 'default'
        trainer = self.tensors.get(name)
        if trainer is None:
            raise ValueError("Nothing to train, please bind first.")

        ThisSession.run(trainer, feeds)
        global_step = ThisSession.run(self.global_step.increased())

        self.on_step_end(name, global_step)
Code Example #5
    def test_train(self):
        network = self.create_network()
        network.make()
        with self.variables_initialized_test_session() as sess:
            ThisSession.set_session(sess)
            losses = []
            for i in range(100):
                if i % 10 == 0:
                    l = sess.run(network.get_objective())
                    losses.append(l)
                network.train()

        assert self.is_mono_decay(losses)
Code Example #6
File: distribute.py Project: twj2417/srf
 def run_task(self):
     KT = self.KEYS.TENSOR
     KC = self.KEYS.CONFIG
     ThisSession.run(self.tensors[KT.INIT])
     for i in tqdm(range(self.config[KC.NB_ITERATIONS])):
         ThisSession.run(self.tensors[KT.RECON])
         ThisSession.run(self.tensors[KT.MERGE])
         if ThisHost.is_master():
             x = ThisSession.run(self.tensors[KT.X].data)
             np.save(f'./result_{i}.npy', x)
Code Example #7
File: extra2.py Project: tech-pi/dxlearn
def main(job, task):
    tf.logging.set_verbosity(0)
    cfg = {"worker": ["localhost:2222",
                      "localhost:2223"]}
    make_distribute_host(cfg, job, task, None, 'worker', 0)
    # # if task == 1:
    #     # time.sleep(10)
    # with tf.device(Master.master_host().device_prefix()):
    #     with tf.variable_scope('test'):
    #         t1 = tf.get_variable('var', [], tf.float32)
    master_host = Master.master_host()
    this_host = ThisHost.host()
    host2 = Host(job, 1)
    hmi = DistributeGraphInfo(None, None, None, master_host)
    with tf.variable_scope('scope_test'):
        t0 = TensorVariable(VariableInfo(None, [1], tf.float32),
                            DistributeGraphInfo.from_(hmi, name='t1'))
        aop = tf.assign(t0.data, tf.constant([3.]))
        t1 = TensorNumpyNDArray([1.0], None,
                                DistributeGraphInfo.from_(hmi, name='t1_copy'))
        t1c = t1.copy_to(host2)
        t1p = Tensor(t1c.data + 1, t1c.data_info, DistributeGraphInfo.from_(t1c.graph_info, name='t1_plus'))
        # t2 = t0.copy_to(host2)
    make_distribute_session()
    if task == 0:
        # ThisSession.run(tf.global_variables_initializer())
        ptensor(t1)
        Server.join()
    if task == 1:
        ptensor(t1)
        ptensor(t1c)
        ptensor(t1p)
        # print(t2.run())
        # print(t2.data)
        # print(t0.run())
        # print(t0)
        ptensor(t0)
        print(ThisSession.run(aop))
        ptensor(t0)
Code Example #8
File: keep_prob.py Project: alex10151/dxlearn
 def test_phase(self):
     ThisSession.run(self.assign_to_one)
     yield
     ThisSession.run(self.assign_to_init)
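The bare yield above indicates this method is used as a context manager, matching Code Example #14, which enters the test phase through a with block. Below is a minimal sketch of that pattern, assuming contextlib.contextmanager is applied in the original source and that the two assign ops toggle a dropout keep-probability between 1.0 and its training value; the class and attribute layout is illustrative, and the dxlearn import for ThisSession is omitted because its module path is not shown in these excerpts.

from contextlib import contextmanager

import tensorflow as tf

class KeepProbSketch:
    # Illustrative stand-in for the keep_prob helper excerpted above.
    def __init__(self, init_value=0.5):
        # Variable analogous to the keep-probability switched in the excerpt.
        self.keep_prob = tf.get_variable(
            'keep_prob', [], tf.float32,
            initializer=tf.constant_initializer(init_value))
        # Counterparts of assign_to_one / assign_to_init in the excerpt.
        self.assign_to_one = tf.assign(self.keep_prob, 1.0)
        self.assign_to_init = tf.assign(self.keep_prob, init_value)

    @contextmanager
    def test_phase(self):
        # Turn dropout off for evaluation, then restore the training value.
        ThisSession.run(self.assign_to_one)
        yield
        ThisSession.run(self.assign_to_init)

A caller would then wrap evaluation in "with helper.test_phase(): ...", mirroring Code Example #14.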
Code Example #9
File: osem.py Project: tech-pi/SRF
 def run_and_save_if_is_master(self, x, path):
     if ThisHost.is_master():
         if isinstance(x, Tensor):
             x = x.data
         result = ThisSession.run(x)
         np.save(path, result)
Code Example #10
File: dist2.py Project: tech-pi/dxlearn
def main_sync(job, task):
    cfg = {"master": ["localhost:2221"],
           "worker": ["localhost:2222",
                      "localhost:2223"]}
    make_distribute_host(cfg, job, task, None, 'master', 0)
    master_host = Master.master_host()
    this_host = ThisHost.host()
    host0 = Host('worker', 0)
    host1 = Host('worker', 1)

    def sleep(ips):
        for i in range(5, 0, -1):
            time.sleep(1)
        return 0
    hmi = DistributeGraphInfo(None, None, None, master_host)
    tm = TensorNumpyNDArray([1.0], None,
                            DistributeGraphInfo.from_graph_info(hmi, name='t0'))
    tcs = []
    # t0c = tm.copy_to(host0)
    # t1c = tm.copy_to(host1)
    # m_sum = Summation(name='summation', graph_info=DistributeGraphInfo(
    #     'summation', None, None, host0))([t0c, t1c])
    ops = tf.FIFOQueue(2, tf.bool, shapes=[],
                       name='barrier', shared_name='barrier')
    # ptensor(tm)
    if ThisHost.host() == master_host:
        join = ops.dequeue_many(2)
    else:
        signal = ops.enqueue(False)
    no = tf.constant('tmp')
    ops = [tf.Print(no, data=[no], message='Done_{}'.format(i), name='p_{}'.format(i))
           for i in range(3)]
    # ops.enqueue()
    make_distribute_session()
    if ThisHost.host() == master_host:
        ThisSession.run(join)
        print('Joined.')
        time.sleep(2)
        ThisSession.run(ops[0])
        # Server.join()
    elif ThisHost.host() == host0:
        ThisSession.run(signal)
        ThisSession.run(ops[1])
    elif ThisHost.host() == host1:
        time.sleep(3)
        ThisSession.run(signal)
        ThisSession.run(ops[2])
Code Example #11
 def run(self, feeds=None):
     self.make()
     result = ThisSession.run(self.summary_op, feeds)
     self.file_writer.add_summary(result, ThisSession.run(GlobalStep()))
Code Example #12
 def auto_run(self, feeds=None):
     if ThisSession.run(GlobalStep()) >= self._next_summary_step:
         self.run(feeds)
         self._next_summary_step += self.config(
             self.KEYS.CONFIG.NB_INTERVAL)
Code Example #13
File: session.py Project: tech-pi/dxlearn
def make_distribute_session(session_name='depsession', target=None):
    if target is None:
        target = Server.server().target
    ThisSession.set_session(MonitoredSession(session_name, target))
    return ThisSession.session()
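For context, Code Examples #2, #7, and #10 call this helper in a fixed order: register the current process in the cluster, create the distributed session, then drive graphs through ThisSession.run. A condensed sketch of that sequence follows; job, task, and some_tensor are placeholders, and the dxlearn imports are omitted because the module paths are not shown in these excerpts.

# Condensed from Code Examples #2 and #13; the cluster layout is illustrative.
cfg = {"worker": ["localhost:2222", "localhost:2223"]}
make_distribute_host(cfg, job, task, None, 'worker', 0)  # join the cluster as (job, task)
make_distribute_session()                                # installs a MonitoredSession as ThisSession
result = ThisSession.run(some_tensor)                    # some_tensor: any tensor built for this host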
Code Example #14
 def evaluate(self, name=None, feeds=None):
     with get_global_context().test_phase():
         return ThisSession.run(self.tensors.get(name), feeds)
Code Example #15
File: sino.py Project: tech-pi/SRF
 def run_and_print_if_is_master(self, x):
     if ThisHost.is_master():
         if isinstance(x, Tensor):
             x = x.data
         result = ThisSession.run(x)
         print(result)