def test_net_multi_use(self):
    """Registering the same net twice in a task must execute it twice.

    The task's step increments `total` by 1 each time `net` runs; after
    two registrations via ops.net(), a single session.run() must leave
    `total` equal to 2.
    """
    with Task() as task:
        total = ops.Const(0)
        net = Net('my_net')
        net.Add([total, net.Const(1)], [total])
        # Register the same net twice: it is expected to run twice.
        ops.net(net)
        ops.net(net)
        result = final_output(total)
    with LocalSession() as session:
        session.run(task)
        # Fix: assertEquals is a long-deprecated alias of assertEqual.
        self.assertEqual(2, result.fetch())
def test_net_multi_use(self):
    """A net added to a task twice runs twice per session.run()."""
    with Task() as task:
        total = ops.Const(0)
        net = Net('my_net')
        net.Add([total, net.Const(1)], [total])
        ops.net(net)
        ops.net(net)  # second registration: the +1 step runs again
        result = final_output(total)
    with LocalSession() as session:
        session.run(task)
        # Fix: replaced deprecated assertEquals alias with assertEqual.
        self.assertEqual(2, result.fetch())
def test_download_group_simple(self):
    """Verify the download task group runs between epoch_group and
    exit_group.

    All blobs are initialized to 1.0; the training net computes
    output = input1 + input2 = 2.0 and the download net copies it, so
    both "output" and "download_result" must read back as 2.0.
    """
    model = model_helper.ModelHelper(name="test_model")
    download_net = core.Net("download_net")

    blob_names = ("input1", "input2", "output", "download_result")
    for blob in blob_names:
        model.param_init_net.ConstantFill(
            [], [blob], shape=[8, ], value=1.0, run_once=0)
    model.net.Add(["input1", "input2"], ["output"])
    download_net.Copy(["output"], ["download_result"])

    # All blob values are initialized as 1.0; once download_net has
    # executed, the downloaded copy must match the training result.
    with Job() as job:
        with Node("trainer:0"):
            with job.init_group:
                Task(step=model.param_init_net)
            with job.epoch_group:
                with Task():
                    with ops.loop(1):
                        ops.net(model.net)
            with job.download_group:
                Task(step=download_net)
            epoch_limiter(job, 1)

    ws = workspace.C.Workspace()
    session = LocalSession(ws)
    JobRunner(job).train(session)

    want = np.full(8, 2.0).astype(np.float32)
    for out_blob in ("output", "download_result"):
        self.assertTrue(np.array_equal(want, ws.fetch_blob(out_blob)))
def test_download_group_simple(self):
    """Ensure the download task group executes between epoch_group and
    exit_group: the blob copied by the download net must equal the
    freshly trained output.
    """
    m = model_helper.ModelHelper(name="test_model")
    fetcher = core.Net("download_net")

    for blob_name in ["input1", "input2", "output", "download_result"]:
        m.param_init_net.ConstantFill(
            [], [blob_name], shape=[8, ], value=1.0, run_once=0)
    m.net.Add(["input1", "input2"], ["output"])
    fetcher.Copy(["output"], ["download_result"])

    # Every blob starts at 1.0, so after one epoch output == 2.0 and the
    # download net's copy must agree with it.
    with Job() as job:
        with Node("trainer:0"):
            with job.init_group:
                Task(step=m.param_init_net)
            with job.epoch_group:
                with Task():
                    with ops.loop(1):
                        ops.net(m.net)
            with job.download_group:
                Task(step=fetcher)
            epoch_limiter(job, 1)

    workspace_handle = workspace.C.Workspace()
    local_session = LocalSession(workspace_handle)
    runner = JobRunner(job)
    runner.train(local_session)

    expected = np.full(8, 2.0).astype(np.float32)
    self.assertTrue(
        np.array_equal(expected, workspace_handle.fetch_blob("output")))
    self.assertTrue(
        np.array_equal(
            expected, workspace_handle.fetch_blob("download_result")))
def _static_threads_task(name, group, final_outputs, reader, num_threads,
                         output, capacity):
    """Build a Task that drives `num_threads` statically-created workers.

    Each thread gets its own init/read/write/exit nets, built up front,
    and all thread bodies run as concurrent substeps of one execution
    step. Per-iteration timing is recorded under `profiler_name` via
    TimerBegin/TimerEnd.

    Returns:
        (out_queue, task): out_queue is None when the reader yields no
        record (nothing is written).
    """
    node_name = str(Node.current())
    # BUG FIX: the original evaluated `processor_name(input) if input ...`,
    # but this function has no `input` parameter, so it referenced the
    # *builtin* input function (always truthy) and the profiler name
    # embedded the builtin instead of the pipeline source. Report the
    # reader this task actually consumes.
    profiler_name = "{0}/{1}/{2}/{3}/{4}".format(
        node_name,
        "pipe",
        name,
        processor_name(reader) if reader else "NoInput",
        processor_name(output) if output else "NoOutput")
    with Task(name=name, group=group, outputs=final_outputs) as task:
        global_exit_net = core.Net('exit')
        global_init_net = core.Net('init')
        reader.setup_ex(global_init_net, global_exit_net)

        out_queue = None
        writer = None

        steps = []
        for thread_id in range(num_threads):
            with NetBuilder(name='t:%d' % thread_id) as nb:
                init_net = core.Net('init')
                exit_net = core.Net('exit')
                read_nets, status, rec = reader.read_record_ex(
                    init_net, exit_net)
                # Initialize the stop flag so the first should_stop check
                # does not read an uninitialized blob.
                init_net.ConstantFill(
                    [], [status],
                    shape=[],
                    value=False,
                    dtype=core.DataType.BOOL
                )

                if rec is not None:
                    if writer is None:
                        # hack so that the out queue gets the right name prefix
                        # (otherwise they would be prefixed with the thread id)
                        with NetBuilder(_fullname=task.name):
                            out_queue, writer = _init_output(
                                output, capacity, global_init_net,
                                global_exit_net)
                    write_nets, _ = writer.write_record_ex(
                        rec, init_net, exit_net, status)
                else:
                    write_nets = []

                timer_start_net = core.Net('timer_start')
                timer = timer_start_net.TimerBegin(
                    [], counter_name=profiler_name)
                timer_end_net = core.Net('timer_end')
                timer_end_net.TimerEnd(timer, [])

                ops.net(init_net)
                ops.net(core.execution_step(
                    'body',
                    [timer_start_net] + list(read_nets) +
                    list(write_nets) + [timer_end_net],
                    should_stop_blob=status))
                # Close the timer once more after the loop exits, since the
                # last iteration may stop before reaching timer_end_net.
                ops.net(timer_end_net)
                ops.net(exit_net)
            steps.append(core.to_execution_step(nb))
        ops.net(global_init_net)
        ops.net(core.execution_step('body', steps, concurrent_substeps=True))
        ops.net(global_exit_net)
    return out_queue, task
def _runtime_threads_task(name, group, final_outputs, reader, num_threads,
                          output, capacity):
    """Build a Task whose body is instantiated `num_threads` times at runtime.

    Unlike the static variant, a single set of per-instance nets is built
    and the Task is created with num_instances=num_threads. Global
    setup/teardown run under ops.task_init()/ops.task_exit(); per-instance
    setup/teardown under ops.task_instance_init()/ops.task_instance_exit().

    Returns:
        (out_queue, task): out_queue is None when the reader yields no
        record (nothing is written).
    """
    node_name = str(Node.current())
    # BUG FIX: the original referenced the *builtin* `input` here (this
    # function has no `input` parameter), which is always truthy, so the
    # "input" slot of the profiler name named the builtin. Report the
    # reader actually consumed instead.
    profiler_name = "{0}/{1}/{2}/{3}/{4}".format(
        node_name,
        "pipe",
        name,
        processor_name(reader) if reader else "NoInput",
        processor_name(output) if output else "NoOutput")
    with Task(name=name, group=group, outputs=final_outputs,
              num_instances=num_threads) as task:
        global_exit_net = core.Net('pipe:exit')
        global_init_net = core.Net('pipe:init')
        reader.setup_ex(global_init_net, global_exit_net)

        init_net = core.Net('pipe:instance:init')
        exit_net = core.Net('pipe:instance:exit')
        read_nets, status, rec = reader.read_record_ex(init_net, exit_net)
        # Initialize the stop flag so the first should_stop check does not
        # read an uninitialized blob.
        init_net.ConstantFill(
            [], [status],
            shape=[],
            value=False,
            dtype=core.DataType.BOOL
        )

        if rec is not None:
            out_queue, writer = _init_output(
                output, capacity, global_init_net, global_exit_net)
            write_nets, _ = writer.write_record_ex(
                rec, init_net, exit_net, status)
        else:
            out_queue = None
            write_nets = []

        with ops.task_init():
            ops.net(global_init_net)
        with ops.task_instance_init():
            ops.net(init_net)

        timer_start_net = core.Net('timer_start')
        timer = timer_start_net.TimerBegin([], counter_name=profiler_name)
        timer_end_net = core.Net('timer_end')
        timer_end_net.TimerEnd(timer, [])

        ops.net(core.execution_step(
            'body',
            [timer_start_net] + list(read_nets) + list(write_nets) +
            [timer_end_net],
            should_stop_blob=status))
        # Close the timer once more: the final iteration may stop before
        # reaching timer_end_net inside the loop body.
        ops.net(timer_end_net)

        with ops.task_instance_exit():
            ops.net(exit_net)
        with ops.task_exit():
            ops.net(global_exit_net)
    return out_queue, task
def _pipe_step(
        input, output=None, num_threads=1, processor=None, name=None,
        capacity=None, group=None, final_outputs=None):
    """Create a Task that pipes records from `input` into `output`.

    `input` must be a Reader or expose a reader() method (e.g. a queue or
    stream). If `processor` is given, records are transformed through a
    ProcessingReader before being written. When num_threads == 0 no task
    is created and the (possibly wrapped) reader is returned directly.

    Returns:
        (out_queue, task): out_queue is None when nothing is written.

    Raises:
        ValueError: if `input` is neither a Reader nor reader()-capable.
    """
    if isinstance(input, Reader):
        reader = input
    elif hasattr(input, 'reader'):
        reader = input.reader()
    else:
        # Fix: corrected typo "streaam" in the original message.
        raise ValueError('in must be a reader, queue or stream.')
    if processor is not None:
        reader = ProcessingReader(reader, processor)
    if num_threads == 0:
        assert output is None
        return reader, None
    if name is None and processor is not None:
        name = processor_name(processor)
    if name is None and output is not None:
        name = 'pipe_into:%s' % processor_name(output)
    if name is None:
        name = 'pipe_from:%s' % processor_name(input)
    node_name = str(Node.current())
    profiler_name = "{0}/{1}/{2}/{3}/{4}".format(
        node_name,
        "pipe",
        name,
        processor_name(input) if input else "NoInput",
        processor_name(output) if output else "NoOutput")
    with Task(name=name, group=group, outputs=final_outputs) as task:
        global_exit_net = core.Net('exit')
        global_init_net = core.Net('init')
        reader.setup_ex(global_init_net, global_exit_net)

        out_queue = None
        writer = None

        steps = []
        for thread_id in range(num_threads):
            with NetBuilder(name='t:%d' % thread_id) as nb:
                init_net = core.Net('init')
                exit_net = core.Net('exit')
                read_nets, status, rec = reader.read_record_ex(
                    init_net, exit_net)
                # Initialize the stop flag so the first should_stop check
                # does not read an uninitialized blob.
                init_net.ConstantFill(
                    [], [status], shape=[], value=False,
                    dtype=core.DataType.BOOL)
                if rec is not None:
                    if writer is None:
                        # hack so that the out queue gets the right name prefix
                        # (otherwise they would be prefixed with the thread id)
                        with NetBuilder(_fullname=task.name):
                            out_queue, writer = _init_output(
                                output, capacity, global_init_net,
                                global_exit_net)
                    write_nets, _ = writer.write_record_ex(
                        rec, init_net, exit_net, status)
                else:
                    write_nets = []

                timer_start_net = core.Net('timer_start')
                timer = timer_start_net.TimerBegin(
                    [], counter_name=profiler_name)
                timer_end_net = core.Net('timer_end')
                timer_end_net.TimerEnd(timer, [])

                ops.net(init_net)
                ops.net(core.execution_step(
                    'body',
                    [timer_start_net] + list(read_nets) +
                    list(write_nets) + [timer_end_net],
                    should_stop_blob=status))
                # Close the timer once more after the loop exits, since the
                # last iteration may stop before reaching timer_end_net.
                ops.net(timer_end_net)
                ops.net(exit_net)
            steps.append(core.to_execution_step(nb))
        ops.net(global_init_net)
        ops.net(core.execution_step('body', steps, concurrent_substeps=True))
        ops.net(global_exit_net)
    return out_queue, task
def _pipe_step(
        input, output=None, num_threads=1, processor=None, name=None,
        capacity=None, group=None, final_outputs=None):
    """Create a Task that pipes records from `input` into `output`.

    `input` must be a Reader or expose a reader() method (e.g. a queue or
    stream). If `processor` is given, records are transformed through a
    ProcessingReader before being written. When num_threads == 0 no task
    is created and the (possibly wrapped) reader is returned directly.

    Returns:
        (out_queue, task): out_queue is None when nothing is written.

    Raises:
        ValueError: if `input` is neither a Reader nor reader()-capable.
    """
    if isinstance(input, Reader):
        reader = input
    elif hasattr(input, 'reader'):
        reader = input.reader()
    else:
        # Fix: corrected typo "streaam" in the original message.
        raise ValueError('in must be a reader, queue or stream.')
    if processor is not None:
        reader = ProcessingReader(reader, processor)
    if num_threads == 0:
        assert output is None
        return reader, None
    if name is None and processor is not None:
        name = processor_name(processor)
    if name is None and output is not None:
        name = 'pipe_into:%s' % processor_name(output)
    if name is None:
        name = 'pipe_from:%s' % processor_name(input)
    with Task(name=name, group=group, outputs=final_outputs) as task:
        global_exit_net = core.Net('exit')
        global_init_net = core.Net('init')
        reader.setup_ex(global_init_net, global_exit_net)

        out_queue = None
        writer = None

        steps = []
        for thread_id in range(num_threads):
            with NetBuilder(name='t:%d' % thread_id) as nb:
                init_net = core.Net('init')
                exit_net = core.Net('exit')
                read_nets, status, rec = reader.read_record_ex(
                    init_net, exit_net)
                # Consistency/robustness fix: the sibling pipe helpers in
                # this file explicitly initialize the stop flag to False so
                # the first should_stop check never reads an uninitialized
                # blob; this variant was missing that init.
                init_net.ConstantFill(
                    [], [status], shape=[], value=False,
                    dtype=core.DataType.BOOL)
                if rec is not None:
                    if writer is None:
                        # hack so that the out queue gets the right name prefix
                        # (otherwise they would be prefixed with the thread id)
                        with NetBuilder(_fullname=task.name):
                            out_queue, writer = _init_output(
                                output, capacity, global_init_net,
                                global_exit_net)
                    write_nets, _ = writer.write_record_ex(
                        rec, init_net, exit_net, status)
                else:
                    write_nets = []

                ops.net(init_net)
                ops.net(core.execution_step(
                    'body', list(read_nets) + list(write_nets),
                    should_stop_blob=status))
                ops.net(exit_net)
            steps.append(core.to_execution_step(nb))
        ops.net(global_init_net)
        ops.net(core.execution_step('body', steps, concurrent_substeps=True))
        ops.net(global_exit_net)
    return out_queue, task