def hardcode_vars():
    ### Setup execution block ###############################################
    # Context setup.
    context = DataContext(name='Data')
    context['a'] = 0.5
    context['b'] = 3.0
    context['c'] = 4.0
    """
    context.defer_events = True
    x = arange(0,10,.01)
    context['a'] = 1.0
    context['b'] = array([1, 2, 3])
    context['c'] = array([4, 5, 6])
    context.defer_events = False
    """
    context.defer_events = False

    code = "from blockcanvas.debug.my_operator import add, mul\n" \
           "from numpy import arange\n" \
           "x = arange(0,10,.1)\n" \
           "c1 = mul(a,a)\n" \
           "x1 = mul(x,x)\n" \
           "t1 = mul(c1,x1)\n" \
           "t2 = mul(b, x)\n" \
           "t3 = add(t1,t2)\n" \
           "y = add(t3,c)\n"

    return code, context
def test_persistence():
    """ Check that data persists correctly when a MultiContext is saved and
    loaded back.
    """
    d1 = DataContext(name='test_context1', subcontext={'a': 1, 'b': 2})
    d2 = DataContext(name='test_context2',
                     subcontext={'foo': 100, 'bar': 200, 'baz': 300})
    m = MultiContext(d1, d2, name='test_mc')

    f = StringIO()
    m.save(f)
    f.seek(0, 0)
    new_m = MultiContext.load(f)

    assert m.name == new_m.name

    # Check the keys of new_m
    assert set(new_m.keys()) == set(m.keys())

    # Check the values
    assert new_m['a'] == m['a']
    assert new_m['b'] == m['b']
    assert new_m['foo'] == m['foo']
    assert new_m['bar'] == m['bar']

    # Check the subcontexts
    assert new_m.subcontexts == m.subcontexts
def setUp(self):
    unittest.TestCase.setUp(self)
    dc = DataContext(name='dc')
    self.context = DataContext(subcontext=dc)
    dc['depth'] = arange(0., 10000., 1000.)
    self.context['context'] = dc.subcontext
    self.code_dir = os.path.join(os.path.dirname(__file__),
                                 'with_mask_codes')
def test_comparison():
    class _TestContext(DataContext):
        pass

    a = DataContext(name='a')
    b = DataContext(name='b')
    c = _TestContext(name='c')

    # Equality ignores the context name but distinguishes subclasses.
    assert a == b
    assert a != c
def test_assign_value(self):
    context = DataContext()
    context.on_trait_change(self.event_listener, 'items_modified')
    context['a'] = 'foo'
    self.assertEqual(self.event_count, 1)
    self.assertEqual(self.last_event.added, ['a'])
    self.assertEqual(self.last_event.modified, [])
    self.assertEqual(self.last_event.removed, [])
def test_checkpoint():
    d = DataContext()
    d['a'] = object()
    d['b'] = object()
    copy = d.checkpoint()
    assert copy is not d
    assert copy.subcontext is not d.subcontext
    assert set(copy.keys()) == set(d.keys())
    assert copy['a'] is d['a']
    assert copy['b'] is d['b']
def test_defer_add_event(self):
    context = DataContext()
    context.on_trait_change(self.event_listener, 'items_modified')
    context.defer_events = True
    context['a'] = 'foo'
    context.defer_events = False
    self.assertEqual(self.event_count, 1)
    self.assertEqual(self.last_event.added, ['a'])
    self.assertEqual(self.last_event.modified, [])
    self.assertEqual(self.last_event.removed, [])
def test_keys(self):
    """ Check that two contexts containing overlapping sets of keys appear
    to have a single set of unique keys (i.e. a 'set').
    """
    d1 = DataContext(name='d1', subcontext={'a': 1, 'b': 2})
    d2 = DataContext(name='d2', subcontext={'a': 3, 'c': 4})
    m = MultiContext(d1, d2, name='m')
    sorted_keys = sorted(m.keys())
    self.assertEqual(sorted_keys, ['a', 'b', 'c'])
def test_delete_after_add(self):
    context = DataContext()
    context.on_trait_change(self.event_listener, 'items_modified')
    self.assertEqual(self.event_count, 0)
    context.defer_events = True
    self.assertEqual(self.event_count, 0)
    context['a'] = 'foo'
    self.assertEqual(self.event_count, 0)
    del context['a']
    self.assertEqual(self.event_count, 0)
    # The add and the delete cancel out, so no event fires when deferral ends.
    context.defer_events = False
    self.assertEqual(self.event_count, 0)
def test_persistence():
    """ Can DataContexts round-trip through the persistence mechanism? """
    d = DataContext(name='test_context')
    d['a'] = 1
    d['b'] = 2

    f = StringIO()
    d.save(f)
    f.seek(0, 0)
    d2 = DataContext.load(f)

    assert d.name == d2.name
    assert set(d2.keys()) == set(['a', 'b'])
    assert d2['a'] == d['a']
    assert d2['b'] == d['b']
def test_persistence():
    """ Can DataContexts round-trip through the persistence mechanism? """
    d = DataContext(name='test_context')
    d['a'] = 1
    d['b'] = 2

    f = BytesIO()
    d.save(f)
    f.seek(0, 0)
    d2 = DataContext.load(f)

    assert d.name == d2.name
    assert set(d2.keys()) == set(['a', 'b'])
    assert d2['a'] == d['a']
    assert d2['b'] == d['b']
def setup(self):
    d = DataContext()
    d['a'] = 1
    d['b'] = 2.0
    tcw = TraitslikeContextWrapper(_context=d)
    tcw.add_traits('c', a=Int, b=Float)
    self.d = d
    self.tcw = tcw
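# A minimal usage sketch (not part of the original tests) for the fixture
# above. It assumes the same imports as that test module and the usual
# TraitslikeContextWrapper behaviour of keeping its traits and the wrapped
# DataContext in sync; exact semantics may differ across codetools versions.
def example_traitslike_sync():
    d = DataContext()
    d['a'] = 1
    d['b'] = 2.0
    tcw = TraitslikeContextWrapper(_context=d)
    tcw.add_traits(a=Int, b=Float)
    assert tcw.a == 1      # reads on the wrapper go through to the context
    tcw.b = 3.5            # writes on the wrapper update the context in place
    assert d['b'] == 3.5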
def test_defer_multiple_events(self):
    context = DataContext()
    context.on_trait_change(self.event_listener, 'items_modified')
    context.defer_events = True
    self.assertEqual(self.event_count, 0)
    context['a'] = 'foo'
    self.assertEqual(self.event_count, 0)
    context['a'] = 'foo2'
    self.assertEqual(self.event_count, 0)
    context['b'] = 'bar'
    self.assertEqual(self.event_count, 0)
    context.defer_events = False
    # 'modified' is empty because 'a' was also added during the deferral.
    self.assertEqual(self.event_count, 1)
    self.assertEqual(set(self.last_event.added), set(['a', 'b']))
    self.assertEqual(self.last_event.modified, [])
    self.assertEqual(self.last_event.removed, [])
def test_checkpoint():
    d1 = DataContext()
    d1['a'] = object()
    d2 = DataContext()
    d2['b'] = object()
    m = MultiContext(d1, d2)
    copy = m.checkpoint()
    assert copy is not m
    assert copy.subcontexts is not m.subcontexts
    assert len(copy.subcontexts) == len(m.subcontexts)
    for csc, msc in zip(copy.subcontexts, m.subcontexts):
        assert csc is not msc
        assert set(csc.keys()) == set(msc.keys())
        for key in list(msc.keys()):
            assert csc[key] is msc[key]
    assert set(copy.keys()) == set(m.keys())
    assert copy['a'] is m['a']
    assert copy['b'] is m['b']
def test_contexts_list_changes(self):
    """ Check that changes to the list of subcontexts update the
    multi-context.
    """
    d1 = DataContext(name='test_context1', subcontext={'a': 1, 'b': 2})
    d2 = DataContext(name='test_context2')
    m = MultiContext(*[d1, d2], **{'name': 'test_mc'})
    self.assertTrue(len(list(m.keys())) == 2)

    # Add another context
    d3 = DataContext(name='test_context3', subcontext={'c': 3, 'd': 4})
    m.subcontexts.append(d3)
    self.assertTrue(len(list(m.keys())) == 4)

    # Modify an existing context
    m.subcontexts[1].subcontext = {'cc': 5}
    self.assertTrue(len(list(m.keys())) == 5)

    # Remove a context
    m.subcontexts.pop(0)
    self.assertTrue(len(list(m.keys())) == 3)
def test_simple_execution_manager(self):
    code = "b=a*10\n"
    e = FormulaExecutingContext()
    e.data_context = DataContext()
    e.data_context['a'] = 5
    e.external_code = code
    assert e.data_context['b'] == 50
    e.external_code = "c=a*20\n"
    assert e.data_context['c'] == 100
    e['f'] = '=a*100'
    assert e.data_context['f'] == 500
    assert e['f'] == '500=a*100'
def __init__(self, code=None, context=None, **traits):
    """ Sync the status message to the status information in the BlockUnit.
    """
    super(BlockApplication, self).__init__(**traits)

    if context is None:
        context = DataContext()

    if code is None:
        self.block_unit = BlockUnit(data_context=context)
    else:
        self.block_unit = BlockUnit(code=code, data_context=context)

    # Sync the status, but make sure to get the initial status first.
    self.status = self.block_unit.status
    self.sync_trait('status', self.block_unit, mutual=True)
def setUp(self):
    unittest.TestCase.setUp(self)

    # Put unit adapters on either side of a masking adapter to see if they
    # cooperate. Store meters in the raw context, push fathoms through the
    # mask, and expose feet to the outside world.
    self.units = units = {'in': meters, 'mid': fathom, 'out': feet}

    # Set up data for the contexts
    depth = UnitArray(linspace(0.0, 100.0, 11), units=units['in'])
    lith = array(['sand'] * len(depth), dtype=object)

    # Create the contexts
    self.context = AdaptedDataContext(subcontext=DataContext())
    self.raw_context = self.context.subcontext

    # Add data (before creating the adapters)
    self.context.update(depth=depth, lith=lith)

    # (This simplifies creating UnitConversionAdapters)
    def always(value):
        class C(dict):
            def get(self, key, default=None):
                return value

            def __repr__(self):
                return '{*:%r}' % value
        return C()

    # Layer multiple adapters
    d = depth.view(ndarray)
    self.mask = (15.0 < d) & (d < 55.0)
    self.convert_out = lambda x: convert(x, units['in'], units['out'])
    self.convert_in = lambda x: convert(x, units['out'], units['in'])
    #self.context.push_adapter(
    #    UnitConversionAdapter(setitem_units=always(units['in']),
    #                          getitem_units=always(units['mid'])))
    self.context.push_adapter(MaskingAdapter(mask=self.mask))
    self.context.push_adapter(
        #UnitConversionAdapter(setitem_units=always(units['mid']),
        UnitConversionAdapter(setitem_units=always(units['in']),
                              getitem_units=always(units['out'])))
def get_context(self):
    """ Finalize the context. """
    self.context = None
    if os.path.splitext(self.filename)[1] == '.pickle':
        self.context = DataContext.load_context_from_file(self.filename)
    elif self.model:
        if isinstance(self.model, FileLogReaderUI):
            reader = self.model.active_reader
            if reader:
                log_info = reader.read_info(self.filename)
                self.context = create_geo_context(
                    reader.read_data(self.filename, log_info),
                    log_info.well_name)
        else:
            self.context = self.model.read_data()
    return
def copy(self):
    """ Make a deep copy of this FormulaExecutingContext. Useful for plot
    shadowing.
    """
    new_datacontext = DataContext()
    for key in list(self.data_context.keys()):
        try:
            new_datacontext[key] = copy(self.data_context[key])
        except:
            new_datacontext[key] = self.data_context[key]

    # Turn off auto-firing of events during construction, then turn it back
    # on after everything is set up.
    new = FormulaExecutingContext(data_context=new_datacontext,
                                  external_block=self.external_block,
                                  execution_needed=self.execution_needed,
                                  auto_execute=False,
                                  _expressions=self._expressions)
    new._regenerate_expression_block()
    new._regenerate_composite_block()
    new.auto_execute = self.auto_execute
    return new
def test_block_events(self):
    import numpy
    from codetools.blocks.api import Block

    context = DataContext(name="data")
    context.on_trait_change(self.event_listener, 'items_modified')
    context.defer_events = True
    context['a'] = 4
    context['b'] = numpy.array((1, 2, 3))
    context.defer_events = False
    self.assertEqual(self.event_count, 1)

    multi_context = MultiContext(context, name="multi")
    multi_context.on_trait_change(self.event_listener, 'items_modified')
    block = Block("c = a * b")
    block.execute(multi_context)
    # We expect one more event from the data context and one from the
    # multi-context, on top of the earlier deferred event.
    self.assertEqual(self.event_count, 3)
def read_data(self):
    """ Obtain x_locations, y_locations, data_locations, traces in a context.

    Returns
    -------
    context : DataContext
    """
    # Check if the filename is valid for reading data
    if not self.file_handle:
        return None

    # Set the file reader at the first char.
    if self.file_handle.closed:
        self.file_handle = open(self.filename, 'rb')

    # Set up a progress dialog
    progress = ProgressDialog(title='Reading Segy Files',
                              message='Reading Segy Files',
                              max=100, show_time=True, can_cancel=True)
    progress.open()

    # Skip the card_image_header and binary header
    self.file_handle.seek(Segy.CARD_IMAGE_HEADER_LEN +
                          Segy.BINARY_HEADER_LEN)
    progress.update(1)

    # Check if data lengths are correct.
    x_data_len = struct.calcsize(self.x_format)
    y_data_len = struct.calcsize(self.y_format)
    inline_data_len = struct.calcsize(self.inline_format)
    crossline_data_len = struct.calcsize(self.crossline_format)
    if not (x_data_len == y_data_len and
            y_data_len == inline_data_len and
            inline_data_len == crossline_data_len):
        logger.error('SegyReader: Mismatch in format lengths')
        return None

    if self.scale_format != '':
        scale_data_len = struct.calcsize(self.scale_format)
        if scale_data_len != x_data_len:
            logger.error('SegyReader: Mismatch in format lengths')
            return None

    # Get trace header data of 240 bytes.
    header_data = self.file_handle.read(Segy.TRACE_HEADER_LEN)
    traces, read_error = [], False
    previous_update = 1
    while header_data and not read_error:
        trace = self._read_trace(header_data, x_data_len)
        if trace is None:
            logger.error('SegyReader: Error in reading a trace')
            read_error = True
        else:
            traces.append(trace)
            header_data = self.file_handle.read(Segy.TRACE_HEADER_LEN)
            progress_pc = 1 + int(98.0 * float(len(traces)) /
                                  float(self.trace_count))
            if progress_pc - previous_update > 1:
                cont_val, skip_val = progress.update(progress_pc)
                previous_update = progress_pc

                # If the user has cancelled the action then stop the import
                # immediately.
                if skip_val or not cont_val:
                    del traces
                    self.file_handle.close()
                    return None

    self.file_handle.close()
    progress.update(100)

    if read_error:
        del traces
        return None
    else:
        arr_descriptor = {
            'names': ('x', 'y', 'inline', 'crossline', 'scale_factor',
                      'trace'),
            'formats': ('f4', 'f4', 'f4', 'f4', 'f4',
                        str(self.samples_per_trace) + 'f4'),
        }
        traces = array(traces, dtype=arr_descriptor)
        filesplit = os.path.split(self.filename)
        name = str(os.path.splitext(filesplit[1])[0]).translate(trans_table)
        return DataContext(name=name,
                           _bindings={
                               'traces': traces['trace'],
                               'x_locations': traces['x'],
                               'y_locations': traces['y'],
                               'inline_values': traces['inline'],
                               'crossline_values': traces['crossline'],
                               'scale_factors': traces['scale_factor'],
                           })
def load_project_from_file(self, file_path):
    """ Load a project from a .prj file.

    Parameters
    ----------
    file_path : Str
        Complete file path where the project is saved.
    """
    logger.debug('BlockUnit: Loading project from %s' % file_path)

    del self.block_unit
    self.block_unit = BlockUnit()

    if not os.path.exists(file_path):
        msg = 'BlockUnit: Loading of project at ' + \
              file_path + ' failed: Path does not exist.'
        logger.error(msg)
        return

    self.project_file_path = file_path

    # Read the .prj file and retrieve information about where the script
    # and context are separately saved.
    file_object = open(file_path, 'r')
    lines = file_object.readlines()
    lines_split = [line.split('=') for line in lines]
    lines_dict = {}
    for line in lines_split:
        key = line[0].strip().lower()
        if key != '':
            lines_dict[key] = line[1].strip()
    file_object.close()

    # Read the code and build the block
    s_path = 'script_path'
    if s_path in lines_dict and os.path.exists(lines_dict[s_path]):
        self.load_block_from_file(lines_dict[s_path])
    else:
        msg = 'BlockUnit: Loading of script for project at ' + \
              file_path + ' failed.'
        logger.error(msg)

    # Read the context file
    context, c_path = None, 'context_path'
    if c_path in lines_dict and os.path.exists(lines_dict[c_path]):
        context = DataContext.load_context_from_file(lines_dict[c_path])

    # Read the layout file
    l_path = 'layout_path'
    if l_path in lines_dict and os.path.exists(lines_dict[l_path]):
        self.block_unit.codeblock_ui.block_controller.layout_engine.load_layout(lines_dict[l_path])

    # Assign the context, if any
    if context is not None:
        self.block_unit.data_context = context
    else:
        msg = 'BlockUnit: Loading of context for project at ' + \
              file_path + ' failed.'
        logger.error(msg)

    # Interactor range files
    for key in INTERACTOR_LIST:
        if key in lines_dict:
            self.interactor_range_files[key] = lines_dict[key]

    # Load saved contexts
    self.exec_context.saved_contexts = {}
    saved_context_keys = [key for key in lines_dict.keys()
                          if key.startswith(saved_context_prefix)]
    if len(saved_context_keys):
        for key in saved_context_keys:
            final_key = key.replace(saved_context_prefix, '')
            self.exec_context.saved_contexts[final_key] = lines_dict[key]

    return
def context_factory(self, *args, **kw):
    """ Return the type of context we are testing. """
    return MultiContext(DataContext(*args, **kw))
# Test
if __name__ == '__main__':
    # # 1. Usage without using block and context:
    # from numpy import arange, zeros
    # depth = arange(0., 10000., 1000.)
    # vp = zeros(depth.shape)
    # vs = zeros(depth.shape)
    # with Mask((depth < 4000.0) & (depth > 1000.0)):
    #     vp = 1.0
    #     vs = 1.5
    # print vp, vs

    # 2. Usage with block and context:
    from numpy import arange, zeros
    from codetools.blocks.api import Block

    dc = DataContext(name='dc')
    context = ParametricContext(dc)
    dc['depth'] = arange(0., 10000., 1000.)
    dc['vp'] = zeros(dc['depth'].shape)
    dc['vs'] = zeros(dc['depth'].shape)
    context['context'] = dc._bindings

    code = 'from __future__ import with_statement\n'\
           'from numpy import zeros\n'\
           'from codetools.contexts.with_mask import Mask\n'\
           'with Mask((depth < 4000.0) & (depth > 1000.0)):vp=1.5 ; vs=1.0'

    ## # Expanded form of with statement taken from PEP 343. This is just for testing
    ## code = 'from numpy import zeros\n'\
    ##        'array_len = depth.shape\n'\
    ##        'vp = zeros(array_len)\n'\
def main():
    # Search boxes for finding functions to place on module.
    function_search = HandledFunctionSearch()

    ### Setup execution block ###############################################
    # Context setup.
    context = DataContext(name='Data')
    context['a'] = 1.0
    context.defer_events = False

    ### Setup the main application object ###################################
    # Reload from a file
    # Note: test case for block persistence; set the file_path to '' if
    # persistence need not be tested.
    file_path = ''

    if not os.path.isfile(file_path):
        code = "from numpy import arange\n" \
               "b=3\n" \
               "c=4\n" \
               "x = arange(0,10,.1)\n" \
               "y = a*x**2 + b*x + c\n"
        bu = BlockUnit(code=code, data_context=context)
    else:
        bu = BlockUnit(data_context=context)
        bu.load_block_from_file(file_path)

    def loop_interactor(interactor):
        import time
        import numpy
        time.sleep(1)
        for i in range(1, 100):
            interactor.interactor_shadow.input_a = numpy.sin(i / 10)
            time.sleep(0.1)
        print("done")
        import sys
        sys.exit(0)

    from blockcanvas.interactor.configurable_interactor import ConfigurableInteractor
    from blockcanvas.interactor.shadow_interactor import ShadowInteractor
    from blockcanvas.interactor.interactor_config import PlotConfig, InteractorConfig, VariableConfig
    from blockcanvas.plot.configurable_context_plot import ConfigurableContextPlot
    from blockcanvas.block_display.block_unit_variables import \
        BlockUnitVariableList
    from threading import Thread

    vars = BlockUnitVariableList(block=bu.codeblock.block,
                                 context=bu._exec_context)
    config = InteractorConfig(
        vars=vars.variables,
        var_configs=[VariableConfig(name='a', type="Shadow")],
        plot_configs=[PlotConfig(x='x', y='y')])
    interactor = ConfigurableInteractor(context=bu._exec_context,
                                        block=bu.codeblock.block,
                                        interactor_config=config)

    # Thread(target=loop_interactor, args=(interactor,)).start()
    interactor.edit_traits(kind='livemodal')
def __init__(self):
    self.data_context = DataContext()
    self.expression_context = ExpressionContext(self.data_context)
    self.plots = VPlotContainer()
    return
        array of the same length as the data named 'name'.
        """
        raise NotImplementedError

    ##########################################################################
    # PlotDataContextAdapter interface
    ##########################################################################

    def get_datasource(self, name):
        """ Get a Chaco DataSource for the named object. """
        return DataContextDataSource(context=self.context, context_name=name)

    @on_trait_change('context:items_modified')
    def _fire_data_changed(self, obj, name, old, value):
        """ Translate an 'items_modified' event to a Chaco 'data_changed'
        event.
        """
        event = {}
        event['added'] = value.added
        event['removed'] = value.removed
        event['changed'] = value.modified
        self.data_changed = event


if __name__ == '__main__':
    from numpy import array

    data = DataContext()
    data['vp'] = array((1, 2, 3, 4))
    data['vs'] = array((1, 2, 3, 4)) / 2.

    plot_data = PlotDataContextAdapter(context=data)
    print(plot_data.get_data('vp'))
def setUp(self):
    dc = DataContext(name='dc')
    dc['depth'] = arange(0., 10000., 1000.)
    self.context = dc
def setUp(self):
    self.context = AdaptedDataContext(context=DataContext())