def get_function_package(function_name, argument_package_list, return_package, work_range, merge_func='', merge_order=''):
    # merge_func and merge_order are accepted for compatibility but are not used here
    fp = Function_package()
    fp.set_function_name(function_name)
    fp.set_function_args(argument_package_list)

    # take the current OpenGL modelview matrix and transpose it
    # (OpenGL returns it in column-major order)
    from OpenGL.GL import glGetFloatv, GL_MODELVIEW_MATRIX
    fp.mmtx = glGetFloatv(GL_MODELVIEW_MATRIX)

    def flip_diagonal(mmtx):
        new_mmtx = numpy.empty((4, 4), dtype=numpy.float32)
        for i in range(4):
            for j in range(4):
                new_mmtx[i][j] = mmtx[j][i]
        return new_mmtx
    fp.mmtx = flip_diagonal(fp.mmtx)

    fp.work_range = work_range
    fp.output = return_package
    return fp
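
# The loop in flip_diagonal above is just a 4x4 transpose.  A minimal
# equivalent sketch using numpy directly; flip_diagonal_np is a name
# introduced here for illustration only, and nothing else in this module
# refers to it.
def flip_diagonal_np(mmtx):
    import numpy
    # transpose the column-major OpenGL matrix into row-major order and
    # force float32, matching what the loop-based version produces
    return numpy.ascontiguousarray(numpy.asarray(mmtx, dtype=numpy.float32).T)
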
def run_function(return_name=None, func_name='', execid=[], work_range=None, args=[], dtype_dict={}, output_halo=0, halo_dict={}, split_dict={}, merge_func='', merge_order=''):
    def input_argument_check():
        if type(func_name) != str:
            print "Function_name error"
            print "Func_name: ", func_name
        if type(dtype_dict) != dict:
            print "Dtype_dict error"
            print "Dtype_dict: ", dtype_dict
            assert (False)
        if type(split_dict) != dict:
            print "Split_dict error"
            print "Split_dict: ", split_dict
            assert (False)
        if type(halo_dict) != dict:
            print "Halo_dict error"
            print "Halo_dict: ", halo_dict
            assert (False)
        if type(merge_func) != str:
            print "Merge function_name error"
            print "Merge_function name: ", merge_func

    # compatibility
    ############################################################
    function_name = func_name
    args_list = args
    modifier_dict = {}

    # input argument error check
    input_argument_check()

    # initialization
    ##############################################################
    global mmtx, inv_mmtx
    global unique_id

    function_package = Function_package()
    fp = function_package

    if Vivaldi_viewer.v != None:
        mmtx = Vivaldi_viewer.mmtx
        inv_mmtx = Vivaldi_viewer.inv_mmtx

    Debug = False

    # arguments
    ##################################################################################
    new_args = []
    for data_name in args_list:
        if data_name in data_package_list:
            # data_name is already managed as a data package by the main manager
            dp = data_package_list[data_name]
            dtype = str(dp.data_contents_dtype)
            dtype = dtype.replace('_volume', '')
            function_name += dtype

            # the reader must have access to the data before the function runs,
            # so check whether it is already available from the reader or not
            data = globals()[data_name]
            u = dp.unique_id

            flag = False
            if dp.out_of_core and u not in retain_list:
                flag = True  # out of core and the memory manager was not informed
            if not dp.out_of_core and u not in retain_list:
                flag = True  # in core and not informed (function output is already informed)

            if flag:
                manage_as_data_package(data_name)
                dp = data_package_list[data_name]
                u = dp.unique_id
                if u not in retain_list:
                    retain_list[u] = []
                retain_list[u].append(dp.copy())
                reader_give_access_to_data(data_name)

            dp = dp.copy()
            dp.data = None
            dp.devptr = None
        elif data_name in globals():
            # There are two kinds of data here:
            # 1. volumes
            # 2. values
            data = globals()[data_name]
            if type(data) == numpy.ndarray:
                # this is a volume
                # from now on the data is also managed as a data package
                manage_as_data_package(data_name)

                # now we have a data package corresponding to the data
                dp = data_package_list[data_name]
                u = dp.unique_id
                if u not in retain_list:
                    retain_list[u] = []
                retain_list[u].append(dp.copy())

                # the Vivaldi reader has access to this data
                reader_give_access_to_data(data_name)

                # then make a new function name from the existing function name and the data dtype
                dtype = str(dp.data_contents_dtype)
                dtype = dtype.replace('_volume', '')
                function_name += str(dp.data_contents_dtype)

                dp = dp.copy()
                dp.data = None
                dp.devptr = None
            else:
                # this is a constant
                dp = Data_package()
                dtype = type(data_name)
                dp.data_name = data_name
                dp.unique_id = -1
                dp.data_dtype = dtype
                dp.data_contents_dtype = dtype
                dp.data_contents_memory_dtype = dtype
                dp.data = data
        else:
            # data_name is not in the globals list
            # it is usually an AXIS or a constant like x, y that was not previously defined
            if isinstance(data_name, Data_package):
                dp = data_name
            else:
                data = None
                dp = Data_package()
                dtype = type(data_name)
                dp.data_name = data_name
                dp.unique_id = -1
                dp.data_dtype = dtype
                dp.data_contents_dtype = dtype
                dp.data_contents_memory_dtype = dtype
                dp.data = data_name
        new_args.append(dp)
    args_list = new_args

    # get Vivaldi functions
    ######################################################################################
    global parsed_Vivaldi_functions
    func_args = args_list
    return_dtype = parsed_Vivaldi_functions.get_return_dtype(function_name)

    fp.set_function_name(function_name)
    fp.output.unique_id = unique_id
    fp.mmtx = mmtx
    fp.inv_mmtx = inv_mmtx
    fp.output.data_dtype = numpy.ndarray
    fp.output.data_name = return_name

    if return_dtype == '':
        print "======================================================="
        print "VIVALDI ERROR, can not find return dtype"
        print "function_name:", function_name
        print "return name:", return_name
        print "return dtype:", return_dtype
        print "======================================================="
        assert (False)
    fp.output.set_data_contents_dtype(return_dtype)

    v = Vivaldi_viewer.v
    trans_on = Vivaldi_viewer.trans_on
    transN = Vivaldi_viewer.transN
    if trans_on == True:
        if v.getIsTFupdated() == 1:
            fp.trans_tex = v.getTFF()
            fp.update_tf = 1
            fp.update_tf2 = 0
            v.TFF.widget.updated = 0
        elif v.getIsTFupdated2() == 1:
            fp.trans_tex = v.getTFF2()
            fp.update_tf = 0
            fp.update_tf2 = 1
            v.TFF2.widget.updated = 0
        fp.TF_bandwidth = v.getTFBW()
        fp.CRG = v.window.CRG

    output_halo = 0
    if type(work_range) == dict:
        if 'work_range' in work_range:
            work_range = work_range['work_range']

    if return_name != None:
        # merge_func
        ###############################################################
        func_args = ['front', 'back']
        func_dtypes = {}
        for elem in func_args:
            func_dtypes[elem] = return_dtype
        new_name = make_func_name_with_dtypes(merge_func, func_args, func_dtypes)
        merge_func = new_name

    # execid
    ###################################################################################
    if isinstance(execid, Data_package):
        execid = execid.data
    if type(execid) != list:
        execid = [execid]
    execid_list = execid
    fp.execid_list = execid

    # work range
    ##################################################################################
    if type(work_range) == dict and work_range == {}:
        # no explicit work range: use the full data range of the first real data argument
        for data_name in args_list:
            if isinstance(data_name, Data_package):
                dp = data_name
                if dp.unique_id == -1:
                    continue
                data_name = dp.data_name
                dp = data_package_list[data_name]
                work_range = dp.full_data_range
                break
    work_range = to_range(work_range)

    if return_name == '':
        # the return value is not yet bound to a name; defer execution and let the
        # caller re-invoke run_function with the final arguments
        return_name = None
        work_range = {'work_range': work_range}
        args = [return_name, function_name, execid, work_range, args_list, dtype_dict,
                output_halo, halo_dict, split_dict, merge_func, merge_order]
        return None, run_function, args
        #return None

    # local functions
    ############################################################################
    def make_tasks2(arg_packages, i):
        global unique_id
        if i == len(args_list):
            # common variables
            fp.function_args = arg_packages
            modifier = modifier_dict['output']
            if decom == 'in_and_out_split1':
                num = modifier['num']
                work_range = modifier['range_list'][num - 1]
                fp.work_range = work_range
                split = modifier['split']
                data_halo = modifier['data_halo']
                buffer_halo = modifier['buffer_halo']
                full_data_range = modifier['data_range']
                fp.output.data_halo = data_halo
                split_position = make_split_position(split, num)
                fp.output.split_position = str(split_position)
                data_range = apply_halo(work_range, data_halo)
                fp.output.set_data_range(str(data_range))
                fp.output.set_full_data_range(str(full_data_range))
                fp.output.set_buffer_range(buffer_halo)
                modifier['num'] += 1
            elif decom == 'in_and_out_split2':
                num = modifier['num']
                work_range = modifier['range_list'][num - 1]
                fp.work_range = work_range
                split = modifier['split']
                data_halo = modifier['data_halo']
                buffer_halo = modifier['buffer_halo']
                full_data_range = modifier['data_range']
                fp.output.data_halo = data_halo
                split_position = make_split_position(split, num)
                fp.output.split_position = str(split_position)
                data_range = apply_halo(work_range, data_halo)
                fp.output.set_data_range(str(data_range))
                fp.output.set_full_data_range(str(full_data_range))
                fp.output.set_buffer_range(buffer_halo)
                modifier['num'] += 1
            elif decom == 'in':
                fp.output.unique_id = unique_id
                output_range = apply_halo(output_range_list[0], output_halo)
                fp.output.set_data_range(output_range)
                fp.output.split_shape = str(SPLIT_BASE)
                fp.output.split_position = str(SPLIT_BASE)
                fp.work_range = output_range
                # buffer
                modifier = modifier_dict['output']
                buffer_halo = modifier['buffer_halo']
                fp.output.set_buffer_range(buffer_halo)
            elif decom == 'out':
                num = modifier['num']
                work_range = modifier['range_list'][num - 1]
                fp.work_range = work_range
                split = modifier['split']
                data_halo = modifier['data_halo']
                buffer_halo = modifier['buffer_halo']
                full_data_range = modifier['data_range']
                fp.output.data_halo = data_halo
                split_position = make_split_position(split, num)
                fp.output.split_position = str(split_position)
                data_range = apply_halo(work_range, data_halo)
                fp.output.set_data_range(str(data_range))
                fp.output.set_full_data_range(str(full_data_range))
                fp.output.set_buffer_range(buffer_halo)
                modifier['num'] += 1

            u = fp.output.unique_id
            unique_id += 1
            mem_retain(fp.output)
            if u not in retain_list:
                retain_list[u] = []
            retain_list[u].append(fp.output.copy())
            register_function(execid, fp)
            return

        dp = args_list[i]
        data_name = dp.data_name
        dp.memory_type = 'memory'

        # normal variables
        if dp.unique_id != -1:
            # setting about the full data
            dp.split_shape = str(SPLIT_BASE)
            buf = dp.data
            # work on a copy of the original
            dp = dp.copy()

            if decom == 'in_and_out_split1':
                """
                input and output decomposition
                """
                global in_and_out_n
                u = dp.unique_id
                data_name = dp.data_name
                modifier = modifier_dict[data_name] if data_name in modifier_dict else {}
                # split shape
                split_shape = modifier['split']
                dp.split_shape = str(split_shape)
                range_list = data_range_list_dict[data_name]
                data_halo = modifier['data_halo']
                buffer_halo = modifier['buffer_halo']
                cnt = modifier['cnt']
                split_position = make_split_position(split_shape, in_and_out_n)
                dp.split_position = str(split_position)
                # set data_range
                data_range = apply_halo(range_list[in_and_out_n - 1], data_halo, dp.full_data_range)
                dp.set_data_range(data_range)
                dp.data_halo = data_halo
                # set buffer halo
                buffer_range = apply_halo(range_list[in_and_out_n - 1], buffer_halo)
                dp.set_buffer_range(buffer_range)
                dp.buffer_halo = buffer_halo
                if Debug:
                    print "In and out DP", dp
                make_tasks2(arg_packages + [dp], i + 1)
            elif decom == 'in_and_out_split2':
                u = dp.unique_id
                data_name = dp.data_name
                modifier = modifier_dict[data_name] if data_name in modifier_dict else {}
                # split shape
                split_shape = modifier['split']
                dp.split_shape = str(split_shape)
                # make the split data pieces and go to the next argument
                data_name = dp.data_name
                data_halo = modifier['data_halo']
                data_range_list = data_range_list_dict[data_name]
                buffer_halo = modifier['buffer_halo']
                n = 1
                dp.data_halo = data_halo
                for data_range in data_range_list:
                    data_range = apply_halo(data_range, data_halo)
                    dp.data_dtype = numpy.ndarray
                    dp.set_data_range(data_range)
                    dp.data_halo = data_halo
                    memory_shape = dp.data_memory_shape
                    shape = dp.data_shape
                    bytes = dp.data_bytes
                    # make depth
                    depth = make_depth(data_range, mmtx)
                    dp.depth = depth
                    fp.output.depth = depth
                    mem_depth(u, str(SPLIT_BASE), str(SPLIT_BASE), depth)
                    split_position = make_split_position(split_shape, n)
                    n += 1
                    dp.split_position = str(split_position)
                    mem_depth(data_list[data_name], str(split_shape), dp.split_position, depth)
                    dp.set_buffer_range(buffer_halo)
                    make_tasks2(arg_packages + [dp], i + 1)
            elif decom == 'in':
                """
                input decomposition
                the data range stays the same
                """
                u = dp.unique_id
                data_name = dp.data_name
                modifier = modifier_dict[data_name] if data_name in modifier_dict else {}
                # split shape
                split_shape = modifier['split']
                dp.split_shape = str(split_shape)
                # make the split data pieces and go to the next argument
                data_name = dp.data_name
                data_halo = modifier['data_halo']
                data_range_list = data_range_list_dict[data_name]
                buffer_halo = modifier['buffer_halo']
                n = 1
                dp.data_halo = data_halo
                for data_range in data_range_list:
                    data_range = apply_halo(data_range, data_halo)
                    dp.data_dtype = numpy.ndarray
                    dp.set_data_range(data_range)
                    dp.data_halo = data_halo
                    memory_shape = dp.data_memory_shape
                    shape = dp.data_shape
                    bytes = dp.data_bytes
                    # make depth
                    depth = make_depth(data_range, mmtx)
                    dp.depth = depth
                    fp.output.depth = depth
                    mem_depth(u, str(SPLIT_BASE), str(SPLIT_BASE), depth)
                    split_position = make_split_position(split_shape, n)
                    n += 1
                    dp.split_position = str(split_position)
                    mem_depth(data_list[data_name], str(split_shape), dp.split_position, depth)
                    dp.set_buffer_range(buffer_halo)
                    if Debug:
                        print "DP", dp
                    make_tasks2(arg_packages + [dp], i + 1)
            elif decom == 'out':
                u = dp.unique_id
                # basic package setting
                dp.split_shape = str(SPLIT_BASE)
                dp.split_position = str(SPLIT_BASE)
                data_name = dp.data_name
                modifier = modifier_dict[data_name] if data_name in modifier_dict else {}
                range_list = data_range_list_dict[data_name]
                data_halo = modifier['data_halo']
                buffer_halo = modifier['buffer_halo']
                # data_range
                data_range = apply_halo(range_list[0], data_halo)
                dp.set_data_range(data_range)
                dp.data_halo = data_halo
                dp.set_full_data_range(data_range)
                # buffer range
                buffer_range = apply_halo(range_list[0], buffer_halo)
                dp.set_buffer_range(buffer_range)
                dp.buffer_halo = buffer_halo
                make_tasks2(arg_packages + [dp], i + 1)
        else:
            make_tasks2(arg_packages + [dp], i + 1)

    def check_in_and_out(modifier_dict, input_cnt, output_cnt):
        flag = False
        in_and_out_split = True
        # in_and_out split version 1 test:
        # all split shapes are identical
        for data_name in modifier_dict:
            modifier = modifier_dict[data_name]
            if flag:
                if split == modifier['split']:
                    pass
                else:
                    in_and_out_split = False
                    break
            else:
                # remember the first split shape and skip the comparison
                split = modifier['split']
                flag = True
        if in_and_out_split:
            return 'in_and_out_split1'

        # in_and_out split version 2 test:
        # the input and output split counts are the same
        if output_cnt == input_cnt:
            return 'in_and_out_split2'
        return False

    ############################################################################
    # make argument name list
    args_name_list = []
    for elem in args_list:
        if isinstance(elem, Data_package):
            args_name_list.append(elem.data_name)

    if return_name == None:
        return_name = 'output'

    # set output information
    modifier_dict['output'] = {}
    output_split = split_dict[return_name] if return_name in split_dict else SPLIT_BASE
    output_data_range = work_range
    output_data_halo = 0
    buffer_halo = 10
    output_dtype = return_dtype

    output_range = to_range(output_data_range)
    output_split = to_split(output_split)
    output_range_list = make_range_list(output_range, output_split)
    cnt = shape_to_count(output_split)

    modifier_dict['output']['split'] = output_split
    modifier_dict['output']['data_range'] = output_range
    modifier_dict['output']['data_halo'] = output_halo
    modifier_dict['output']['cnt'] = cnt
    modifier_dict['output']['buffer_halo'] = buffer_halo
    modifier_dict['output']['num'] = 1
    modifier_dict['output']['range_list'] = output_range_list
    output_cnt = cnt

    # temp data package
    temp = Data_package()
    temp.data_name = return_name
    temp.unique_id = unique_id
    temp.data_dtype = numpy.ndarray
    temp.data_halo = output_halo
    temp.set_data_contents_dtype(return_dtype)

    # modifier information about the input
    input_cnt = 1
    data_range_list_dict = {}
    # make a modifier for each argument
    for args in args_list:
        name = args.data_name
        if args.unique_id != -1:
            modifier = {}
            modifier['data_range'] = args.data_range
            modifier['dtype'] = args.data_contents_dtype

            data_range = args.data_range
            data_halo = args.data_halo
            data_range = apply_halo(data_range, -data_halo)

            split = split_dict[name] if name in split_dict else SPLIT_BASE
            for axis in AXIS:
                if axis not in split:
                    split[axis] = 1

            data_range_list = make_range_list(data_range, split)
            data_range_list_dict[name] = data_range_list
            cnt = shape_to_count(split)

            modifier_dict[name] = {}
            modifier_dict[name]['split'] = split
            modifier_dict[name]['data_range'] = data_range
            modifier_dict[name]['data_halo'] = halo_dict[name] if name in halo_dict else 0
            modifier_dict[name]['buffer_halo'] = buffer_halo
            modifier_dict[name]['cnt'] = cnt
            input_cnt *= cnt

    in_and_out_split = check_in_and_out(modifier_dict, input_cnt, output_cnt)

    if in_and_out_split == 'in_and_out_split1':
        decom = 'in_and_out_split1'
        # this is the special case called in & out split
        fp.output.split_shape = str(output_split)
        global in_and_out_n
        in_and_out_n = 1
        for work_range in output_range_list:
            make_tasks2([], 0)
            in_and_out_n += 1

        modifier = modifier_dict['output']
        data_halo = modifier['data_halo']
        full_data_range = apply_halo(output_range, data_halo)
        temp.set_data_range(str(full_data_range))
        temp.set_full_data_range(str(full_data_range))
        temp.set_buffer_range(str(full_data_range))
        temp.data_halo = data_halo
        unique_id += 1
        # print "TEMP", temp
        return temp
    elif in_and_out_split == 'in_and_out_split2':
        decom = 'in_and_out_split2'
        fp.output.split_shape = str(output_split)
        make_tasks2([], 0)

        modifier = modifier_dict['output']
        data_halo = modifier['data_halo']
        full_data_range = apply_halo(output_range, data_halo)
        temp.set_data_range(str(full_data_range))
        temp.set_full_data_range(str(full_data_range))
        temp.set_buffer_range(str(full_data_range))
        temp.data_halo = data_halo
        unique_id += 1
        return temp
    elif input_cnt > 1:
        """
        input decomposition
        """
        decom = 'in'
        count = input_cnt

        # set the function package output
        full_data_range = apply_halo(output_range, output_halo)
        fp.output.set_data_range(dict(full_data_range))
        fp.output.set_full_data_range(dict(full_data_range))
        fp.output.data_halo = output_halo

        # set the output package
        temp.set_data_range(str(full_data_range))
        temp.set_full_data_range(str(full_data_range))
        temp.set_buffer_range(str(full_data_range))

        # register intermediate merge function
        u = unique_id
        inter = range(unique_id + 1, unique_id + count - 1)
        for inter_id in inter:
            temp.unique_id = inter_id
            mem_retain(temp)
        unique_id += count - 1
        temp.unique_id = u

        # make input functions
        make_tasks2([], 0)
        out_range = range(unique_id, unique_id + count)

        # intermediate merge functions
        dimension = len(output_range)
        scheduler_request_merge(temp, out_range, merge_func, merge_order, dimension)
        #mem_inform(temp)
        return temp
    elif output_cnt > 1:
        """
        output decomposition
        """
        decom = 'out'
        fp.output.split_shape = str(output_split)
        fp.output.data_halo = output_halo
        full_data_range = apply_halo(output_range, output_halo)
        n = 1
        for work_range in output_range_list:
            split_position = make_split_position(output_split, n)
            n += 1
            fp.output.split_position = str(split_position)
            data_range = apply_halo(work_range, output_halo)
            fp.output.set_data_range(str(data_range))
            fp.output.set_full_data_range(str(full_data_range))
            fp.output.set_buffer_range(buffer_halo)
            make_tasks2([], 0)

        temp.set_data_range(str(full_data_range))
        temp.set_full_data_range(str(full_data_range))
        temp.set_buffer_range(str(full_data_range))
        unique_id += 1
        return temp
    else:
        # input and output are both split, but this is not an in & out split
        print "==============================="
        print "VIVALDI ERROR"
        print "tried to split input and output together, but the number of input splits and output splits is different"
        print "input_cnt: ", input_cnt
        print "output_cnt: ", output_cnt
        print "==============================="
        assert (False)
    assert (False)
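
# A minimal, hypothetical calling sketch for run_function.  The function
# names 'heatflow' and 'composite', the global 'volume', and the concrete
# range/split/halo values are made up for illustration; in practice the
# Vivaldi translator generates calls of this shape, and nothing in this
# module invokes this helper.
def _run_function_usage_sketch():
    return run_function(return_name='result',
                        func_name='heatflow',
                        work_range={'x': (0, 64), 'y': (0, 64), 'z': (0, 64)},
                        args=['volume', 'x', 'y', 'z'],
                        split_dict={'volume': {'z': 2}},   # split the input volume into two z-slabs
                        halo_dict={'volume': 1},           # one voxel of halo per piece
                        merge_func='composite')
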
def parallel(function_name='', argument_package_list=[], work_range={}, execid=[], output_halo=0, output_split={}, merge_func='', merge_order=''):
    # compatibility with old versions
    ############################################################
    function_name = function_name.strip()

    def to_range(input):
        dtype = type(input)
        if type(input) == numpy.ndarray:
            # a numpy array stands for its own shape; drop a trailing channel dimension
            input = list(input.shape)
            dtype = type(input)
            n = len(input)
            if input[n - 1] in [1, 2, 3]:
                input.pop()
        if dtype in [tuple, list]:
            return shape_to_range(input)
        if isinstance(input, Data_package):
            dp = input
            work_range = apply_halo(dp.data_range, -dp.data_halo)
            return work_range
        if dtype == dict:
            return input
        return {}
    work_range = to_range(work_range)

    # input argument error check
    def input_argument_check():
        if type(function_name) != str or function_name == '':
            print "Function_name error"
            print "function_name: ", function_name
        if function_name not in function_code_dict:
            print "======================================"
            print "Vivaldi Warning"
            print "the function: " + function_name + " does not exist"
            print "======================================"
            assert(False)
        if type(merge_func) != str:
            print "Merge function_name error"
            print "Merge_function name: ", merge_func
        if type(work_range) != dict:
            print "work_range error"
            print "work_range: ", work_range
            assert(False)
    input_argument_check()

    # initialization
    ##############################################################
    global unique_id

    # share argument packages and send their data to the reader
    def share_argument_package_list(argument_package_list):
        def share_argument_package(argument_package):
            if argument_package.get_unique_id() == '-1':
                # skip small variables
                pass
            elif argument_package.shared == False:
                # not yet registered variables
                def reader_give_access(data_package):
                    #scheduler_inform(data_package, 2)
                    u = data_package.unique_id
                    scheduler_retain(data_package)
                    out_of_core = data_package.out_of_core
                    if out_of_core:
                        scheduler_notice_data_out_of_core(data_package)
                    else:
                        send_data(2, data_package.data, data_package)
                reader_give_access(argument_package)
                argument_package.shared = True
        for argument_package in argument_package_list:
            share_argument_package(argument_package)
    share_argument_package_list(argument_package_list)

    # get return package
    def get_return_package(function_name, argument_package_list, work_range, output_halo):
        data_package = Data_package()
        def get_unique_id():
            global unique_id
            unique_id += 1
            return unique_id
        data_package.unique_id = get_unique_id()
        data_package.data_dtype = numpy.ndarray
        data_package.data_halo = output_halo

        def get_return_dtype(function_name, argument_package_list):
            from Vivaldi_translator_layer import get_return_dtype
            function_code = function_code_dict[function_name]
            return_dtype = get_return_dtype(function_name, argument_package_list, function_code)
            if return_dtype.endswith('_volume'):
                print "Vivaldi_warning"
                print "---------------------------------"
                print "Check your function"
                print "you are trying to return a volume"
                print "return_dtype: ", return_dtype
                print "---------------------------------"
            return return_dtype
        return_dtype = get_return_dtype(function_name, argument_package_list)
        data_package.set_data_contents_dtype(return_dtype)
        data_package.set_full_data_range(work_range)
        data_package.set_data_range(work_range)
        data_package.halo = output_halo
        data_package.split = output_split
        data_package.shared = True
        return data_package
    return_package = get_return_package(function_name, argument_package_list, work_range, output_halo)

    # register return package to data_package_list
    def register_return_package(key, return_package):
        if key in data_package_list:
            # cannot happen
            pass
        else:
            data_package_list[key] = return_package
    register_return_package(id(return_package), return_package)

    # register function to the scheduler
    def get_function_package(function_name, argument_package_list, return_package, work_range, merge_func='', merge_order=''):
        fp = Function_package()
        fp.set_function_name(function_name)
        fp.set_function_args(argument_package_list)

        from OpenGL.GL import glGetFloatv, GL_MODELVIEW_MATRIX
        fp.mmtx = glGetFloatv(GL_MODELVIEW_MATRIX)
        def flip_diagonal(mmtx):
            new_mmtx = numpy.empty((4, 4), dtype=numpy.float32)
            for i in range(4):
                for j in range(4):
                    new_mmtx[i][j] = mmtx[j][i]
            return new_mmtx
        fp.mmtx = flip_diagonal(fp.mmtx)

        fp.work_range = work_range
        fp.output = return_package
        return fp
    function_package = get_function_package(function_name, argument_package_list, return_package, work_range, merge_func, merge_order)

    # setting viewer-src
    if Vivaldi_viewer.v != None:
        mmtx = Vivaldi_viewer.mmtx
        inv_mmtx = Vivaldi_viewer.inv_mmtx

    v = Vivaldi_viewer.v
    trans_on = Vivaldi_viewer.trans_on
    transN = 0
    fp = function_package
    if v != None:
        trans_on = Vivaldi_viewer.trans_on
        fp.transN = Vivaldi_viewer.transN
        if v.slider != None:
            fp.Sliders = v.get_sliders()
            fp.Slider_opacity = v.get_slider_opacity()
        if trans_on == True:
            if v.getIsTFupdated() == 1:
                fp.trans_tex = v.getTFF()
                fp.update_tf = 1
                fp.update_tf2 = 0
                v.window.TFF.updated = 0
            elif v.getIsTFupdated2() == 1:
                fp.trans_tex = v.getTFF2()
                fp.update_tf = 0
                fp.update_tf2 = 1
                v.window.TFF2.updated = 0
            fp.TF_bandwidth = v.getTFBW()

    register_function_package(function_package)

    if merge_func != '':
        input_package = return_package.copy()
        input_package.set_data_range(input_package.full_data_range)

        # function name check
        if merge_func not in function_code_dict:
            print "Vivaldi warning"
            print "================================="
            print "function: ", merge_func, "does not exist"
            print "================================="
            assert(False)

        # make function package
        merge_function_package = Function_package()
        # set function name
        merge_function_package.set_function_name(merge_func)
        # set work_range
        merge_function_package.work_range = input_package.full_data_range

        def get_merge_package_args(input_package, merge_func):
            def get_argument_list(function_name):
                function_code = function_code_dict[merge_func]
                def get_args(name, code):
                    # slice the argument list out of the function definition
                    idx_start = code.find(name) + len(name) + 1
                    idx_end = code.find(')', idx_start)
                    args = code[idx_start:idx_end]
                    return args.strip()
                function_args = get_args(function_name, function_code)
                if function_args == '':
                    print "Vivaldi warning"
                    print "================================="
                    print "The merge function has no arguments"
                    print "================================="
                    assert(False)
                argument_list = []
                for arg in function_args.split(','):
                    argument_list.append(arg.strip())
                return argument_list
            argument_list = get_argument_list(merge_func)

            argument_package_list = []
            for arg in argument_list:
                argument_package = None
                if arg in AXIS:
                    argument_package = Data_package(arg)
                else:
                    argument_package = input_package.copy()
                argument_package_list.append(argument_package)
            return argument_package_list
        merge_argument_package_list = get_merge_package_args(input_package, merge_func)

        # set arguments
        merge_function_package.set_args(merge_argument_package_list)

        # set return package
        def get_return_dtype(function_name, argument_package_list):
            from Vivaldi_translator_layer import get_return_dtype
            function_code = function_code_dict[function_name]
            return_dtype = get_return_dtype(function_name, argument_package_list, function_code)
            if return_dtype.endswith('_volume'):
                print "Vivaldi_warning"
                print "---------------------------------"
                print "Check your function"
                print "you are trying to return a volume"
                print "return_dtype: ", return_dtype
                print "---------------------------------"
            return return_dtype
        return_dtype = get_return_dtype(merge_func, merge_argument_package_list)
        return_package.set_dtype(return_dtype)
        merge_function_package.output = return_package

        # split count
        def get_split_count(argument_package_list):
            cnt = 1
            for argument_package in argument_package_list:
                uid = argument_package.get_unique_id()
                if uid != '-1':
                    split = argument_package.split
                    for axis in split:
                        cnt *= split[axis]
            return cnt
        n = get_split_count(argument_package_list)

        # ask the scheduler to merge
        scheduler_merge(merge_function_package, n)

    scheduler_retain(return_package)
    return return_package
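
# A minimal, hypothetical calling sketch for parallel().  'render' and
# 'composite' are made-up function names, and volume_package is assumed to
# be a Data_package that was registered elsewhere; nothing in this module
# calls this helper, it only illustrates the expected argument shapes.
def _parallel_usage_sketch(volume_package):
    return parallel(function_name='render',
                    argument_package_list=[volume_package, Data_package('x'), Data_package('y')],
                    work_range={'x': (0, 512), 'y': (0, 512)},
                    output_halo=0,
                    merge_func='composite')
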