def export_parameters_using_attributes(self, resources, datatype, res_type=None):
    """Export scalars or descriptors."""
    islink = res_type == 'LINK'
    attributes = []
    attr_names = []
    # Collect one representative attribute per translated name.
    for resource in resources:
        for attr in resource.attributes:
            if attr.dataset_type == datatype and attr.is_var is False:
                attr.name = translate_attr_name(attr.name)
                if attr.name not in attr_names:
                    attributes.append(attr)
                    attr_names.append(attr.name)

    for attribute in attributes:
        nname = "\nparam " + attribute.name + ':='
        contents = []
        for resource in resources:
            attr = resource.get_attribute(attr_name=attribute.name)
            if attr is None or attr.value is None or attr.dataset_type != datatype:
                continue
            if res_type is None:
                name = resource.name
            elif islink is True and self.links_as_name is False:
                name = get_link_name_for_param(resource)
            else:
                name = ""
            contents.append("\n " + self.ff.format(name)
                            + self.ff.format(str(attr.value)))
        # Only write the param block if at least one resource has a value.
        if len(contents) > 0:
            self.output_file_contents.append(nname)
            for st in contents:
                self.output_file_contents.append(st)
            self.output_file_contents.append(';\n')
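# Illustrative sketch only (names and values are hypothetical, not from the
# source): given two nodes "R1" and "R2" carrying a scalar attribute
# "max_flow", export_parameters_using_attributes() appends an AMPL-style
# data block shaped roughly like this to self.output_file_contents
# (exact column widths depend on the self.ff format string):
#
#   param max_flow:=
#    R1                  100.0
#    R2                  250.0
#   ;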
def export_arrays(self, resources):
    """Export arrays."""
    attributes = []
    attr_names = []
    for resource in resources:
        for attr in resource.attributes:
            if attr.dataset_type == 'array' and attr.is_var is False:
                attr.name = translate_attr_name(attr.name)
                if attr.name not in attr_names:
                    attributes.append(attr)
                    attr_names.append(attr.name)
    if len(attributes) > 0:
        # We have to write the complete array information for every single
        # node, because they might have different sizes.
        for resource in resources:
            # This exporter only supports 'rectangular' arrays.
            for attribute in attributes:
                attr = resource.get_attribute(attr_name=attribute.name)
                if attr is None or attr.value is None:
                    continue
                array = json.loads(attr.value)
                dim = self.get_dim(array)
                self.output_file_contents.append(
                    '# Array %s for node %s, ' % (attr.name, resource.name))
                self.output_file_contents.append('dimensions are %s\n\n' % dim)
                # Generate one index set per array dimension.
                indexvars = list(ascii_lowercase)
                for i, n in enumerate(dim):
                    self.output_file_contents.append(
                        "set " + indexvars[i] + '_' + resource.name + '_'
                        + attr.name + '_' + str(i) + ':=\n')
                    for idx in range(n):
                        self.output_file_contents.append(str(idx) + '\n')
                    self.output_file_contents.append(';\n\n')
                self.output_file_contents.append(
                    'param ' + resource.name + '_' + attr.name + ':=')
                # Two-dimensional arrays get a column-index header row.
                ydim = dim[-1]
                if len(dim) > 1:
                    for y in range(ydim):
                        self.output_file_contents.append('{0:20}'.format(y))
                self.output_file_contents.append('\n')
                i = 0
                count = 0
                for item in array:
                    self.output_file_contents.append("\n")
                    self.output_file_contents.append('{0:20}'.format(""))
                    if isinstance(item, list):
                        # Row of a 2-D array: label it, then write its values.
                        self.output_file_contents.append(
                            '[' + str(i) + ',' + str(i) + ']')
                        i += 1
                        for value in item:
                            self.output_file_contents.append(
                                ' {0:20}'.format(value))
                    else:
                        i += 1
                        self.output_file_contents.append('{0:20}'.format(item))
                    count += 1
                self.output_file_contents.append(';\n')
                self.output_file_contents.append('\n\n')
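# Illustrative sketch only (hypothetical node and attribute names): for a
# node "N1" with a 2x2 array attribute "demand" of value [[1, 2], [3, 4]],
# export_arrays() first emits one index set per dimension and then the
# value table, roughly:
#
#   set a_N1_demand_0:=
#   0
#   1
#   ;
#
#   set b_N1_demand_1:=
#   0
#   1
#   ;
#
#   param N1_demand:=  0  1
#
#     [0,0]  1  2
#     [1,1]  3  4
#   ;
#
# (padding shown compressed; the code pads each field to 20 characters).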
def export_timeseries_using_attributes(self, resources, res_type=None):
    """Export time series."""
    islink = res_type == 'LINK'
    attributes = []
    attr_names = []
    attrb_tables = {}
    for resource in resources:
        for attr in resource.attributes:
            if attr.dataset_type == 'timeseries' and attr.is_var is False:
                if len(self.time_index) == 0:
                    raise HydraPluginError(
                        "Missing time axis or start date, end date and "
                        "time step, or bad format")
                attr.name = translate_attr_name(attr.name)
                if attr.name not in attr_names:
                    attrb_tables[attr.name] = attr
                    attributes.append(attr)
                    attr_names.append(attr.name)

    if len(attributes) > 0:
        # Identify the datasets that we need data for.
        dataset_ids = []
        all_res_data = {}
        for attribute in attributes:
            for resource in resources:
                attr = resource.get_attribute(attr_name=attribute.name)
                if attr is not None and attr.dataset_id is not None:
                    dataset_ids.append(attr.dataset_id)
                    all_res_data[attr.dataset_id] = json.loads(attr.value)

        # We need the value at each time in the specified time axis,
        # so identify the relevant timestamps.
        soap_times = []
        for t, timestamp in enumerate(self.time_index.values()):
            soap_times.append(date_to_string(timestamp))

        # Get all the necessary data for all the datasets we have.
        #all_data = self.connection.call('get_multiple_vals_at_time',
        #                                {'dataset_ids': dataset_ids,
        #                                 'timestamps': soap_times})

        for attribute in attributes:
            self.output_file_contents.append("\nparam " + attribute.name + ":\n")
            self.output_file_contents.append(self.write_time())
            for resource in resources:
                attr = resource.get_attribute(attr_name=attribute.name)
                if (attr is None or attr.dataset_id is None
                        or attr.dataset_type != 'timeseries'):
                    continue
                try:
                    all_data = self.get_time_value(attr.value,
                                                   self.time_index.values())
                except Exception as e:
                    log.exception(e)
                    all_data = None
                if all_data is None:
                    raise HydraPluginError(
                        "Error finding value of attribute %s on "
                        "resource %s" % (attr.name, resource.name))
                name = resource.name
                if islink is True and self.links_as_name is False:
                    name = get_link_name(resource)
                self.output_file_contents.append(self.ff.format("\n " + name))
                for timestamp in self.time_index.values():
                    tmp = all_data[timestamp]
                    if isinstance(tmp, list):
                        # Array value at this timestep: join elements and
                        # pad to the array field width.
                        data = "-".join(tmp)
                        ff_ = '{0:<' + self.array_len + '}'
                        data_str = ff_.format(str(data))
                    else:
                        data_str = self.ff.format(str(float(tmp)))
                    self.output_file_contents.append(data_str)
            self.output_file_contents.append(';\n')
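# Illustrative sketch only (hypothetical resources and time axis): for a
# two-step time index and a link attribute "flow",
# export_timeseries_using_attributes() emits one padded row per resource
# under a header row produced by self.write_time(), roughly:
#
#   param flow:
#   <header from write_time(), one column per timestep>
#    link_1_2            1.0                 2.0
#   ;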