def set_time_table(self, times):
    """Cache a formatted date string in ``self.time_table`` for each entry
    of *times* that is not already present.

    Timestamps whose year is the placeholder "XXXX" or "9999" are
    normalised to 1900 before parsing so that ``parse`` accepts them.
    """
    for date_time in times:
        # Idiom fix: original used `if date_time in self.time_table: pass
        # else: ...` — invert the test and skip cached entries directly.
        if date_time in self.time_table:
            continue
        # Normalise placeholder years, then parse/format exactly once
        # (the original duplicated the assignment in three branches).
        if date_time.startswith("XXXX"):
            normalised = date_time.replace("XXXX", "1900")
        elif date_time.startswith("9999"):
            normalised = date_time.replace("9999", "1900")
        else:
            normalised = date_time
        self.time_table[date_time] = date_to_string(parse(normalised))
def set_time_table(self, times):
    """Fill ``self.time_table`` with a formatted date string per timestamp.

    Already-cached timestamps are left untouched. Placeholder years
    ("XXXX" or "9999") are rewritten to 1900 so ``parse`` can handle them.
    """
    placeholder_years = ("XXXX", "9999")
    for date_time in times:
        # Idiom fix: original used `if ... in ...: pass else:`; test for
        # absence instead of branching on an empty `pass`.
        if date_time not in self.time_table:
            raw = date_time
            for placeholder in placeholder_years:
                if raw.startswith(placeholder):
                    raw = raw.replace(placeholder, "1900")
                    break
            self.time_table[date_time] = date_to_string(parse(raw))
def export_timeseries_using_attributes(self, resources, res_type=None):
    """Export time series.

    Appends one ``param <attr name>:`` table to
    ``self.output_file_contents`` per distinct non-variable timeseries
    attribute, with one row per resource and one column per timestamp in
    ``self.time_index``.

    Args:
        resources: iterable of resource objects exposing ``attributes``
            and ``get_attribute``.
        res_type: pass 'LINK' when exporting links so link-naming rules
            apply.

    Raises:
        HydraPluginError: if the time axis is empty/unparsed, or a
            timeseries value cannot be evaluated for a resource.
    """
    islink = res_type == 'LINK'
    attributes = []
    attr_names = []
    attrb_tables = {}
    # Collect the distinct non-variable timeseries attributes.
    for resource in resources:
        for attr in resource.attributes:
            if attr.dataset_type == 'timeseries' and attr.is_var is False:
                # BUG FIX: was `len(self.time_index) is 0` — identity
                # comparison against an int literal is unreliable.
                if len(self.time_index) == 0:
                    raise HydraPluginError(
                        "Missing time axis or start date, end date and "
                        "time step or bad format")
                attr.name = translate_attr_name(attr.name)
                if attr.name not in attr_names:
                    attrb_tables[attr.name] = attr
                    attributes.append(attr)
                    attr_names.append(attr.name)
    if len(attributes) > 0:
        dataset_ids = []
        all_res_data = {}
        # Identify the datasets that we need data for.
        # NOTE(review): dataset_ids / all_res_data / soap_times feed only
        # the commented-out bulk call below — dead until it is revived.
        for attribute in attributes:
            for resource in resources:
                attr = resource.get_attribute(attr_name=attribute.name)
                if attr is not None and attr.dataset_id is not None:
                    dataset_ids.append(attr.dataset_id)
                    all_res_data[attr.dataset_id] = json.loads(attr.value)
        # We need to get the value at each time in the specified time
        # axis, so we need to identify the relevant timestamps.
        soap_times = []
        for t, timestamp in enumerate(self.time_index.values()):
            soap_times.append(date_to_string(timestamp))
        # Get all the necessary data for all the datasets we have.
        #all_data = self.connection.call('get_multiple_vals_at_time',
        #                               {'dataset_ids':dataset_ids,
        #                                'timestamps' : soap_times})
        for attribute in attributes:
            self.output_file_contents.append("\nparam " + attribute.name + ":\n")
            self.output_file_contents.append(self.write_time())
            for resource in resources:
                attr = resource.get_attribute(attr_name=attribute.name)
                if attr is None or attr.dataset_id is None or attr.dataset_type != 'timeseries':
                    continue
                try:
                    all_data = self.get_time_value(attr.value,
                                                   self.time_index.values())
                # BUG FIX: `except Exception, e:` is Python-2-only syntax;
                # `as e` works on Python 2.6+ and Python 3.
                except Exception as e:
                    log.exception(e)
                    all_data = None
                if all_data is None:
                    # BUG FIX: the adjacent literals concatenated with no
                    # space ("...onresource..."); add the missing space.
                    raise HydraPluginError(
                        "Error finding value attribute %s on "
                        "resource %s" % (attr.name, resource.name))
                name = resource.name
                # Truthiness fix: `islink is True` -> `islink`.
                if islink and self.links_as_name is False:
                    name = get_link_name(resource)
                nname = "\n " + name  # stray semicolon removed
                self.output_file_contents.append(self.ff.format(nname))
                for timestamp in self.time_index.values():
                    tmp = all_data[timestamp]
                    if isinstance(tmp, list):
                        # List values are joined with '-' and padded to
                        # the configured array column width.
                        data = "-".join(tmp)
                        ff_ = '{0:<' + self.array_len + '}'
                        data_str = ff_.format(str(data))
                    else:
                        data = str(tmp)
                        data_str = self.ff.format(str(float(data)))
                    self.output_file_contents.append(data_str)
                self.output_file_contents.append(';\n')
def export_timeseries_using_attributes(self, resources, res_type=None):
    """Export time series.

    Writes one ``param <attr name>:`` table into
    ``self.output_file_contents`` for every distinct non-variable
    timeseries attribute found on *resources*: a header row of times,
    then one row per resource with a value per timestamp of
    ``self.time_index``.

    Args:
        resources: iterable of resource objects with ``attributes`` and
            ``get_attribute``.
        res_type: 'LINK' when the resources are links, enabling
            link-specific naming.

    Raises:
        HydraPluginError: when the time axis is empty or a value cannot
            be computed for a resource/attribute pair.
    """
    islink = res_type == 'LINK'
    attributes = []
    attr_names = []
    attrb_tables = {}
    # Gather the unique non-variable timeseries attributes.
    for resource in resources:
        for attr in resource.attributes:
            if attr.dataset_type == 'timeseries' and attr.is_var is False:
                # BUG FIX: `len(...) is 0` compared identity, not value.
                if len(self.time_index) == 0:
                    raise HydraPluginError(
                        "Missing time axis or start date, end date and "
                        "time step or bad format")
                attr.name = translate_attr_name(attr.name)
                if attr.name not in attr_names:
                    attrb_tables[attr.name] = attr
                    attributes.append(attr)
                    attr_names.append(attr.name)
    if len(attributes) > 0:
        dataset_ids = []
        all_res_data = {}
        # Identify the datasets that we need data for.
        # NOTE(review): these collections only feed the commented-out
        # bulk fetch below; dead code until that call is restored.
        for attribute in attributes:
            for resource in resources:
                attr = resource.get_attribute(attr_name=attribute.name)
                if attr is not None and attr.dataset_id is not None:
                    dataset_ids.append(attr.dataset_id)
                    all_res_data[attr.dataset_id] = json.loads(attr.value)
        # We need to get the value at each time in the specified time
        # axis, so we need to identify the relevant timestamps.
        soap_times = []
        for t, timestamp in enumerate(self.time_index.values()):
            soap_times.append(date_to_string(timestamp))
        # Get all the necessary data for all the datasets we have.
        #all_data = self.connection.call('get_multiple_vals_at_time',
        #                               {'dataset_ids':dataset_ids,
        #                                'timestamps' : soap_times})
        for attribute in attributes:
            self.output_file_contents.append("\nparam " + attribute.name + ":\n")
            self.output_file_contents.append(self.write_time())
            for resource in resources:
                attr = resource.get_attribute(attr_name=attribute.name)
                if attr is None or attr.dataset_id is None or attr.dataset_type != 'timeseries':
                    continue
                try:
                    all_data = self.get_time_value(attr.value,
                                                   self.time_index.values())
                # BUG FIX: Python-2-only `except Exception, e:` replaced
                # with `as e` (valid on Python 2.6+ and Python 3).
                except Exception as e:
                    log.exception(e)
                    all_data = None
                if all_data is None:
                    # BUG FIX: missing space between the two adjacent
                    # literals produced "...onresource ..." in the message.
                    raise HydraPluginError(
                        "Error finding value attribute %s on "
                        "resource %s" % (attr.name, resource.name))
                name = resource.name
                # Truthiness fix: `islink is True` -> `islink`.
                if islink and self.links_as_name is False:
                    name = get_link_name(resource)
                nname = "\n " + name
                self.output_file_contents.append(self.ff.format(nname))
                for timestamp in self.time_index.values():
                    tmp = all_data[timestamp]
                    if isinstance(tmp, list):
                        # List values: dash-join, pad to array column width.
                        data = "-".join(tmp)
                        ff_ = '{0:<' + self.array_len + '}'
                        data_str = ff_.format(str(data))
                    else:
                        data = str(tmp)
                        data_str = self.ff.format(str(float(data)))
                    self.output_file_contents.append(data_str)
                self.output_file_contents.append(';\n')