def check_header(file, header):
    """
    Check for common mistakes in headers:
        Duplicate columns
        Empty columns
    """
    if isinstance(header, str):
        header = header.split(',')

    for i, h in enumerate(header):
        if h.strip() == '':
            raise HydraPluginError(
                "Malformed header in %s: Column %s is empty" % (file, i))

    individual_headings = []
    dupe_headings = []
    for k in header:
        if k not in individual_headings:
            individual_headings.append(k)
        else:
            dupe_headings.append(k)
    if len(dupe_headings) > 0:
        raise HydraPluginError(
            "Malformed header in file %s: Duplicate columns: %s"
            % (file, dupe_headings))

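# Illustrative usage sketch (not part of the plugin): check_header accepts
# either a raw header string or a pre-split list. A duplicated or empty
# column name raises HydraPluginError.
def _demo_check_header():
    check_header('rules.csv', 'Name,Type,Resource,Text,Description')  # OK
    check_header('rules.csv', ['Name', 'Type', 'Resource'])           # OK
    try:
        check_header('rules.csv', 'Name,Type,Name')  # duplicate column
    except HydraPluginError as e:
        log.info("Expected failure: %s", e)
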
def validate_plugin_xml(plugin_xml_file_path):
    log.info('Validating plugin xml file (%s).' % plugin_xml_file_path)

    try:
        with open(plugin_xml_file_path) as f:
            plugin_xml = f.read()
    except IOError:
        raise HydraPluginError("Couldn't find plugin.xml.")

    try:
        plugin_xsd_path = os.path.expanduser(
            config.get('plugin', 'plugin_xsd_path'))
        log.info("Plugin Input xsd: %s", plugin_xsd_path)
        xmlschema_doc = etree.parse(plugin_xsd_path)
        xmlschema = etree.XMLSchema(xmlschema_doc)
        xml_tree = etree.fromstring(plugin_xml)
    except XMLSyntaxError as e:
        raise HydraPluginError("There is an error in your XML syntax: %s" % e)
    except ParseError as e:
        raise HydraPluginError("There is an error in your XML: %s" % e)
    except Exception as e:
        log.exception(e)
        raise HydraPluginError(
            "An unknown error occurred with the plugin xsd: %s" % e)

    try:
        xmlschema.assertValid(xml_tree)
    except etree.DocumentInvalid as e:
        raise HydraPluginError('Plugin validation failed: %s' % e)

    log.info("Plugin XML OK")

def check_args(args):
    if args.network is None:
        raise HydraPluginError('No network is specified')
    elif args.scenario is None:
        raise HydraPluginError('No scenario is specified')
    elif args.gms_file is None:
        raise HydraPluginError('Gams file is not specified')
    elif not os.path.isfile(args.gms_file):
        raise HydraPluginError('Gams file ' + args.gms_file
                               + ' does not exist')

def parse_time_step(self, time_step, target='s'):
    """
    Read in the time step and convert it to seconds.
    """
    log.info("Parsing time step %s", time_step)
    # Extract the numerical value from the string using a regex
    value = re.findall(r'\d+', time_step)[0]
    valuelen = len(value)
    try:
        value = float(value)
    except ValueError:
        raise HydraPluginError(
            "Unable to extract number of time steps (%s) from time step %s"
            % (value, time_step))

    # Keep the unit part of the string in its own variable so it does not
    # shadow the module-level units converter used below.
    unit_part = time_step[valuelen:].strip()
    period = get_time_period(unit_part)
    log.info("Time period is %s", period)
    converted_time_step = units.convert(value, period, target)
    log.info("Time step is %s %s", converted_time_step, target)

    return float(converted_time_step), value, period

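# Illustrative usage sketch (not part of the plugin; assumes the module-level
# units converter noted above, and "exporter" stands for whichever class
# defines parse_time_step):
def _demo_parse_time_step(exporter):
    # "1 mon" -> numeric value 1.0, period 'mon', and the step in seconds.
    seconds, value, period = exporter.parse_time_step('1 mon', target='s')
    log.info("%s %s = %s seconds", value, period, seconds)
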
def __init__(self, args, connection=None):
    import gdxcc
    self.gdxcc = gdxcc
    self.gdx_handle = gdxcc.new_gdxHandle_tp()
    rc = gdxcc.gdxCreate(self.gdx_handle, gdxcc.GMS_SSSIZE)
    log.info("gdxCreate returned: %s", rc)
    if rc[0] == 0:
        raise HydraPluginError('Could not find GAMS installation.')

    self.symbol_count = 0
    self.element_count = 0
    self.gdx_variables = dict()
    self.gams_units = dict()
    self.gdx_ts_vars = dict()
    self.network_id = args.network_id
    self.scenario_id = args.scenario_id
    self.network = None
    self.res_scenario = None
    self.attrs = dict()
    self.time_axis = dict()
    self.gms_data = []

    self.connection = connection
    if self.connection is None:
        self.connect(args)

    attrslist = self.connection.call('get_all_attributes', {})
    for attr in attrslist:
        self.attrs[attr.id] = attr.name

def run_gams_model(args):
    log.info("Running GAMS model .....")
    cur_time = datetime.now().replace(microsecond=0)
    write_progress(6, steps)
    working_directory = os.path.dirname(args.gms_file)
    if working_directory == '':
        working_directory = '.'
    model = GamsModel(args.gams_path, working_directory)
    write_progress(7, steps)
    model.add_job(args.gms_file)
    write_progress(8, steps)
    model.run()
    write_progress(9, steps)
    log.info("Running GAMS model finished")

    # If no result file is provided, look for one automatically in the GAMS
    # working directory: take any .gdx file modified after the run began.
    if args.gdx_file is None:
        log.info("Extracting results from %s.", working_directory)
        files_list = get_files_list(working_directory, '.gdx')
        for file_ in files_list:
            dt = parser.parse(files_list[file_])
            delta = (dt - cur_time).total_seconds()
            if delta >= 0:
                args.gdx_file = os.path.join(working_directory, file_)
        if args.gdx_file is None:
            raise HydraPluginError('Result file is not provided/found.')

def export_resourcegroupitems(self, scenario, group_map, node_map, link_map):
    """
    Export the members of a group in a given scenario.
    """
    group_member_heading = "Name, Type, Member\n"
    group_member_entries = []
    for group_member in scenario.resourcegroupitems:
        group_name = group_map[group_member.group_id]
        member_type = group_member.ref_key
        if member_type == 'LINK':
            member_name = link_map[group_member.ref_id]
        elif member_type == 'NODE':
            member_name = node_map[group_member.ref_id]
        elif member_type == 'GROUP':
            member_name = group_map[group_member.ref_id]
        else:
            raise HydraPluginError('Unrecognised group member type: %s'
                                   % member_type)

        group_member_str = "%(group)s, %(type)s, %(member_name)s\n" % {
            'group': group_name,
            'type': member_type,
            'member_name': member_name,
        }
        group_member_entries.append(group_member_str)

    # Use a context manager so the file is closed even on error.
    with open(os.path.join(scenario.target_dir,
                           "group_members.csv"), 'w') as group_member_file:
        group_member_file.write(group_member_heading)
        group_member_file.writelines(group_member_entries)

def check_args(args):
    try:
        int(args.network_id)
    except (TypeError, ValueError):
        raise HydraPluginError('No network is specified')
    try:
        int(args.scenario_id)
    except (TypeError, ValueError):
        raise HydraPluginError('No scenario is specified')

    output = os.path.dirname(args.output)
    if output == '':
        output = '.'
    if not os.path.exists(output):
        raise HydraPluginError('Output file directory ' + output
                               + ' does not exist')

def get_time_axis(self, start_time, end_time, time_step, time_axis=None):
    """
    Create a list of datetimes based on a start time, end time and time
    step. If such a list is already passed in, then this is not necessary.

    Often either the start_time, end_time and time_step are passed into an
    app, or the time_axis is passed in directly. This function returns a
    time axis in both situations.
    """
    if time_axis is not None:
        actual_dates_axis = []
        for t in time_axis:
            # If the user has entered the time_axis with commas, remove them.
            t = t.replace(',', '').strip()
            if t == '':
                continue
            actual_dates_axis.append(get_datetime(t))
        return actual_dates_axis

    else:
        if start_time is None:
            raise HydraPluginError("A start time must be specified")
        if end_time is None:
            raise HydraPluginError("An end time must be specified")
        if time_step is None:
            raise HydraPluginError("A time-step must be specified")

        start_date = get_datetime(start_time)
        end_date = get_datetime(end_time)
        delta_t, value, units = self.parse_time_step(time_step)

        time_axis = [start_date]
        value = int(value)
        while start_date < end_date:
            # Months and years are a special case, so treat them differently
            if units.lower() == "mon":
                start_date = start_date + relativedelta(months=value)
            elif units.lower() == "yr":
                start_date = start_date + relativedelta(years=value)
            else:
                start_date += timedelta(seconds=delta_t)
            time_axis.append(start_date)
        return time_axis

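# Illustrative sketch ("exporter" stands for whichever class owns
# get_time_axis): a monthly step advances with relativedelta, so month
# lengths are respected.
def _demo_get_time_axis(exporter):
    axis = exporter.get_time_axis('2015-01-01', '2015-04-01', '1 mon')
    # Expected: [2015-01-01, 2015-02-01, 2015-03-01, 2015-04-01]
    log.info("Generated %s time steps", len(axis))
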
def validate_value(value, restriction_dict):
    if restriction_dict is None or restriction_dict == {}:
        return

    try:
        util.validate_value(restriction_dict, value)
    except HydraError as e:
        log.exception(e)
        raise HydraPluginError(str(e))

def open_gdx_file(self, filename):
    """
    Open the GDX file and read some basic information.
    """
    if filename is None:
        raise HydraPluginError("gdx file not specified.")

    filename = os.path.abspath(filename)
    self.gdxcc.gdxOpenRead(self.gdx_handle, filename)

    x, self.symbol_count, self.element_count = \
        self.gdxcc.gdxSystemInfo(self.gdx_handle)

    if x != 1:
        raise HydraPluginError('GDX file could not be opened.')
    log.info('Importing %s symbols and %s elements.',
             self.symbol_count, self.element_count)

def get_network(self, is_licensed):
    net = self.connection.call(
        'get_network', {
            'network_id': self.network_id,
            'include_data': 'Y',
            'template_id': self.template_id,
            'scenario_ids': [self.scenario_id]
        })
    self.hydranetwork = net
    log.info("Network retrieved")

    if net.scenarios is not None:
        for s in net.scenarios:
            if s.id == self.scenario_id:
                self.scenario = s

    self.network = GAMSnetwork()
    log.info("Loading net into gams network.")
    self.network.load(net, self.attrs)

    if is_licensed is False:
        if len(self.network.nodes) > 20:
            raise HydraPluginError(
                "This licence is a limited demo (maximum 20 nodes and"
                " 20 time steps). Please contact the software vendor"
                " ([email protected]) to obtain a full licence.")
        if self.time_axis is not None and len(self.time_axis) > 20:
            raise HydraPluginError(
                "This licence is a limited demo (maximum 20 nodes and"
                " 20 time steps). Please contact the software vendor"
                " ([email protected]) to obtain a full licence.")

    log.info("Gams network loaded")
    self.network.gams_names_for_links(use_link_name=self.links_as_name)
    log.info("Names for links retrieved")

    self.output = """* Data exported from Hydra using GAMSplugin.
* (c) Copyright 2015, University of Manchester
*
* %s: %s
* Network-ID: %s
* Scenario-ID: %s
*******************************************************************************

""" % (self.network.name, self.network.description,
       self.network.ID, self.network.scenario_id)

def set_network(self, is_licensed, network):
    """
    Set the network and scenario directly, rather than fetching them
    from the server.
    """
    self.is_licensed = is_licensed
    self.network = network
    self.res_scenario = self.network.scenarios[0].resourcescenarios
    if is_licensed is False:
        if len(self.network.nodes) > 20:
            raise HydraPluginError(
                "This licence is a limited demo (maximum 20 nodes and"
                " 20 time steps). Please contact the software vendor"
                " ([email protected]) to obtain a full licence.")

def load_gams_file(self, gms_file):
    """Read in the .gms file."""
    if gms_file is None:
        raise HydraPluginError(".gms file not specified.")

    gms_file = os.path.abspath(gms_file)
    gms_data = import_gms_data(gms_file)
    self.gms_data = gms_data.split('\n')

    if self.network_id is None or self.scenario_id is None:
        self.network_id, self.scenario_id = self.get_ids_from_gms()

def load_network(self, is_licensed, network_id=None, scenario_id=None):
    """
    Load the network and scenario from the server. If the network has been
    set externally (to save fetching it again) then simply set
    self.res_scenario using the existing network.
    """
    # Use the network id specified by the user; if it is None, fall back to
    # the network id read from the gms file.
    self.is_licensed = is_licensed
    try:
        network_id = int(network_id)
    except (TypeError, ValueError):
        network_id = self.network_id
    if network_id is None:
        raise HydraPluginError("No network specified.")

    try:
        scenario_id = int(scenario_id)
    except (TypeError, ValueError):
        scenario_id = self.scenario_id
    if scenario_id is None:
        raise HydraPluginError("No scenario specified.")

    self.network = self.connection.call(
        'get_network', {
            'network_id': int(network_id),
            'include_data': 'Y',
            'scenario_ids': [int(scenario_id)],
            'template_id': None
        })
    self.res_scenario = self.network.scenarios[0].resourcescenarios
    if is_licensed is False:
        if len(self.network.nodes) > 20:
            raise HydraPluginError(
                "This licence is a limited demo (maximum 20 nodes and"
                " 20 time steps). Please contact the software vendor"
                " ([email protected]) to obtain a full licence.")

def get_file_data(file):
    """
    Take a csv file as an argument and return an array where each element
    is a line in the csv.
    """
    file_data = None
    if file is None:
        log.warn("No file specified")
        return None
    file = os.path.realpath(file)
    log.info("Reading file data from: %s", file)
    with open(file, mode='r') as csv_file:
        raw_file_data = csv_file.read()

    # Normalise the separators: strip spaces around commas.
    file_data = re.sub(' *, *', ',', raw_file_data)
    file_data = file_data.split('\n')
    if len(file_data) == 0:
        log.warn("File contains no data")

    new_file_data = []
    bad_lines = []
    for i, line in enumerate(file_data):
        line = line.strip()
        # Ignore comments and empty lines
        if len(line) == 0 or line[0] == '#':
            continue
        try:
            # Replace non-ascii characters with spaces
            line = ''.join([x if ord(x) < 128 else ' ' for x in line])
            new_file_data.append(line)
        except UnicodeDecodeError as e:
            # If there are unknown characters in this line, save the line
            # and the column in the line where the bad character occurred.
            bad_lines.append((i + 1, e.start))

    # Complain about the lines that contain bad characters.
    if len(bad_lines) > 0:
        lines = [a[0] for a in bad_lines]
        raise HydraPluginError("Lines %s in %s contain non-ascii characters"
                               % (lines, file))

    return new_file_data

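# Illustrative sketch: given a file 'nodes.csv' (hypothetical path) containing
#
#   # a comment line
#   Node1 , 1, 2
#   Node2 , 3, 4
#
# get_file_data drops the comment line and normalises the separators,
# returning ['Node1,1,2', 'Node2,3,4'].
def _demo_get_file_data():
    lines = get_file_data('nodes.csv')
    log.info("Read %s data lines", len(lines))
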
def parse_time_index(self):
    """
    Read the time index of the GAMS model used. This only works for models
    where data is exported from Hydra using GAMSexport.
    """
    time_index_type = None
    for i, line in enumerate(self.gms_data):
        if line.strip().startswith('Parameter timestamp(yr, mn, dy)'):
            time_index_type = 'date'
            break
        elif line.strip().startswith('Parameter timestamp(t)'):
            time_index_type = 't_index'
            break

    if time_index_type == "t_index":
        i += 2
        line = self.gms_data[i]
        while line.split('(', 1)[0].strip() == 'timestamp':
            idx = int(line.split('"')[1])
            timestamp = ordinal_to_timestamp(Decimal(line.split()[2]))
            timestamp = date_to_string(timestamp)
            self.time_axis.update({idx: timestamp})
            i += 1
            line = self.gms_data[i]
    elif time_index_type == "date":
        i += 2
        line = self.gms_data[i]
        while line.strip().startswith("timestamp"):
            line_parts = line.split("=")
            timestamp = ordinal_to_timestamp(
                Decimal(line_parts[1].replace(";", "")))
            idx = str(timestamp.year) + "." + str(timestamp.month) \
                + "." + str(timestamp.day)
            timestamp = date_to_string(timestamp)
            self.time_axis.update({idx: timestamp})
            i += 1
            line = self.gms_data[i]

    if self.is_licensed is False:
        if len(self.time_axis) > 20:
            raise HydraPluginError(
                "This licence is a limited demo (maximum 20 nodes and"
                " 20 time steps). Please contact the software vendor"
                " ([email protected]) to obtain a full licence.")

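# For reference, the two timestamp formats parse_time_index understands look
# like this in the exported .gms file (ordinal values are illustrative):
#
#   Parameter timestamp(t) ;
#    timestamp("0") = 735599.5 ;
#    timestamp("1") = 735600.5 ;
#
# or, with the date index:
#
#   Parameter timestamp(yr, mn, dy) ;
#    timestamp("2015","3", "14") = 735671.5 ;
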
def is_timeseries(data):
    """
    Check whether a piece of data is a timeseries by trying to guess its
    date format. If that fails, it's not a time series.
    """
    try:
        date = data[0][0]

        global time_formats
        timeformat = time_formats.get(date)
        if timeformat is None:
            timeformat = hydra_dateutil.guess_timefmt(date)
            time_formats[date] = timeformat

        return timeformat is not None
    except Exception:
        raise HydraPluginError("Unable to parse timeseries %s" % data)

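# Illustrative sketch (assumes hydra_dateutil.guess_timefmt returns None for
# a bare number): only the first column of the first row is inspected.
def _demo_is_timeseries():
    assert is_timeseries([['2015-01-01', '1.0'], ['2015-01-02', '2.0']])
    assert not is_timeseries([['1.0', '2.0']])
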
def __init__(self, gamspath, working_directory):
    if gamspath is None:
        gamspath = get_gams_path()
    log.info("Using GAMS Path: %s", gamspath)

    try:
        real_path = os.path.realpath(os.path.abspath(gamspath))
        api_path = os.path.join(real_path, 'apifiles', 'Python', 'api')
        if api_path not in sys.path:
            sys.path.insert(0, api_path)
        from gams import workspace
        self.ws = workspace.GamsWorkspace(
            working_directory=working_directory,
            system_directory=gamspath,
            debug=1)
    except Exception as e:
        log.exception(e)
        raise HydraPluginError(
            "Unable to import modules from gams. Please ensure that"
            " GAMS version 24.1 or later is installed.")

def read_rule_line(self, line, field_idx):
    """
    Read a single line from the rules file and return a rule object.
    """
    rule_data = line.split(',')
    rule_name = rule_data[field_idx['name']].strip()

    # Check if the rule already exists.
    if rule_name in self.Rules:
        rule = self.Rules[rule_name]
        rule_id = rule.id
        log.debug('rule %s exists.', rule_name)
    else:
        ref_key = rule_data[field_idx['type']].strip().upper()
        ref_name = rule_data[field_idx['resource']].strip()
        rule_id = None
        # Initialise ref_id so an unknown reference type cannot leave it
        # unbound below.
        ref_id = None
        try:
            if ref_key == 'NODE':
                ref_id = self.Nodes[ref_name]
            elif ref_key == 'LINK':
                ref_id = self.Links[ref_name]
            elif ref_key == 'GROUP':
                ref_id = self.Groups[ref_name]
            else:
                log.critical("Unknown reference type %s. Carrying on"
                             % ref_key)
        except KeyError:
            raise HydraPluginError(
                "Rule error: Unknown %s named %s. Please check the name"
                " is correct." % (ref_key.lower(), ref_name))

        rule = dict(id=rule_id,
                    name=rule_name,
                    description=rule_data[field_idx['description']].strip(),
                    text=rule_data[field_idx['text']].strip(),
                    ref_key=ref_key,
                    ref_id=ref_id)

    return rule

def read_rule_file(self, file):
    """
    Read rules from a rule file. The rule file looks like:

    Name,  Type, Resource, Text,      Description
    rule1, Node, Node1,    some text, Description of some text
    ...
    """
    rule_data = get_file_data(file)

    keys = rule_data[0].split(',')
    check_header(file, keys)
    data = rule_data[1:]

    # Indicates what the mandatory columns are and where
    # we expect to see them.
    field_idx = {
        'name': 0,
        'type': 1,
        'resource': 2,
        'text': 3,
        'description': 4,
    }

    for line_num, line in enumerate(data):
        # Skip any empty lines
        if line.strip() in self.ignorelines:
            continue
        try:
            rule = self.read_rule_line(line, field_idx)
        except Exception as e:
            log.exception(e)
            raise HydraPluginError(
                "An error has occurred in file %s at line %s: %s"
                % (os.path.split(file)[-1], line_num + 3, e))

        self.Rules[rule['name']] = rule

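# A minimal rules file accepted by read_rule_file (column order fixed by
# field_idx; names and text are illustrative):
#
#   Name,  Type, Resource, Text,      Description
#   rule1, Node, Node1,    some text, Description of some text
#   rule2, Link, Link3,    more text, Another description
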
def __init__(self, url=None, session_id=None):
    self.errors = []
    self.warnings = []
    self.files = []

    self.connection = JsonConnection(url)
    if session_id is not None:
        log.info("Using existing session %s", session_id)
        self.connection.session_id = session_id
    else:
        self.connection.login()

    all_attributes = self.call('get_all_attributes')
    self.attributes = {}
    if not all_attributes:
        raise HydraPluginError(
            "An error has occurred. Please check that the"
            " network and all attributes are available.")

    for attr in all_attributes:
        self.attributes[attr.id] = attr.name

    self.num_steps = 7

def run(self):
    """
    Run the GAMS model and raise an error if something goes wrong.
    """
    self.job.run(checkpoint=self.cp)
    if self.model_name is not None:
        try:
            status = self.job.out_db["ms"].find_record().value
            s_status = self.job.out_db["Sos"].find_record().value
        except Exception:
            log.warn("Could not check solver and model termination status.")
            return

        log.warn("status: %s", status)
        modelerror = self.check_model_status(status)
        solvererror = self.check_solver_status(s_status)
        if modelerror is not None or solvererror is not None:
            raise HydraPluginError("Model error: " + str(modelerror)
                                   + "\nSolver error: " + str(solvererror))

def create_array(dataset, restriction_dict={}):
    """
    Create a (multi-dimensional) array from csv data.
    """
    # First column is always the array dimensions
    arr_shape = dataset[0]
    # The actual data is everything after column 0
    eval_dataset = []
    for d in dataset[1:]:
        try:
            d = eval(d)
        except Exception:
            d = str(d)
        eval_dataset.append(d)

    # If the dimensions are not set, we assume the array is 1D
    if arr_shape != '':
        array_shape = tuple([int(a) for a in arr_shape.split(" ")])
    else:
        array_shape = (len(eval_dataset), )

    # Reshape the array back to its correct dimensions
    arr = np.array(eval_dataset)
    try:
        arr = np.reshape(arr, array_shape)
    except Exception:
        raise HydraPluginError("You have an error with your array data."
                               " Please ensure that the dimension is correct."
                               " (array = %s, dimension = %s)"
                               % (arr, array_shape))

    validate_value(arr.tolist(), restriction_dict)

    arr = json.dumps(arr.tolist())

    return arr

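# Illustrative sketch: the first csv column carries the space-separated array
# shape and the remaining columns the flattened values, so a 2x2 array is
# written as '2 2', '1', '2', '3', '4'.
def _demo_create_array():
    arr_json = create_array(['2 2', '1', '2', '3', '4'])
    assert arr_json == '[[1, 2], [3, 4]]'
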
def xsd_validate(template_file):
    """
    Validate a template against the xsd. Return the xml tree if successful.
    """
    with open(template_file) as f:
        xml_template = f.read()

    template_xsd_path = os.path.expanduser(
        config.get('templates', 'template_xsd_path'))
    log.info("Template xsd: %s", template_xsd_path)
    xmlschema_doc = etree.parse(template_xsd_path)
    xmlschema = etree.XMLSchema(xmlschema_doc)
    xml_tree = etree.fromstring(xml_template)

    try:
        xmlschema.assertValid(xml_tree)
    except etree.DocumentInvalid as e:
        raise HydraPluginError('Template validation failed: %s' % e)

    log.info("Template XSD validation successful.")

    return xml_tree

def check_args(args):
    try:
        int(args.network_id)
    except (TypeError, ValueError):
        raise HydraPluginError('No network is specified.')
    try:
        int(args.scenario_id)
    except (TypeError, ValueError):
        raise HydraPluginError('No scenario is specified.')

    if args.gms_file is None:
        raise HydraPluginError('Gams file is not specified.')
    elif not os.path.isfile(os.path.expanduser(args.gms_file)):
        raise HydraPluginError('Gams file ' + args.gms_file + ' not found.')
    elif args.output is None:
        args.output = get_input_file_name(args.gms_file)
        if args.output is None:
            raise HydraPluginError('No output file specified')
    elif not os.path.exists(os.path.dirname(os.path.realpath(args.output))):
        raise HydraPluginError('Output file directory '
                               + os.path.dirname(args.output)
                               + ' does not exist.')

def create_dataset(value,
                   resource_attr,
                   unit_id,
                   resource_name,
                   metadata,
                   restriction_dict,
                   expand_filenames,
                   basepath,
                   file_dict,
                   default_name,
                   timezone):

    resourcescenario = dict()

    global seasonal_key
    if seasonal_key is None:
        seasonal_key = config.get('DEFAULT', 'seasonal_key', '9999')

    if metadata.get('name'):
        dataset_name = metadata.pop('name')
    else:
        dataset_name = 'Import CSV data'

    dataset = dict(
        id=None,
        type=None,
        unit_id=None,
        name=dataset_name,
        value=None,
        hidden='N',
        metadata=None,
    )

    resourcescenario['attr_id'] = resource_attr['attr_id']
    resourcescenario['resource_attr_id'] = resource_attr['id']

    data_columns = None
    try:
        float(value)
        dataset['type'] = 'scalar'
        scal = create_scalar(value, restriction_dict)
        dataset['value'] = scal
    except ValueError:
        # Check if it's an array or timeseries by first seeing if the value
        # points to a valid file.
        value = value.replace('\\', '/')
        try:
            filedata = []
            if expand_filenames:
                full_file_path = os.path.join(basepath, value)
                if file_dict.get(full_file_path) is None:
                    with open(full_file_path) as f:
                        filedata = []
                        for l in f:
                            l = re.sub(r'\s*,\s*', ',', l)
                            l = re.sub(r'^ *', '', l)
                            l = re.sub(r' *$', '', l)
                            l = l.replace('\n', '').replace('\r', '').split(',')
                            filedata.append(l)
                    file_dict[full_file_path] = filedata
                else:
                    filedata = file_dict[full_file_path]

                # The name of the resource identifies the data for it.
                # Once the correct line(s) have been identified, remove the
                # name from the start of the line.
                data = []
                for l in filedata:
                    l_resource_name = l[0]
                    if l_resource_name == resource_name:
                        data.append(l[1:])

                if len(data) == 0:
                    log.info('%s: No data found in file %s'
                             % (resource_name, value))
                    raise HydraPluginError('%s: No data found in file %s'
                                           % (resource_name, value))
                else:
                    if is_timeseries(data):
                        data_columns = get_data_columns(filedata)
                        ts = create_timeseries(
                            data,
                            restriction_dict=restriction_dict,
                            data_columns=data_columns,
                            filename=value,
                            timezone=timezone)
                        dataset['type'] = 'timeseries'
                        dataset['value'] = ts
                    else:
                        dataset['type'] = 'array'
                        if len(filedata) > 0:
                            try:
                                dataset['value'] = create_array(
                                    data[0], restriction_dict)
                            except Exception as e:
                                log.exception(e)
                                raise HydraPluginError(
                                    "There is a value error in %s. Please"
                                    " check value %s is correct."
                                    % (value, data[0]))
                        else:
                            dataset['value'] = None
            else:
                raise IOError
        except IOError:
            dataset['type'] = 'descriptor'
            desc = create_descriptor(value, restriction_dict)
            dataset['value'] = desc

    if unit_id is not None:
        dataset['unit_id'] = unit_id

    dataset['name'] = default_name

    resourcescenario['dataset'] = dataset

    m = {}
    if metadata:
        m = metadata
    if data_columns:
        m['data_struct'] = '|'.join(data_columns)
    m = json.dumps(m)
    dataset['metadata'] = m

    return resourcescenario

def write_time_index(self):
    """
    Using the time axis determined in __init__, write the time index to
    the output file.
    """
    if self.time_axis is None:
        return
    log.info("Writing time index")
    self.times_table = {}
    try:
        if self.use_gams_date_index is True:
            years, months, days = self.get_years_months_days()

            t = 'SETS\n yr /\n'
            for year in years:
                t = t + str(year) + '\n'
            t = t + '/\n\n'
            t = t + 'SETS\n mn /\n'
            for month in months:
                t = t + str(month) + '\n'
            t = t + '/\n\n'
            t = t + 'SETS\n dy /\n'
            for day in days:
                t = t + str(day) + '\n'
            time_index = [t + '\n\n']
        else:
            time_index = ['SETS\n\n', '* Time index\n', 't time index /\n']

        t = 0
        for date in self.time_axis:
            self.time_index.append(date)
            if self.use_gams_date_index is True:
                _t = str(date.year) + " . " + str(date.month) + " . " \
                    + str(date.day)
                self.times_table[date] = _t
            else:
                time_index.append('%s\n' % t)
                self.times_table[date] = t
            t += 1

        time_index.append('/\n\n')
        time_index.append(
            '* define time steps dependent on time index (t)\n\n')
        if self.use_gams_date_index is True:
            time_index.append('Parameter timestamp(yr, mn, dy) ;\n\n')
        else:
            time_index.append('Parameter timestamp(t) ;\n\n')

        for t, date in enumerate(self.time_index):
            if self.use_gams_date_index is True:
                keyy = str(date.year) + "\",\"" + str(date.month) \
                    + "\", \"" + str(date.day)
                time_index.append(' timestamp("%s") = %s ;\n'
                                  % (keyy, convert_date_to_timeindex(date)))
            else:
                time_index.append(' timestamp("%s") = %s ;\n'
                                  % (self.times_table[date],
                                     convert_date_to_timeindex(date)))
        time_index.append('\n\n')

        self.output = self.output + ''.join(time_index)
        log.info("Time index written")
    except Exception as e:
        log.exception(e)
        raise HydraPluginError(
            "Please check the time axis, or the start time, end time"
            " and time step.")

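# For reference, with use_gams_date_index False the block appended to
# self.output looks like this (ordinal values illustrative):
#
#   SETS
#
#   * Time index
#   t time index /
#   0
#   1
#   /
#
#   * define time steps dependent on time index (t)
#
#   Parameter timestamp(t) ;
#
#    timestamp("0") = 735599.5 ;
#    timestamp("1") = 735600.5 ;
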
def export_timeseries_using_attributes(self, resources, res_type=None):
    """
    Export time series, one table per attribute.
    """
    islink = res_type == 'LINK'
    attributes = []
    attr_names = []
    attr_outputs = []

    # Identify all the timeseries attributes and unique attribute names
    for resource in resources:
        for attr in resource.attributes:
            if attr.dataset_type == 'timeseries' and attr.is_var is False:
                attr.name = translate_attr_name(attr.name)
                if attr.name not in attr_names:
                    attributes.append(attr)
                    attr_names.append(attr.name)

    ff = '{0:<' + self.name_len + '}'
    t_ = ff.format('')

    for timestamp in self.time_index:
        t_ = t_ + ff.format(self.times_table[timestamp])

    for attribute in attributes:
        if self.time_axis is None:
            raise HydraPluginError(
                "Missing time axis or start date, end date and time step,"
                " or bad format")

        attr_outputs.append('\n*' + attribute.name)

        if islink:
            if self.links_as_name:
                attr_outputs.append('\nTable ' + attribute.name
                                    + ' (link_name')
            else:
                attr_outputs.append('\nTable ' + attribute.name + ' (i,j')
        else:
            attr_outputs.append('\nTable ' + attribute.name + ' (i')

        if self.use_gams_date_index is True:
            attr_outputs.append(', yr, mn, dy)\n')
        else:
            attr_outputs.append(', t)\n')

        if self.links_as_name:
            attr_outputs.append('\n' + ff.format(''))
            attr_outputs.append(str(t_))
        else:
            attr_outputs.append('\n' + str(t_))

        # Identify the datasets that we need data for
        for resource in resources:
            attr = resource.get_attribute(attr_name=attribute.name)

            # Only interested in attributes with data that are timeseries
            if attr is None or attr.dataset_id is None \
                    or attr.dataset_type != "timeseries":
                continue

            # Pass in the JSON value and the list of timestamps, and get
            # back a dictionary with values, keyed on the timestamps.
            try:
                all_data = self.get_time_value(attr.value, self.time_index)
            except Exception as e:
                log.exception(e)
                all_data = None

            if all_data is None:
                raise HydraPluginError(
                    "Error finding value of attribute %s on resource %s"
                    % (attr.name, resource.name))

            if islink:
                if self.links_as_name:
                    attr_outputs.append('\n' + ff.format(
                        resource.name + '.' + resource.from_node
                        + '.' + resource.to_node))
                    attr_outputs.append(ff.format('\t'))
                else:
                    attr_outputs.append('\n' + ff.format(resource.gams_name))
            else:
                attr_outputs.append('\n' + ff.format(resource.name))

            # Get each value in turn and add it to the line
            for timestamp in self.time_index:
                tmp = all_data[timestamp]

                if isinstance(tmp, list):
                    data = "-".join(tmp)
                    ff_ = '{0:<' + self.array_len + '}'
                    data_str = ff_.format(str(data))
                else:
                    data = str(tmp)
                    data_str = ff.format(str(float(data)))

                attr_outputs.append(data_str)

        attr_outputs.append('\n')

    attr_outputs.append('\n')

    return attr_outputs

def export_timeseries_using_type(self, resources, obj_type, res_type=None):
    """
    Export time series, one table per object type.
    """
    islink = res_type == 'LINK'
    attributes = []
    attr_names = []
    attr_outputs = []

    # Identify only the timeseries values we're interested in.
    for resource in resources:
        for attr in resource.attributes:
            if attr.dataset_type == 'timeseries' and attr.is_var is False:
                attr.name = translate_attr_name(attr.name)
                if attr.name not in attr_names:
                    attributes.append(attr)
                    attr_names.append(attr.name)

    if len(attributes) > 0:
        attr_outputs.append('SETS\n\n')  # Needed before sets are defined
        attr_outputs.append(obj_type + '_timeseries /\n')

        for attribute in attributes:
            attr_outputs.append(attribute.name + '\n')
        attr_outputs.append('/\n\n')

        if islink:
            attr_outputs.append('Table ' + obj_type
                                + '_timeseries_data(t,i,j,' + obj_type
                                + '_timeseries) \n\n ')
        else:
            attr_outputs.append('Table ' + obj_type
                                + '_timeseries_data(t,i,' + obj_type
                                + '_timeseries) \n\n ')

        col_header_length = dict()
        for attribute in attributes:
            for resource in resources:
                attr = resource.get_attribute(attr_name=attribute.name)
                if attr is not None and attr.dataset_id is not None:
                    if islink:
                        col_header = ' %14s' % (resource.gams_name + '.'
                                                + attribute.name)
                    else:
                        col_header = ' %14s' % (resource.name + '.'
                                                + attribute.name)
                    col_header_length[(attribute, resource)] = len(col_header)
                    attr_outputs.append(col_header)

        attr_outputs.append('\n')

        resource_data_cache = {}
        for timestamp in self.time_index:
            attr_outputs.append('{0:<7}'.format(self.times_table[timestamp]))

            for attribute in attributes:
                for resource in resources:
                    attr = resource.get_attribute(attr_name=attribute.name)

                    # Only interested in attributes with data
                    if attr is None or attr.dataset_id is None:
                        continue

                    # Pass in the JSON value and the list of timestamps, and
                    # get back a dictionary with values, keyed on the
                    # timestamps. Cache the result per resource/attribute.
                    try:
                        all_data = resource_data_cache.get(
                            (resource.name, attribute.name))
                        if all_data is None:
                            all_data = self.get_time_value(
                                attr.value, self.time_index)
                            resource_data_cache[
                                (resource.name, attribute.name)] = all_data
                    except Exception as e:
                        log.exception(e)
                        all_data = None

                    if all_data is None:
                        raise HydraPluginError(
                            "Error finding value of attribute %s on"
                            " resource %s" % (attr.name, resource.name))

                    # Get each value in turn and add it to the line
                    data = all_data[timestamp]
                    try:
                        data_str = ' %14f' % float(data)
                    except (TypeError, ValueError):
                        ff_ = '{0:<' + self.array_len + '}'
                        data_str = ff_.format(str(data))

                    attr_outputs.append(data_str.rjust(
                        col_header_length[(attribute, resource)]))

            attr_outputs.append('\n')
        attr_outputs.append('\n')

    return attr_outputs

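# For reference, export_timeseries_using_type emits one table per object
# type; for nodes it looks roughly like this (names and values illustrative):
#
#   SETS
#
#   node_timeseries /
#   demand
#   /
#
#   Table node_timeseries_data(t,i,node_timeseries)
#
#      Node1.demand   Node2.demand
#   0      1.000000       2.000000
#   1      1.500000       2.500000
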