Example #1
    def export_network(self):
        '''
        export the network from Hydra
        '''
        write_progress(2, self.steps)
        net = self.connection.call(
            'get_network', {
                'network_id': self.network_id,
                'include_data': 'Y',
                'template_id': self.template_id,
                'scenario_ids': [self.scenario_id]
            })

        log.info("Network retrieved")
        attrs = self.connection.call('get_all_attributes', {})
        log.info("%s attributes retrieved", len(attrs))
        self.net = net
        self.network = HydraNetwork()
        self.network.load(net, attrs)
        log.info("Loading net into network.")
        nodes_map = dict()
        for node in net.nodes:
            nodes_map[node.id] = node.name
        self.get_longest_node_link_name()
        write_progress(3, self.steps)
        self.output_file_contents.append("# Network-ID:  " +
                                         str(self.network_id))
        self.output_file_contents.append("\n# Scenario-ID: " +
                                         str(self.scenario_id))
        self.output_file_contents.append("\n#" + "*" * 100)

        self.write_nodes()
        write_progress(4, self.steps)
        self.write_links(nodes_map)
        write_progress(5, self.steps)
        self.export_node_groups()
        nodes_types = self.network.get_node_types(template_id=self.template_id)
        links_types = self.network.get_link_types(template_id=self.template_id)
        self.export_node_types(nodes_types)
        self.export_links_types(links_types)
        write_progress(6, self.steps)
        if len(self.time_index) > 0:
            self.output_file_contents.append('\nset time_step:=')
            for timestep in self.time_index.keys():
                self.output_file_contents.append(" " + str(timestep))
            self.output_file_contents.append(';\n')

            self.output_file_contents.append('\nset actual_time_step:=')
            for timestep in self.time_index.values():
                self.output_file_contents.append(" " + str(timestep))
            self.output_file_contents.append(';\n')
        write_progress(7, self.steps)

        if self.export_by_type is True:
            self.export_data_using_types(nodes_types, links_types)
        else:
            self.export_data_using_attributes()
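
A minimal driver sketch for the method above (the IDs and output name are hypothetical; it assumes the full Exporter class shown in Examples #3 and #4 and a reachable Hydra server, since the constructor ends by calling self.connect(args)):

    from argparse import Namespace

    # Attributes mirror what Exporter.__init__ reads from args (see Example #3).
    args = Namespace(network='4',           # hypothetical network ID
                     scenario='12',         # hypothetical scenario ID
                     template_id=None,      # no template filter
                     output='network.dat',  # Pyomo data file to write
                     export_by_type=False)  # group data by attribute, not by type

    exporter = Exporter(args, link_export_flag='l', steps=8)  # 'l': links written by name
    exporter.export_network()  # steps 2-7: fetch the network and buffer sets/params
    exporter.save_file()       # step 8: join the buffer and write it to disk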
Example #2
File: Exporter.py Project: UMWRG/PyomoApp
    def export_network(self):
        '''
        export the network from Hydra
        '''
        write_progress(2, self.steps)
        net = self.connection.call('get_network', {'network_id': self.network_id,
                                                   'include_data': 'Y',
                                                   'template_id': self.template_id,
                                                   'scenario_ids': [self.scenario_id]})

        log.info("Network retrieved")
        attrs = self.connection.call('get_all_attributes', {})
        log.info("%s attributes retrieved", len(attrs))
        self.net = net
        self.network = HydraNetwork()
        self.network.load(net, attrs)
        log.info("Loaded net into network.")
        nodes_map = dict()
        for node in net.nodes:
            nodes_map[node.id] = node.name
        self.get_longest_node_link_name()
        write_progress(3, self.steps)
        self.output_file_contents.append("# Network-ID:  " + str(self.network_id))
        self.output_file_contents.append("\n# Scenario-ID: " + str(self.scenario_id))
        self.output_file_contents.append("\n#" + "*" * 100)

        self.write_nodes()
        write_progress(4, self.steps)
        self.write_links(nodes_map)
        write_progress(5, self.steps)
        self.export_node_groups()
        nodes_types = self.network.get_node_types(template_id=self.template_id)
        links_types = self.network.get_link_types(template_id=self.template_id)
        self.export_node_types(nodes_types)
        self.export_links_types(links_types)
        write_progress(6, self.steps)
        if len(self.time_index) > 0:
            self.output_file_contents.append('\nset time_step:=')
            for timestep in self.time_index.keys():
                self.output_file_contents.append(" " + str(timestep))
            self.output_file_contents.append(';\n')

            self.output_file_contents.append('\nset actual_time_step:=')
            for timestep in self.time_index.values():
                self.output_file_contents.append(" " + str(timestep))
            self.output_file_contents.append(';\n')
        write_progress(7, self.steps)

        if self.export_by_type is True:
            self.export_data_using_types(nodes_types, links_types)
        else:
            self.export_data_using_attributes()
Example #3
File: Exporter.py Project: UMWRG/PyomoApp
class Exporter(JSONPlugin):

    def __init__(self, args, link_export_flag, steps):

        if link_export_flag == 'l':
            self.links_as_name = True
        else:
            self.links_as_name = False
        self.steps = steps

        self.use_gams_date_index = False
        self.network_id = int(args.network)
        self.scenario_id = int(args.scenario)
        self.template_id = int(args.template_id) if args.template_id is not None else None
        self.output_file = args.output
        self.export_by_type = args.export_by_type
        write_progress(1, self.steps)
        self.output_file_contents = []
        self.output_file_contents.append("#%s\n" % ("*" * 78,))
        self.output_file_contents.append("# Data exported from Hydra using PyomoPlugin.\n")
        self.output_file_contents.append("# (c) Copyright 2015, University of Manchester\n")
        self.time_index = {}
        self.connect(args)

    def export_network(self):
        '''
        export the network from Hydra
        '''
        write_progress(2, self.steps)
        net = self.connection.call('get_network', {'network_id': self.network_id,
                                                   'include_data': 'Y',
                                                   'template_id': self.template_id,
                                                   'scenario_ids': [self.scenario_id]})

        log.info("Network retrieved")
        attrs = self.connection.call('get_all_attributes', {})
        log.info("%s attributes retrieved", len(attrs))
        self.net = net
        self.network = HydraNetwork()
        self.network.load(net, attrs)
        log.info("Loaded net into network.")
        nodes_map = dict()
        for node in net.nodes:
            nodes_map[node.id] = node.name
        self.get_longest_node_link_name()
        write_progress(3, self.steps)
        self.output_file_contents.append("# Network-ID:  " + str(self.network_id))
        self.output_file_contents.append("\n# Scenario-ID: " + str(self.scenario_id))
        self.output_file_contents.append("\n#" + "*" * 100)

        self.write_nodes()
        write_progress(4, self.steps)
        self.write_links(nodes_map)
        write_progress(5, self.steps)
        self.export_node_groups()
        nodes_types = self.network.get_node_types(template_id=self.template_id)
        links_types = self.network.get_link_types(template_id=self.template_id)
        self.export_node_types(nodes_types)
        self.export_links_types(links_types)
        write_progress(6, self.steps)
        if len(self.time_index) > 0:
            self.output_file_contents.append('\nset time_step:=')
            for timestep in self.time_index.keys():
                self.output_file_contents.append(" " + str(timestep))
            self.output_file_contents.append(';\n')

            self.output_file_contents.append('\nset actual_time_step:=')
            for timestep in self.time_index.values():
                self.output_file_contents.append(" " + str(timestep))
            self.output_file_contents.append(';\n')
        write_progress(7, self.steps)

        if self.export_by_type is True:
            self.export_data_using_types(nodes_types, links_types)
        else:
            self.export_data_using_attributes()

    def get_longest_node_link_name(self):
        '''
        get the length of the longest node and link name, used
        for the file's column format
        '''
        node_name_len = 0
        for node in self.network.nodes:
            if len(node.name) > node_name_len:
                node_name_len = len(node.name)

        self.ff = '{0:<' + str(2 * node_name_len + 5) + '}'
        self.ff__ = 2 * node_name_len + 5

    def save_file(self):
        '''
        save output file
        '''
        write_progress(8, self.steps)
        log.info("writing data to file")
        with open(self.output_file, "w") as f:
            f.write("".join(self.output_file_contents))

    def write_nodes(self):
        '''
        write nodes to output file
        '''
        self.output_file_contents.append("\n\nset  nodes := ")
        for node in self.network.nodes:
            self.output_file_contents.append(" "+node.name)
        self.output_file_contents.append(';')

    def write_links(self, nodes_map):
        '''
        write links to output file
        '''
        self.output_file_contents.append("\n\nset  links:= ")
        for link in self.network.links:
            if self.links_as_name is False:
                self.output_file_contents.append("\n" + link.from_node + " " + link.to_node)
            else:
                self.output_file_contents.append("\n" + link.name)

        self.output_file_contents.append(';\n')

    def export_node_groups(self):
        "Export node groups if there are any."
        node_groups = []
        self.output_file_contents.append("\n#Nodes groups\n")
        for group in self.network.groups:
            group_nodes = self.network.get_node(group=group.ID)
            if len(group_nodes) > 0:
                node_groups.append(group)
                self.output_file_contents.append("\nset  "+group.name+":= \n")
                for node in group_nodes:
                    self.output_file_contents.append(node.name+'\n')
                self.output_file_contents.append(';\n')

    def export_node_types(self, nodes_types):
        "Export node types if there are any."
        self.output_file_contents.append("\n#Nodes types\n")
        for node_type in nodes_types:
            self.output_file_contents.append("\nset  " + node_type + ":= \n")
            for node in self.network.get_node(node_type=node_type):
                self.output_file_contents.append(node.name + '\n')
            self.output_file_contents.append(';\n')

    def export_links_types(self, links_types):
        "Export link types if there are any."
        for link_type in links_types:
            self.output_file_contents.append("\nset  " + link_type + ":= \n")
            for link in self.network.get_link(link_type=link_type):
                if self.links_as_name is False:
                    self.output_file_contents.append("\n" + link.from_node + " " + link.to_node)
                else:
                    self.output_file_contents.append("\n" + link.name)
            self.output_file_contents.append(';\n')

    def export_data_using_types(self, nodes_types, links_types):
        log.info("Exporting data")
        self.time_table = {}
        # Export network, node and link data for each type
        self.export_parameters_using_types([self.network], "NETWORK", 'scalar')
        self.export_parameters_using_types([self.network], "NETWORK", 'descriptor')
        self.export_timeseries_using_types([self.network], "NETWORK")

        for node_type in nodes_types:
            nodes = self.network.get_node(node_type=node_type)
            self.export_parameters_using_types(nodes, node_type, 'scalar')
            self.export_parameters_using_types(nodes, node_type, 'descriptor')
            self.export_timeseries_using_types(nodes, node_type)
            self.export_arrays(nodes)

        for link_type in links_types:
            links = self.network.get_link(link_type=link_type)
            self.export_parameters_using_types(links, link_type, 'scalar', res_type='LINK')
            self.export_parameters_using_types(links, link_type, 'descriptor', res_type='LINK')
            self.export_timeseries_using_types(links, link_type, res_type='LINK')
            self.export_arrays(links)

    def export_data_using_attributes(self):
        log.info("Exporting data")
        self.time_table = {}
        self.export_parameters_using_attributes([self.network], 'scalar', res_type='NETWORK')
        self.export_parameters_using_attributes([self.network], 'descriptor', res_type='NETWORK')
        self.export_timeseries_using_attributes([self.network], res_type='NETWORK')

        self.export_parameters_using_attributes(self.network.nodes, 'scalar')
        self.export_parameters_using_attributes(self.network.nodes, 'descriptor')
        self.export_timeseries_using_attributes(self.network.nodes)
        self.export_arrays(self.network.nodes)

        self.export_parameters_using_attributes(self.network.links, 'scalar', res_type='LINK')
        self.export_parameters_using_attributes(self.network.links, 'descriptor', res_type='LINK')
        self.export_timeseries_using_attributes(self.network.links, res_type='LINK')
        self.export_arrays(self.network.links)

    def export_parameters_using_types(self, resources, obj_type, datatype, res_type=None):
        """Export scalars or descriptors."""
        islink = res_type == 'LINK'
        attributes = []
        attr_names = []
        for resource in resources:
            for attr in resource.attributes:
                if attr.dataset_type == datatype and attr.is_var is False:
                    translated_attr_name = translate_attr_name(attr.name)
                    attr.name = translated_attr_name
                    if attr.name not in attr_names:
                        attributes.append(attr)
                        attr_names.append(attr.name)

        if len(attributes) > 0:
            for attribute in attributes:
                nname = "\nparam " + attribute.name + "_" + obj_type + ':='
                contents = []
                for resource in resources:
                    attr = resource.get_attribute(attr_name=attribute.name)
                    if attr is None or attr.value is None or attr.dataset_type != datatype:
                        continue
                    if res_type is None:
                        name = resource.name
                    elif islink is True and self.links_as_name is False:
                        name = get_link_name_for_param(resource)
                    else:
                        name = ""

                    contents.append("\n " + self.ff.format(name) + self.ff.format(str(attr.value)))
                if len(contents) > 0:
                    self.output_file_contents.append(nname)
                    for st in contents:
                        self.output_file_contents.append(st)

                    self.output_file_contents.append(';\n')

    def export_parameters_using_attributes(self, resources, datatype, res_type=None):
        """
        Export scalars or descriptors.
        """
        islink = res_type == 'LINK'
        attributes = []
        attr_names = []
        for resource in resources:
            for attr in resource.attributes:
                if attr.dataset_type == datatype and attr.is_var is False:
                    translated_attr_name = translate_attr_name(attr.name)
                    attr.name = translated_attr_name
                    if attr.name not in attr_names:
                        attributes.append(attr)
                        attr_names.append(attr.name)

        if len(attributes) > 0:
            for attribute in attributes:
                nname = "\nparam " + attribute.name + ':='
                contents = []
                for resource in resources:
                    attr = resource.get_attribute(attr_name=attribute.name)
                    if attr is None or attr.value is None or attr.dataset_type != datatype:
                        continue
                    if res_type is None:
                        name = resource.name
                    elif islink is True and self.links_as_name is False:
                        name = get_link_name_for_param(resource)
                    else:
                        name = ""

                    contents.append("\n " + self.ff.format(name) + self.ff.format(str(attr.value)))
                if len(contents) > 0:
                    self.output_file_contents.append(nname)
                    for st in contents:
                        self.output_file_contents.append(st)

                    self.output_file_contents.append(';\n')



    def export_timeseries_using_types(self, resources, obj_type, res_type=None):
        """
        Export time series.
        """
        islink = res_type == 'LINK'
        attributes = []
        attr_names = []
        attrb_tables = {}
        for resource in resources:
            for attr in resource.attributes:
                if attr.dataset_type == 'timeseries' and attr.is_var is False:
                    if self.time_index is None:
                        raise HydraPluginError("Missing time axis or start date, "
                                               "end date and time step or bad format")

                    attr.name = translate_attr_name(attr.name)
                    if attr.name not in attr_names:
                        attrb_tables[attr.name] = attr
                        attributes.append(attr)
                        attr_names.append(attr.name)

        if len(attributes) > 0:
            dataset_ids = []
            all_res_data = {}

            # Identify the datasets that we need data for
            for attribute in attributes:
                for resource in resources:
                    attr = resource.get_attribute(attr_name=attribute.name)
                    if attr is None or attr.dataset_id is None:
                        continue
                    dataset_ids.append(attr.dataset_id)
                    value = json.loads(attr.value)
                    all_res_data[attr.dataset_id] = value

            # We need the value at each time in the specified time axis,
            # so identify the relevant timestamps.
            soap_times = []
            for t, timestamp in enumerate(self.time_index.values()):
                soap_times.append(date_to_string(timestamp))

            # Get all the necessary data for all the datasets we have.
            #all_data = self.connection.call('get_multiple_vals_at_time',
            #                            {'dataset_ids':dataset_ids,
            #                             'timestamps' : soap_times})

            for attribute in attributes:
                self.output_file_contents.append("\nparam " + attribute.name + "_" + obj_type + ":\n")
                self.output_file_contents.append(self.write_time())
                for resource in resources:
                    name = resource.name
                    if islink is True and self.links_as_name is False:
                        name = get_link_name(resource)
                    nname = "\n  " + name
                    attr = resource.get_attribute(attr_name=attribute.name)
                    if attr is None or attr.dataset_id is None or attr.dataset_type != 'timeseries':
                        continue
                    try:
                        all_data = self.get_time_value(attr.value, self.time_index.values())
                    except Exception as e:
                        log.exception(e)
                        all_data = None

                    if all_data is None:
                        raise HydraPluginError("Error finding value of attribute %s on "
                                               "resource %s" % (attr.name, resource.name))
                    self.output_file_contents.append(self.ff.format(nname))

                    for timestamp in self.time_index.values():
                        tmp = all_data[timestamp]

                        if isinstance(tmp, list):
                            data = "-".join(tmp)
                            ff_ = '{0:<' + self.array_len + '}'
                            data_str = ff_.format(str(data))
                        else:
                            data = str(tmp)
                            data_str = self.ff.format(str(float(data)))
                        self.output_file_contents.append(data_str)

                self.output_file_contents.append(';\n')
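
Every writer method above pads its columns with the self.ff format string that get_longest_node_link_name builds. A standalone sketch of that behavior, assuming a longest node name of 5 characters (so the field width is 2*5 + 5 = 15):

    ff = '{0:<15}'   # left-justify each field into a 15-character column
    row = ff.format('node1') + ff.format('42.0')
    print(repr(row))  # -> 'node1          42.0           '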
Example #4
class Exporter(JSONPlugin):
    def __init__(self, args, link_export_flag, steps):

        if link_export_flag == 'l':
            self.links_as_name = True
        else:
            self.links_as_name = False
        self.steps = steps

        self.use_gams_date_index = False
        self.network_id = int(args.network)
        self.scenario_id = int(args.scenario)
        self.template_id = int(
            args.template_id) if args.template_id is not None else None
        self.output_file = args.output
        self.export_by_type = args.export_by_type
        write_progress(1, self.steps)
        self.output_file_contents = []
        self.output_file_contents.append("#%s\n" % ("*" * 78, ))
        self.output_file_contents.append(
            "# Data exported from Hydra using PyomoPlugin.\n")
        self.output_file_contents.append(
            "# (c) Copyright 2015, University of Manchester\n")
        self.time_index = {}
        self.connect(args)

    def export_network(self):
        '''
        export the network from Hydra
        '''
        write_progress(2, self.steps)
        net = self.connection.call(
            'get_network', {
                'network_id': self.network_id,
                'include_data': 'Y',
                'template_id': self.template_id,
                'scenario_ids': [self.scenario_id]
            })

        log.info("Network retrieved")
        attrs = self.connection.call('get_all_attributes', {})
        log.info("%s attributes retrieved", len(attrs))
        self.net = net
        self.network = HydraNetwork()
        self.network.load(net, attrs)
        log.info("Loading net into network.")
        nodes_map = dict()
        for node in net.nodes:
            nodes_map[node.id] = node.name
        self.get_longest_node_link_name()
        write_progress(3, self.steps)
        self.output_file_contents.append("# Network-ID:  " +
                                         str(self.network_id))
        self.output_file_contents.append("\n# Scenario-ID: " +
                                         str(self.scenario_id))
        self.output_file_contents.append("\n#" + "*" * 100)

        self.write_nodes()
        write_progress(4, self.steps)
        self.write_links(nodes_map)
        write_progress(5, self.steps)
        self.export_node_groups()
        nodes_types = self.network.get_node_types(template_id=self.template_id)
        links_types = self.network.get_link_types(template_id=self.template_id)
        self.export_node_types(nodes_types)
        self.export_links_types(links_types)
        write_progress(6, self.steps)
        if len(self.time_index) > 0:
            self.output_file_contents.append('\nset time_step:=')
            for timestep in self.time_index.keys():
                self.output_file_contents.append(" " + str(timestep))
            self.output_file_contents.append(';\n')

            self.output_file_contents.append('\nset actual_time_step:=')
            for timestep in self.time_index.values():
                self.output_file_contents.append(" " + str(timestep))
            self.output_file_contents.append(';\n')
        write_progress(7, self.steps)

        if self.export_by_type is True:
            self.export_data_using_types(nodes_types, links_types)
        else:
            self.export_data_using_attributes()

    def get_longest_node_link_name(self):
        '''
        get the length of the longest node and link name, used
        for the file's column format
        '''
        node_name_len = 0
        for node in self.network.nodes:
            if len(node.name) > node_name_len:
                node_name_len = len(node.name)

        self.ff = '{0:<' + str(2 * node_name_len + 5) + '}'
        self.ff__ = 2 * node_name_len + 5

    def save_file(self):
        '''
        save output file
        '''
        write_progress(8, self.steps)
        log.info("writing data to file")
        with open(self.output_file, "w") as f:
            f.write("".join(self.output_file_contents))

    def write_nodes(self):
        '''
        write nodes to output file
        '''
        self.output_file_contents.append("\n\nset  nodes := ")
        for node in self.network.nodes:
            self.output_file_contents.append(" " + node.name)
        self.output_file_contents.append(';')

    def write_links(self, nodes_map):
        '''
        write links to output file
        '''
        self.output_file_contents.append("\n\nset  links:= ")
        for link in self.network.links:
            if self.links_as_name is False:
                self.output_file_contents.append("\n" + link.from_node + " " +
                                                 link.to_node)
            else:
                self.output_file_contents.append("\n" + link.name)

        self.output_file_contents.append(';\n')

    def export_node_groups(self):
        "Export node groups if there are any."
        node_groups = []
        self.output_file_contents.append("\n#Nodes groups\n")
        for group in self.network.groups:
            group_nodes = self.network.get_node(group=group.ID)
            if len(group_nodes) > 0:
                node_groups.append(group)
                self.output_file_contents.append("\nset  " + group.name +
                                                 ":= \n")
                for node in group_nodes:
                    self.output_file_contents.append(node.name + '\n')
                self.output_file_contents.append(';\n')

    def export_node_types(self, nodes_types):
        "Export node types if there are any."
        self.output_file_contents.append("\n#Nodes types\n")
        for node_type in nodes_types:
            self.output_file_contents.append("\nset  " + node_type + ":= \n")
            for node in self.network.get_node(node_type=node_type):
                self.output_file_contents.append(node.name + '\n')
            self.output_file_contents.append(';\n')

    def export_links_types(self, links_types):
        "Export node groups if there are any."
        for link_type in links_types:
            self.output_file_contents.append("\nset  " + link_type + ":= \n")
            for link in self.network.get_link(link_type=link_type):
                if self.links_as_name is False:
                    self.output_file_contents.append("\n" + link.from_node +
                                                     " " + link.to_node)
                else:
                    self.output_file_contents.append("\n" + link.name)
            self.output_file_contents.append(';\n')

    def export_data_using_types(self, nodes_types, links_types):
        log.info("Exporting data")
        self.time_table = {}
        # Export network, node and link data for each type
        self.export_parameters_using_types([self.network], "NETWORK", 'scalar')
        self.export_parameters_using_types([self.network], "NETWORK",
                                           'descriptor')
        self.export_timeseries_using_types([self.network], "NETWORK")

        for node_type in nodes_types:
            nodes = self.network.get_node(node_type=node_type)
            self.export_parameters_using_types(nodes, node_type, 'scalar')
            self.export_parameters_using_types(nodes, node_type, 'descriptor')
            self.export_timeseries_using_types(nodes, node_type)
            self.export_arrays(nodes)

        for link_type in links_types:
            links = self.network.get_link(link_type=link_type)
            self.export_parameters_using_types(links,
                                               link_type,
                                               'scalar',
                                               res_type='LINK')
            self.export_parameters_using_types(links,
                                               link_type,
                                               'descriptor',
                                               res_type='LINK')
            self.export_timeseries_using_types(links,
                                               link_type,
                                               res_type='LINK')
            self.export_arrays(links)

    def export_data_using_attributes(self):
        log.info("Exporting data")
        self.time_table = {}
        self.export_parameters_using_attributes([self.network],
                                                'scalar',
                                                res_type='NETWORK')
        self.export_parameters_using_attributes([self.network],
                                                'descriptor',
                                                res_type='NETWORK')
        self.export_timeseries_using_attributes([self.network],
                                                res_type='NETWORK')

        self.export_parameters_using_attributes(self.network.nodes, 'scalar')
        self.export_parameters_using_attributes(self.network.nodes,
                                                'descriptor')
        self.export_timeseries_using_attributes(self.network.nodes)
        self.export_arrays(self.network.nodes)

        self.export_parameters_using_attributes(self.network.links,
                                                'scalar',
                                                res_type='LINK')
        self.export_parameters_using_attributes(self.network.links,
                                                'descriptor',
                                                res_type='LINK')
        self.export_timeseries_using_attributes(self.network.links,
                                                res_type='LINK')
        self.export_arrays(self.network.links)

    def export_parameters_using_types(self,
                                      resources,
                                      obj_type,
                                      datatype,
                                      res_type=None):
        """Export scalars or descriptors.        """
        self.network.attributes
        islink = res_type == 'LINK'
        attributes = []
        attr_names = []
        for resource in resources:
            for attr in resource.attributes:
                if attr.dataset_type == datatype and attr.is_var is False:
                    translated_attr_name = translate_attr_name(attr.name)
                    attr.name = translated_attr_name
                    if attr.name not in attr_names:
                        attributes.append(attr)
                        attr_names.append(attr.name)

        if len(attributes) > 0:
            for attribute in attributes:
                nname = "\nparam " + attribute.name + "_" + obj_type + ':='
                contents = []
                #self.output_file_contents.append("\nparam "+attribute.name+':=')
                for resource in resources:
                    attr = resource.get_attribute(attr_name=attribute.name)
                    if attr is None or attr.value is None or attr.dataset_type != datatype:
                        continue
                    if res_type is None:
                        name = resource.name
                    elif islink is True and self.links_as_name is False:
                        name = get_link_name_for_param(resource)
                    else:
                        name = ""

                    #self.output_file_contents.append("\n "+name+"  "+str(attr.value.values()[0][0]))
                    contents.append("\n " + self.ff.format(name) +
                                    self.ff.format(str(attr.value)))
                if len(contents) > 0:
                    self.output_file_contents.append(nname)
                    for st in contents:
                        self.output_file_contents.append(st)

                    self.output_file_contents.append(';\n')

    def export_parameters_using_attributes(self,
                                           resources,
                                           datatype,
                                           res_type=None):
        """
        Export scalars or descriptors.
        """
        islink = res_type == 'LINK'
        attributes = []
        attr_names = []
        for resource in resources:
            for attr in resource.attributes:
                if attr.dataset_type == datatype and attr.is_var is False:
                    translated_attr_name = translate_attr_name(attr.name)
                    attr.name = translated_attr_name
                    if attr.name not in attr_names:
                        attributes.append(attr)
                        attr_names.append(attr.name)

        if len(attributes) > 0:
            for attribute in attributes:
                nname = "\nparam " + attribute.name + ':='
                contents = []
                #self.output_file_contents.append("\nparam "+attribute.name+':=')
                for resource in resources:
                    attr = resource.get_attribute(attr_name=attribute.name)
                    if attr is None or attr.value is None or attr.dataset_type != datatype:
                        continue
                    if res_type is None:
                        name = resource.name
                    elif islink is True and self.links_as_name is False:
                        name = get_link_name_for_param(resource)
                    else:
                        name = ""

                    #self.output_file_contents.append("\n "+name+"  "+str(attr.value.values()[0][0]))
                    contents.append("\n " + self.ff.format(name) +
                                    self.ff.format(str(attr.value)))
                if len(contents) > 0:
                    self.output_file_contents.append(nname)
                    for st in contents:
                        self.output_file_contents.append(st)

                    self.output_file_contents.append(';\n')

    def export_timeseries_using_types(self,
                                      resources,
                                      obj_type,
                                      res_type=None):
        """
        Export time series.
        """
        islink = res_type == 'LINK'
        attributes = []
        attr_names = []
        attrb_tables = {}
        for resource in resources:
            for attr in resource.attributes:
                if attr.dataset_type == 'timeseries' and attr.is_var is False:
                    if self.time_index is None:
                        raise HydraPluginError(
                            "Missing time axis or start date, end date and time step or bad format"
                        )

                    attr.name = translate_attr_name(attr.name)
                    if attr.name not in attr_names:
                        attrb_tables[attr.name] = attr
                        attributes.append(attr)
                        attr_names.append(attr.name)

        if len(attributes) > 0:
            dataset_ids = []
            all_res_data = {}

            #Identify the datasets that we need data for
            for attribute in attributes:
                for resource in resources:
                    attr = resource.get_attribute(attr_name=attribute.name)
                    if attr is None or attr.dataset_id is None:
                        continue
                    dataset_ids.append(attr.dataset_id)
                    value = json.loads(attr.value)
                    all_res_data[attr.dataset_id] = value

            #We need to get the value at each time in the specified time axis,
            #so we need to identify the relevant timestamps.
            soap_times = []
            for t, timestamp in enumerate(self.time_index.values()):
                soap_times.append(date_to_string(timestamp))

            #Get all the necessary data for all the datasets we have.
            #all_data = self.connection.call('get_multiple_vals_at_time',
            #                            {'dataset_ids':dataset_ids,
            #                             'timestamps' : soap_times})

            for attribute in attributes:
                self.output_file_contents.append("\nparam " + attribute.name +
                                                 "_" + obj_type + ":\n")
                self.output_file_contents.append(self.write_time())
                for resource in resources:
                    name = resource.name
                    if islink is True and self.links_as_name is False:
                        name = get_link_name(resource)
                    #self.output_file_contents.append("\n  "+name)
                    nname = "\n  " + name
                    attr = resource.get_attribute(attr_name=attribute.name)
                    if attr is None or attr.dataset_id is None or attr.dataset_type != 'timeseries':
                        continue
                    try:
                        all_data = self.get_time_value(
                            attr.value, self.time_index.values())
                    except Exception as e:
                        log.exception(e)
                        all_data = None

                    if all_data is None:
                        raise HydraPluginError(
                            "Error finding value of attribute %s on "
                            "resource %s" % (attr.name, resource.name))
                    self.output_file_contents.append(self.ff.format(nname))

                    for timestamp in self.time_index.values():
                        tmp = all_data[timestamp]

                        if isinstance(tmp, list):
                            data = "-".join(tmp)
                            ff_ = '{0:<' + self.array_len + '}'
                            data_str = ff_.format(str(data))
                        else:
                            data = str(tmp)
                            data_str = self.ff.format(str(float(data)))
                        self.output_file_contents.append(data_str)

                self.output_file_contents.append(';\n')
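
Pieced together from write_nodes, write_links and the time_step block in export_network, the buffered fragment looks roughly like the following for a hypothetical two-node network (names, timestep indices and dates are made up; real values come from the Hydra network and time axis):

    set  nodes :=  node1 node2;

    set  links:= 
    node1 node2;

    set time_step:= 0 1;

    set actual_time_step:= 2015-01-01 2015-02-01;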