def _load_global_attributes(self):
    """Wrap every netCDF global attribute of the open dataset.

    Reads each global attribute from ``self._ds`` and stores it in
    ``self._global_attributes`` as an ``Attribute`` (name/value pair),
    replacing any previously collected list.
    """
    self._global_attributes = []
    for attr_name in self._ds.ncattrs():
        wrapped = Attribute()
        wrapped.name = attr_name
        wrapped.value = self._ds.getncattr(attr_name)
        self._global_attributes.append(wrapped)
def _load_variables(self):
    """Build ``self._variables`` from the dataset's netCDF variables.

    Each netCDF variable becomes a ``Variable`` keyed and named by the
    netCDF variable name; every variable attribute is wrapped in an
    ``Attribute``.  A ``units`` attribute is additionally mirrored onto
    the ``Variable`` itself for convenient access.

    NOTE(review): this method is defined twice in this file; in Python
    the later definition silently replaces this one.
    """
    self._variables = []
    for var_name, nc_var in self._ds.variables.items():
        wrapped = Variable()
        wrapped.index_key = var_name
        wrapped.name = var_name
        wrapped.attributes = []
        for attr_name in nc_var.ncattrs():
            attr_value = nc_var.getncattr(attr_name)
            if attr_name == "units":
                # keep a direct handle on the units string
                wrapped.units = attr_value
            attribute = Attribute()
            attribute.name = attr_name
            attribute.value = attr_value
            wrapped.attributes.append(attribute)
        self._variables.append(wrapped)
def _load_variables(self):
    """Populate ``self._variables`` from the open netCDF dataset.

    One ``Variable`` per netCDF variable (``index_key`` and ``name`` are
    both the netCDF name); each netCDF attribute is recorded as an
    ``Attribute``, with ``units`` also copied onto the ``Variable``.

    NOTE(review): a second, earlier definition of this method exists in
    this file; this later one is the definition that takes effect.
    """
    self._variables = []
    for key in self._ds.variables:
        source = self._ds.variables[key]
        entry = Variable()
        entry.index_key = key
        entry.name = key
        entry.attributes = []
        for att_key in source.ncattrs():
            value = source.getncattr(att_key)
            if att_key == 'units':
                entry.units = value
            record = Attribute()
            record.name = att_key
            record.value = value
            entry.attributes.append(record)
        self._variables.append(entry)
def _load_attributes(self, filename=''):
    """Parse the header and table sections of an LLUV radial file.

    Reads *filename* (a local path, or an http URL fetched with
    ``urllib.urlopen``) line by line.  Lines outside a
    ``%TableStart``/``%TableEnd`` section are handed to
    ``_parse_attribute``; inside a table the first two ``%%`` rows supply
    the long names and units for the variables discovered from
    ``%TableColumnTypes``.

    :param filename: path or http URL of the LLUV file.
    """
    import urllib  # NOTE(review): Python 2 urllib; py3 would need urllib.request

    column_names = []
    # Looping through the whole file to get the attributes; not sure if
    # this is such a good idea (original author's note).
    if filename.startswith('http'):
        f = urllib.urlopen(filename)
    else:
        f = open(filename, 'r')
    try:
        in_table_data = False
        correct_table_type = False
        variables_populated = False
        for line in f:
            if in_table_data:
                parsed_line = line.replace('%%', '')
                if not column_names:
                    # First %% row: the column long-name tokens,
                    # reversed so .pop() yields them in file order.
                    column_names = parsed_line.split()
                    column_names.reverse()
                elif not variables_populated:
                    # Second %% row: the units, one per column.
                    units = parsed_line.split()
                    units.reverse()
                    for var in self._variables:
                        var.units = units.pop()
                        attr = Attribute()
                        attr.name = 'units'
                        attr.value = var.units
                        var.attributes.append(attr)
                        attr = Attribute()
                        attr.name = 'long_name'
                        attr.value = column_names.pop()
                        # Single-letter names (U/V/X/Y) span two header tokens.
                        if attr.value in ('U', 'V', 'X', 'Y'):
                            attr.value += ' ' + column_names.pop()
                        var.attributes.append(attr)
                    variables_populated = True
            if line.startswith('%TableType:'):
                parsed_line = line.partition(': ')
                # Only LLUV tables carry the column layout we understand.
                correct_table_type = parsed_line[2].startswith('LLUV')
            if line.startswith('%TableStart:'):
                in_table_data = True
            if line.startswith('%TableEnd:') and in_table_data:
                in_table_data = False
                correct_table_type = False
            if not in_table_data:
                self._parse_attribute(line, correct_table_type,
                                      self._variables, self._global_attributes)
    finally:
        # Bug fix: close the file/stream even if parsing raises
        # (also removed the unused local ``index``).
        f.close()
def _parse_attribute(self, line='', correct_table_type=False, variables=None, attributes=None): #strip out leading % new_line = line.replace('%', '') parsed_line = new_line.partition(':') if parsed_line[0] == 'TableColumnTypes' and correct_table_type: cols = parsed_line[2].split(' ') index = 0 for col in cols: if not col == '' and not col == '\n': var = Variable() var.attributes = [] var.name = col var.index_key = str(index) self._variables.append(var) index += 1 elif not parsed_line[0].startswith('Table'): if not parsed_line[2] == '': att = Attribute() att.name = parsed_line[0] att.value = parsed_line[2].replace('\n', '') attributes.append(att)
def _parse_attribute(self, line="", correct_table_type=False, variables=None, attributes=None): # strip out leading % new_line = line.replace("%", "") parsed_line = new_line.partition(":") if parsed_line[0] == "TableColumnTypes" and correct_table_type: cols = parsed_line[2].split(" ") index = 0 for col in cols: if not col == "" and not col == "\n": var = Variable() var.attributes = [] var.name = col var.index_key = str(index) self._variables.append(var) index += 1 elif not parsed_line[0].startswith("Table"): if not parsed_line[2] == "": att = Attribute() att.name = parsed_line[0] att.value = parsed_line[2].replace("\n", "") attributes.append(att)
def _load_attributes(self, filename=""):
    """Scan an LLUV radial file for global attributes and column metadata.

    Opens *filename* as a local file, or via ``urllib.urlopen`` when it
    starts with ``http``.  Outside a ``%TableStart``/``%TableEnd`` block
    each line goes to ``_parse_attribute``; inside, the two ``%%`` rows
    provide the long names and units for the variables created from
    ``%TableColumnTypes``.

    NOTE(review): ``_load_attributes`` is defined twice in this file;
    this later definition is the one Python keeps.

    :param filename: path or http URL of the LLUV file.
    """
    import urllib  # NOTE(review): Python 2 urllib; py3 would need urllib.request

    column_names = []
    # looping through the whole file to get the attributes; not sure if
    # this is such a good idea (original author's note)
    if filename.startswith("http"):
        f = urllib.urlopen(filename)
    else:
        f = open(filename, "r")
    try:
        in_table_data = False
        correct_table_type = False
        variables_populated = False
        for line in f:
            if in_table_data:
                parsed_line = line.replace("%%", "")
                if not column_names:
                    # First %% row: long-name tokens, reversed so that
                    # .pop() returns them in file order.
                    column_names = parsed_line.split()
                    column_names.reverse()
                elif not variables_populated:
                    # Second %% row: one units token per column.
                    units = parsed_line.split()
                    units.reverse()
                    for var in self._variables:
                        var.units = units.pop()
                        attr = Attribute()
                        attr.name = "units"
                        attr.value = var.units
                        var.attributes.append(attr)
                        attr = Attribute()
                        attr.name = "long_name"
                        attr.value = column_names.pop()
                        # U/V/X/Y long names occupy two header tokens.
                        if attr.value in ("U", "V", "X", "Y"):
                            attr.value += " " + column_names.pop()
                        var.attributes.append(attr)
                    variables_populated = True
            if line.startswith("%TableType:"):
                parsed_line = line.partition(": ")
                # Only LLUV tables have the column layout we understand.
                correct_table_type = parsed_line[2].startswith("LLUV")
            if line.startswith("%TableStart:"):
                in_table_data = True
            if line.startswith("%TableEnd:") and in_table_data:
                in_table_data = False
                correct_table_type = False
            if not in_table_data:
                self._parse_attribute(line, correct_table_type,
                                      self._variables, self._global_attributes)
    finally:
        # Bug fix: guarantee the stream is closed even when parsing
        # raises (also dropped the unused local ``index``).
        f.close()