def new_dataset(spj, element):
    """Build a Dataset from the given XML *element*.

    Pops 'ncols' and 'typecodes' out of the element's attributes and
    passes the remaining attributes to the Dataset constructor.  Metadata
    items are copied into ``ds.metadata``.  For 'Table' elements, per-column
    Info entries are gathered into one dict per column and handed to
    ``ds.set_table_import`` together with the dataset's on-disk filename.

    :param spj: project object forwarded to ``set_table_import``.
    :param element: ElementTree element describing the dataset.
    :returns: the newly created Dataset.
    """
    ncols = int(element.attrib.pop('ncols', 0))
    typecodes = element.attrib.pop('typecodes', '')
    ds = Dataset(**element.attrib)

    # Copy <Metadata><Metaitem key=...>text</Metaitem> entries.
    for eMetaitem in element.findall('Metadata/Metaitem'):
        ds.metadata[eMetaitem.attrib['key']] = unicode(eMetaitem.text)

    if element.tag == 'Table':
        # Extra column information is forwarded to 'set_table_import',
        # which in turn hands it to the internal importer.
        column_props = [dict() for _ in range(ncols)]
        for eColumn in element.findall('Column'):
            props = column_props[int(eColumn.get('n'))]
            for eInfo in eColumn.findall('Info'):
                info_key = eInfo.get('key', None)
                if info_key is not None:
                    props[info_key] = unicode(eInfo.text)

        filename = os.path.join('datasets', dataset_filename(ds.key))
        # TODO: replace DEFAULT_FF with read value
        ds.set_table_import(spj, filename, typecodes, column_props, DEFAULT_FF)

    return ds
def new_dataset(spj, element):
    """Build a Dataset from the given XML *element*.

    All element attributes are passed straight to the Dataset constructor,
    and metadata items are copied into ``ds.metadata``.  For 'Table'
    elements, one Column object is created per character of the
    'typecodes' attribute, per-column Info entries are applied via
    ``Column.set_value``, and the result is handed to
    ``ds.set_table_import`` together with the dataset's on-disk filename.

    :param spj: project object forwarded to ``set_table_import``.
    :param element: ElementTree element describing the dataset.
    :returns: the newly created Dataset.
    """
    ds = Dataset(**element.attrib)

    # Copy <Metadata><Metaitem key=...>text</Metaitem> entries.
    for eMetaitem in element.findall('Metadata/Metaitem'):
        ds.metadata[eMetaitem.attrib['key']] = unicode(eMetaitem.text)

    if element.tag == 'Table':
        ### extract metadata special to Table objects
        typecodes = element.get('typecodes', '')

        # One (empty-data) Column per typecode character, filled below
        # from the available <Column>/<Info> information.
        columns = [Column(data=array((), tc)) for tc in typecodes]
        for eColumn in element.findall('Column'):
            col = columns[int(eColumn.get('n'))]
            for eInfo in eColumn.findall('Info'):
                info_key = eInfo.get('key', None)
                if info_key is not None:
                    col.set_value(info_key, eInfo.text)

        filename = os.path.join('datasets', dataset_filename(ds.key))
        # TODO: replace DEFAULT_FF with read value
        ds.set_table_import(spj, filename, typecodes, columns, DEFAULT_FF)

    return ds