def __init__(self, source_data, dataset_name, attribute,
             years=None, operation=None, name=None,
             output_type='csv', storage_location=None):
    if output_type == 'sql' and not isinstance(storage_location, DatabaseConfiguration):
        raise TypeError("If Table output_type is 'sql', a DatabaseConfiguration object "
                        "must be passed as storage_location.")
    elif output_type in ['dbf', 'csv', 'tab', 'esri'] and \
            storage_location is not None and \
            not isinstance(storage_location, str):
        raise TypeError("If Table output_type is %s, storage_location must be "
                        "a path to the output directory" % output_type)
    elif output_type not in ['dbf', 'csv', 'tab', 'sql', 'esri']:
        raise ValueError("Table output_type must be either dbf, csv, tab, sql, or esri")

    AbstractIndicator.__init__(self, source_data, dataset_name,
                               [attribute], years, operation, name,
                               storage_location,
                               can_write_to_db=True)

    self.output_type = output_type
    kwargs = {}
    if self.output_type == 'sql':
        # Unpack the connection parameters from the DatabaseConfiguration.
        kwargs['protocol'] = storage_location.protocol
        kwargs['username'] = storage_location.user_name
        kwargs['password'] = storage_location.password
        kwargs['hostname'] = storage_location.host_name
        kwargs['database_name'] = storage_location.database_name
    elif self.output_type == 'esri':
        kwargs['storage_location'] = storage_location
    else:
        kwargs['storage_location'] = self.get_storage_location()

    self.store = StorageFactory().get_storage(
        type='%s_storage' % self.output_type,
        **kwargs)
def __init__(self, source_data, dataset_name, attribute,
             years=None, operation=None, name=None,
             output_type='csv', storage_location=None):
    if output_type == 'sql' and not isinstance(storage_location, OpusDatabase):
        raise TypeError("If Table output_type is 'sql', an OpusDatabase object "
                        "must be passed as storage_location.")
    elif output_type in ['dbf', 'csv', 'tab', 'esri'] and \
            storage_location is not None and \
            not isinstance(storage_location, str):
        raise TypeError("If Table output_type is %s, storage_location must be "
                        "a path to the output directory" % output_type)
    elif output_type not in ['dbf', 'csv', 'tab', 'sql', 'esri']:
        raise ValueError("Table output_type must be either dbf, csv, tab, sql, or esri")

    AbstractIndicator.__init__(self, source_data, dataset_name,
                               [attribute],
                               years=years, operation=operation, name=name,
                               storage_location=storage_location,
                               can_write_to_db=True)

    self.output_type = output_type
    kwargs = {}
    if self.output_type == 'esri':
        kwargs['storage_location'] = storage_location
    else:
        kwargs['storage_location'] = self.get_storage_location()

    self.store = StorageFactory().get_storage(
        type='%s_storage' % self.output_type,
        **kwargs)
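# A minimal usage sketch for the Table constructor above (hypothetical -- the
# class name and argument values here are illustrative assumptions, not taken
# from the source):
#
#   table = Table(source_data=source_data,          # an already-built SourceData object
#                 dataset_name='zone',
#                 attribute='urbansim.zone.population',
#                 years=[2000, 2010],
#                 output_type='csv',
#                 storage_location='/tmp/indicator_output')
#
# For 'dbf', 'csv', and 'tab' output, storage_location is an output directory
# path (or None to fall back on get_storage_location()); for 'sql' it must be
# the database object checked by the isinstance() guard above.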
def __init__(self, source_data, dataset_name, attribute=None,
             years=None, operation=None, name=None,
             storage_location=None):
    AbstractIndicator.__init__(self, source_data, dataset_name,
                               [attribute], years, operation, name,
                               storage_location)
def __init__(self, source_data, dataset_name, attribute=None,
             years=None, operation=None, name=None,
             scale=None, storage_location=None):
    if dataset_name == 'parcel':
        raise Exception('Cannot create a Matplotlib map for the parcel dataset. '
                        'Please plot at a higher geographic aggregation.')
    AbstractIndicator.__init__(self, source_data, dataset_name,
                               [attribute], years, operation, name,
                               storage_location)
    self.scale = scale
def __init__(self, source_data, dataset_name, attribute=None,
             years=None, operation=None, name=None,
             scale=None, storage_location=None, project_name=None,
             **kwargs):
    AbstractIndicator.__init__(self, source_data, dataset_name,
                               [attribute], years, operation, name,
                               storage_location)
    self.scale = scale
    self.project_name = project_name
    self.map_args = kwargs
def __init__(self, source_data, dataset_name, attribute=None,
             years=None, operation=None, name=None,
             scale=None, storage_location=None):
    AbstractIndicator.__init__(self, source_data, dataset_name,
                               [attribute], years, operation, name,
                               storage_location)
    self._values = None
    self._ginicoeff = None
def __init__(self, source_data, dataset_name, attribute=None,
             years=None, operation=None, name=None,
             scale=None, storage_location=None, project_name=None,
             **kwargs):
    AbstractIndicator.__init__(self, source_data, dataset_name,
                               [attribute], years, operation, name,
                               storage_location)
    self.scale = scale
    self.project_name = project_name
    self.map_args = kwargs
def __init__(self, source_data, dataset_name, attributes, name,
             years=None, operation=None, exclude_condition=None,
             output_type='tab', storage_location=None):
    if output_type == 'sql' and not isinstance(storage_location, OpusDatabase):
        raise TypeError("If DatasetTable output_type is 'sql', an OpusDatabase object "
                        "must be passed as storage_location.")
    elif output_type in ['dbf', 'csv', 'tab', 'esri'] and \
            storage_location is not None and \
            not isinstance(storage_location, str):
        raise TypeError("If DatasetTable output_type is %s, storage_location must be "
                        "a path to the output directory" % output_type)
    elif output_type not in ['dbf', 'csv', 'tab', 'sql', 'esri']:
        raise ValueError("DatasetTable output_type must be either dbf, csv, tab, sql, or esri")

    self.output_type = output_type
    self.exclude_condition = exclude_condition
    self.name = name

    AbstractIndicator.__init__(self, source_data, dataset_name,
                               attributes, years, operation, name,
                               storage_location=storage_location,
                               can_write_to_db=True)

    kwargs = {}
    if self.output_type == 'esri':
        kwargs['storage_location'] = storage_location
    else:
        kwargs['storage_location'] = self.get_storage_location()

    self.store = StorageFactory().get_storage(
        type='%s_storage' % self.output_type,
        **kwargs)
def __init__(self, source_data, dataset_name, attributes, name,
             years=None, operation=None, exclude_condition=None,
             output_type='tab', storage_location=None):
    if output_type == 'sql' and not isinstance(storage_location, DatabaseConfiguration):
        raise TypeError("If DatasetTable output_type is 'sql', a DatabaseConfiguration object "
                        "must be passed as storage_location.")
    elif output_type in ['dbf', 'csv', 'tab', 'esri'] and \
            storage_location is not None and \
            not isinstance(storage_location, str):
        raise TypeError("If DatasetTable output_type is %s, storage_location must be "
                        "a path to the output directory" % output_type)
    elif output_type not in ['dbf', 'csv', 'tab', 'sql', 'esri']:
        raise ValueError("DatasetTable output_type must be either dbf, csv, tab, sql, or esri")

    self.output_type = output_type
    self.exclude_condition = exclude_condition
    self.name = name

    AbstractIndicator.__init__(self, source_data, dataset_name,
                               attributes, years, operation, name,
                               storage_location=storage_location,
                               can_write_to_db=True)

    kwargs = {}
    if self.output_type == 'sql':
        # Unpack the connection parameters from the DatabaseConfiguration.
        kwargs['protocol'] = storage_location.protocol
        kwargs['username'] = storage_location.user_name
        kwargs['password'] = storage_location.password
        kwargs['hostname'] = storage_location.host_name
        kwargs['database_name'] = storage_location.database_name
    elif self.output_type == 'esri':
        kwargs['storage_location'] = storage_location
    else:
        kwargs['storage_location'] = self.get_storage_location()

    self.store = StorageFactory().get_storage(
        type='%s_storage' % self.output_type,
        **kwargs)
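# A hypothetical instantiation sketch for the DatasetTable constructor above
# (class name and argument values are illustrative assumptions, not from the
# source):
#
#   dataset_table = DatasetTable(source_data=source_data,
#                                dataset_name='household',
#                                attributes=['urbansim.household.income',
#                                            'urbansim.household.persons'],
#                                name='household_summary',
#                                output_type='tab',
#                                exclude_condition=None,
#                                storage_location='/tmp/indicator_output')
#
# Unlike Table, DatasetTable takes a list of attributes and a required name;
# the 'sql' branch above additionally unpacks the DatabaseConfiguration fields
# into keyword arguments for StorageFactory().get_storage().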
def __init__(self, source_data, dataset_name, attribute=None,
             years=None, operation=None, name=None,
             package=None, prototype_dataset=None, storage_location=None):
    AbstractIndicator.__init__(self, source_data, dataset_name,
                               [attribute], years, operation, name,
                               storage_location)

    if prototype_dataset is None:
        prototype_dir = directory_path_from_opus_path('%s.indicators.geotiff_files' % package)
        # TODO: check the indicator package and find an appropriate prototype image
        prototype_dataset = os.path.join(prototype_dir, 'idgrid.tif')

    if not os.path.exists(prototype_dataset):
        raise IOError('Error: %s does not exist. Cannot compute GeotiffMap' % prototype_dataset)

    self.prototype_dataset = prototype_dataset
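# A hypothetical usage sketch for the GeotiffMap constructor above (names and
# values are illustrative assumptions): when prototype_dataset is omitted, the
# constructor falls back to 'idgrid.tif' inside <package>.indicators.geotiff_files.
#
#   geotiff_map = GeotiffMap(source_data=source_data,
#                            dataset_name='gridcell',
#                            attribute='urbansim.gridcell.population',
#                            package='urbansim',
#                            prototype_dataset=None)   # use the package's default prototype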