Example #1
def test_get_clearsky_valueerror():
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    times = pd.date_range(start='20160101T0600-0700',
                          end='20160101T1800-0700',
                          freq='3H')
    with pytest.raises(ValueError):
        clearsky = tus.get_clearsky(times, model='invalid_model')
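For reference, a minimal sketch of the model names get_clearsky accepts; per the tests in this listing, 'ineichen' (the default), 'haurwitz', and 'simplified_solis' are valid, and any other string raises the ValueError asserted above. The location and times are illustrative:

import pandas as pd
from pvlib.location import Location

loc = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
times = pd.date_range(start='2016-01-01 06:00', end='2016-01-01 18:00',
                      freq='3H', tz='US/Arizona')

# 'ineichen' normally looks up Linke turbidity from a bundled table;
# passing linke_turbidity explicitly keeps the sketch self-contained.
print(loc.get_clearsky(times, model='ineichen', linke_turbidity=3).columns.tolist())
print(loc.get_clearsky(times, model='haurwitz').columns.tolist())  # ['ghi']
print(loc.get_clearsky(times, model='simplified_solis').columns.tolist())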
Example #2
def test_get_clearsky(mocker, times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    m = mocker.spy(pvlib.clearsky, 'ineichen')
    out = tus.get_clearsky(times)
    assert m.call_count == 1
    assert_index_equal(out.index, times)
    # check that values are 0 before sunrise and after sunset
    assert out.iloc[0, :].sum().sum() == 0
    assert out.iloc[-1:, :].sum().sum() == 0
    # check that values are > 0 during the day
    assert (out.iloc[1:-1, :] > 0).all().all()
    assert (out.columns.values == ['ghi', 'dni', 'dhi']).all()
Example #3
def test_get_clearsky_haurwitz(times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    clearsky = tus.get_clearsky(times, model='haurwitz')
    expected = pd.DataFrame(data=np.array(
                            [[   0.        ],
                             [ 242.30085588],
                             [ 559.38247117],
                             [ 384.6873791 ],
                             [   0.        ]]),
                            columns=['ghi'],
                            index=times)
    assert_frame_equal(expected, clearsky)
Example #4
def test_get_clearsky_simplified_solis(times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    clearsky = tus.get_clearsky(times, model='simplified_solis')
    expected = pd.DataFrame(data=np.array(
        [[   0.        ,    0.        ,    0.        ],
         [  70.00146271,  638.01145669,  236.71136245],
         [ 101.69729217,  852.51950946,  577.1117803 ],
         [  86.1679965 ,  755.98048017,  385.59586091],
         [   0.        ,    0.        ,    0.        ]]),
        columns=['dhi', 'dni', 'ghi'],
        index=times)
    expected = expected[['ghi', 'dni', 'dhi']]
    assert_frame_equal(expected, clearsky, check_less_precise=2)
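A note on the assertion: check_less_precise=2 compares values to roughly two decimal places. Newer pandas (>= 1.1) deprecates that keyword in favour of explicit tolerances, so an approximate hedged equivalent would be:

from pandas.testing import assert_frame_equal

# roughly equivalent tolerance-based comparison (assumes pandas >= 1.1)
assert_frame_equal(expected, clearsky, check_exact=False, rtol=1e-2)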
Example #5
def test_get_clearsky_simplified_solis_dni_extra(times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    clearsky = tus.get_clearsky(times, model='simplified_solis',
                                dni_extra=1370)
    expected = pd.DataFrame(data=np.array(
        [[   0.        ,    0.        ,    0.        ],
         [  67.82281485,  618.15469596,  229.34422063],
         [  98.53217848,  825.98663808,  559.15039353],
         [  83.48619937,  732.45218243,  373.59500313],
         [   0.        ,    0.        ,    0.        ]]),
        columns=['dhi', 'dni', 'ghi'],
        index=times)
    expected = expected[['ghi', 'dni', 'dhi']]
    assert_frame_equal(expected, clearsky)
Example #6
def test_get_clearsky_simplified_solis_pressure(times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    clearsky = tus.get_clearsky(times, model='simplified_solis',
                                pressure=95000)
    expected = pd.DataFrame(data=np.array(
        [[   0.        ,    0.        ,    0.        ],
         [  70.20556637,  635.53091983,  236.17716435],
         [ 102.08954904,  850.49502085,  576.28465815],
         [  86.46561686,  753.70744638,  384.90537859],
         [   0.        ,    0.        ,    0.        ]]),
        columns=['dhi', 'dni', 'ghi'],
        index=times)
    expected = expected[['ghi', 'dni', 'dhi']]
    assert_frame_equal(expected, clearsky, check_less_precise=2)
Example #7
def test_get_clearsky_simplified_solis_aod_pw(times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    clearsky = tus.get_clearsky(times, model='simplified_solis',
                                aod700=0.25, precipitable_water=2.)
    expected = pd.DataFrame(data=np.array(
        [[   0.        ,    0.        ,    0.        ],
         [  85.77821205,  374.58084365,  179.48483117],
         [ 143.52743364,  625.91745295,  490.06254157],
         [ 114.63275842,  506.52275195,  312.24711495],
         [   0.        ,    0.        ,    0.        ]]),
        columns=['dhi', 'dni', 'ghi'],
        index=times)
    expected = expected[['ghi', 'dni', 'dhi']]
    assert_frame_equal(expected, clearsky, check_less_precise=2)
Example #8
def test_get_clearsky():
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    times = pd.date_range(start='20160101T0600-0700',
                          end='20160101T1800-0700',
                          freq='3H')
    clearsky = tus.get_clearsky(times)
    expected = pd.DataFrame(data=np.array([
        (  0.0,                0.0,              0.0),
        (262.77734276159333, 791.1972825869296, 46.18714900637892),
        (616.764693938387,   974.9610353623959, 65.44157429054201),
        (419.6512657626518,  901.6234995035793, 54.26016437839348),
        (  0.0,                0.0,              0.0)],
        dtype=[('ghi', '<f8'), ('dni', '<f8'), ('dhi', '<f8')]), index=times)
    assert_frame_equal(expected, clearsky, check_less_precise=2)
Example #9
def test_get_clearsky_haurwitz():
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    times = pd.date_range(start='20160101T0600-0700',
                          end='20160101T1800-0700',
                          freq='3H')
    clearsky = tus.get_clearsky(times, model='haurwitz')
    expected = pd.DataFrame(data=np.array(
                            [[   0.        ],
                             [ 242.30085588],
                             [ 559.38247117],
                             [ 384.6873791 ],
                             [   0.        ]]),
                            columns=['ghi'],
                            index=times)
    assert_frame_equal(expected, clearsky)
Example #10
def test_get_clearsky():
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    times = pd.date_range(start='20160101T0600-0700',
                          end='20160101T1800-0700',
                          freq='3H')
    clearsky = tus.get_clearsky(times)
    expected = pd.DataFrame(data=np.array(
        [[   0.        ,    0.        ,    0.        ],
         [ 258.60422702,  761.57329257,   50.1235982 ],
         [ 611.96347869,  956.95353414,   70.8232806 ],
         [ 415.10904044,  878.52649603,   59.07820922],
         [   0.        ,    0.        ,    0.        ]]),
        columns=['ghi', 'dni', 'dhi'],
        index=times)
    assert_frame_equal(expected, clearsky, check_less_precise=2)
Example #11
def test_get_clearsky_ineichen_supply_linke(mocker):
    tus = Location(32.2, -111, 'US/Arizona', 700)
    times = pd.date_range(start='2014-06-24-0700', end='2014-06-25-0700',
                          freq='3h')
    mocker.spy(pvlib.clearsky, 'ineichen')
    out = tus.get_clearsky(times, linke_turbidity=3)
    # we only care that the LT is passed in this test
    pvlib.clearsky.ineichen.assert_called_once_with(ANY, ANY, 3, ANY, ANY)
    assert_index_equal(out.index, times)
    # check that values are 0 before sunrise and after sunset
    assert out.iloc[0:2, :].sum().sum() == 0
    assert out.iloc[-2:, :].sum().sum() == 0
    # check that values are > 0 during the day
    assert (out.iloc[2:-2, :] > 0).all().all()
    assert (out.columns.values == ['ghi', 'dni', 'dhi']).all()
Example #12
def test_get_clearsky_simplified_solis_apparent_elevation(times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    solar_position = {'apparent_elevation': pd.Series(80, index=times),
                      'apparent_zenith': pd.Series(10, index=times)}
    clearsky = tus.get_clearsky(times, model='simplified_solis',
                                solar_position=solar_position)
    expected = pd.DataFrame(data=np.array(
        [[  131.3124497 ,  1001.14754036,  1108.14147919],
         [  131.3124497 ,  1001.14754036,  1108.14147919],
         [  131.3124497 ,  1001.14754036,  1108.14147919],
         [  131.3124497 ,  1001.14754036,  1108.14147919],
         [  131.3124497 ,  1001.14754036,  1108.14147919]]),
        columns=['dhi', 'dni', 'ghi'],
        index=times)
    expected = expected[['ghi', 'dni', 'dhi']]
    assert_frame_equal(expected, clearsky, check_less_precise=2)
Example #13
def test_get_clearsky_ineichen_supply_linke():
    tus = Location(32.2, -111, 'US/Arizona', 700)
    times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h')
    times_localized = times.tz_localize(tus.tz)
    expected = pd.DataFrame(np.array(
        [[    0.        ,     0.        ,     0.        ],
         [    0.        ,     0.        ,     0.        ],
         [   79.73090244,   316.16436502,    40.45759009],
         [  703.43653498,   876.41452667,    95.15798252],
         [ 1042.37962396,   939.86391062,   118.44687715],
         [  851.32411813,   909.11186737,   105.36662462],
         [  257.18266827,   646.16644264,    62.02777094],
         [    0.        ,     0.        ,     0.        ],
         [    0.        ,     0.        ,     0.        ]]),
        columns=['ghi', 'dni', 'dhi'],
        index=times_localized)
    out = tus.get_clearsky(times_localized, linke_turbidity=3)
    assert_frame_equal(expected, out, check_less_precise=2)
Example #14
def detect_clearsky_data():
    test_dir = os.path.dirname(os.path.abspath(
        inspect.getfile(inspect.currentframe())))
    file = os.path.join(test_dir, '..', 'data', 'detect_clearsky_data.csv')
    expected = pd.read_csv(file, index_col=0, parse_dates=True, comment='#')
    expected = expected.tz_localize('UTC').tz_convert('Etc/GMT+7')
    metadata = {}
    with open(file) as f:
        for line in f:
            if line.startswith('#'):
                key, value = line.strip('# \n').split(':')
                metadata[key] = float(value)
            else:
                break
    metadata['window_length'] = int(metadata['window_length'])
    loc = Location(metadata['latitude'], metadata['longitude'],
                   altitude=metadata['elevation'])
    # specify turbidity to guard against future lookup changes
    cs = loc.get_clearsky(expected.index, linke_turbidity=2.658197)
    return expected, cs
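The fixture above relies on '# key: value' comment lines at the top of the CSV. A minimal sketch of that header convention, parsed the same way (the inline header is illustrative, not the real data file):

import io

header = io.StringIO("# latitude: 35.04\n# longitude: -106.62\n"
                     "# elevation: 1619.0\n# window_length: 10\ntime,ghi\n")
metadata = {}
for line in header:
    if line.startswith('#'):
        key, value = line.strip('# \n').split(':')
        metadata[key] = float(value)
    else:
        break
print(metadata)  # {'latitude': 35.04, 'longitude': -106.62, ...}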
Example #15
from pvlib import solarposition
from pvlib import irradiance
from pvlib import atmosphere

from conftest import requires_ephem, requires_numba, needs_numpy_1_10

# setup times and location to be tested.
tus = Location(32.2, -111, 'US/Arizona', 700)

# must include night values
times = pd.date_range(start='20140624', freq='6H', periods=4, tz=tus.tz)

ephem_data = solarposition.get_solarposition(
    times, tus.latitude, tus.longitude, method='nrel_numpy')

irrad_data = tus.get_clearsky(times, model='ineichen', linke_turbidity=3)

dni_et = irradiance.extraradiation(times.dayofyear)

ghi = irrad_data['ghi']


# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
Example #16
class LocalRE(object):

    forecast_height = 10  # for DarkSky API

    def __init__(
            self,
            wind_turbines: list = [],
            pv_arrays: list = [],
            latitude: float = 57.6568,
            longitude: float = -3.5818,
            altitude: float = 10,
            roughness_length: float = 0.15,  # roughness length (bit of a guess)
            hellman_exp: float = 0.2):
        """ Set up the renewable energy generation
        """

        # This needs to be repeated in every forecast
        self.roughness_length = roughness_length

        # Initialise empty forecast dataframe, just so nothing complains
        self.wind_forecast = pd.DataFrame()

        self.pv_forecast = pd.DataFrame()

        # Wind turbine(s)
        turbines = []

        for turbine in wind_turbines:
            turbines.append({
                'wind_turbine':
                WindTurbine(turbine['name'],
                            turbine['hub_height'],
                            nominal_power=turbine['nominal_power'],
                            rotor_diameter=turbine['rotor_diameter'],
                            power_curve=turbine['power_curve']),
                'number_of_turbines':
                turbine['qty']
            })

        local_wind_farm = WindFarm('Local windfarm', turbines,
                                   [latitude, longitude])

        # TODO - check for learned local data & overwrite power_curve

        self.wind_modelchain = TurbineClusterModelChain(
            local_wind_farm,
            smoothing=False,
            hellman_exp=hellman_exp,
        )

        # Initialise PV models
        self.pv_location = Location(latitude=latitude,
                                    longitude=longitude,
                                    altitude=altitude)

        # Now set up the PV array & system.
        cec_pv_model_params = pvlib.pvsystem.retrieve_sam('CECMod')
        sandia_pv_model_params = pvlib.pvsystem.retrieve_sam('SandiaMod')
        cec_inverter_model_params = pvlib.pvsystem.retrieve_sam('CECInverter')
        adr_inverter_model_params = pvlib.pvsystem.retrieve_sam('ADRInverter')

        self.pv_modelchains = {}

        for pv_array in pv_arrays:

            # Try to find the module names in the libraries
            if pv_array['module_name'] in cec_pv_model_params:
                pv_array['module_parameters'] = cec_pv_model_params[
                    pv_array['module_name']]
            elif pv_array['module_name'] in sandia_pv_model_params:
                pv_array['module_parameters'] = sandia_pv_model_params[
                    pv_array['module_name']]
            else:
                raise RenewablesException('Could not retrieve PV module data')

            # Do the same with the inverter(s)
            if pv_array['inverter_name'] in cec_inverter_model_params:
                pv_array['inverter_parameters'] = cec_inverter_model_params[
                    pv_array['inverter_name']]
            elif pv_array['inverter_name'] in adr_inverter_model_params:
                pv_array['inverter_parameters'] = adr_inverter_model_params[
                    pv_array['inverter_name']]
            else:
                raise RenewablesException('Could not retrieve PV module data')

            self.pv_modelchains[pv_array['name']] = ModelChain(
                PVSystem(**pv_array),
                self.pv_location,
                aoi_model='physical',
                spectral_model='no_loss')

    def make_generation_forecasts(self, forecast):
        """ Makes generation forecast data from the supplied Dark Sky forecast

        Arguments:
            forecast {pandas.DataFrame} -- DarkSky originated forecast
        """

        self.pv_forecast = self._make_pv_forecast(forecast)
        self.wind_forecast = self._make_wind_forecast(forecast)

    def _make_pv_forecast(self, forecast) -> pd.DataFrame:
        """Compile the forecast required for PV generation prediction

        Uses pvlib to generate solar irradiance predictions.

        Arguments:
            forecast {pandas.DataFrame} -- DarkSky originated forecast
        """

        # Annoyingly, the PV & wind libraries want temperature named differently
        pv_forecast = forecast.rename(columns={
            'temperature': 'air_temp',
            'windSpeed': 'wind_speed',
        })

        # Use PV lib to get insolation based on the cloud cover reported here

        model = GFS()

        # Next up, we get hourly solar irradiance using interpolated cloud cover
        # We can get this from the clearsky GHI...

        if 'tables' in sys.modules:
            # We can use Ineichen clear sky model (uses pytables for turbidity)
            clearsky = self.pv_location.get_clearsky(pv_forecast.index)

        else:
            # We can't, so use 'Simplified Solis'
            clearsky = self.pv_location.get_clearsky(pv_forecast.index,
                                                     model='simplified_solis')

        # ... and by knowledge of where the sun is
        solpos = self.pv_location.get_solarposition(pv_forecast.index)

        ghi = model.cloud_cover_to_ghi_linear(pv_forecast['cloudCover'] * 100,
                                              clearsky['ghi'])
        dni = disc(ghi, solpos['zenith'], pv_forecast.index)['dni']
        dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))

        # Whump it all together and we have our forecast!
        pv_forecast['dni'] = dni
        pv_forecast['dhi'] = dhi
        pv_forecast['ghi'] = ghi

        return pv_forecast

    def _make_wind_forecast(self, forecast) -> pd.DataFrame:
        """Creates forecast needed for wind generation prediction

        Creates renamed multidimensional columns needed for the windpowerlib
        system.

        Arguments:
            forecast {pandas.DataFrame} -- DarkSky originated forecast
        """

        # Easiest to build multiindexes up one by one.
        columns_index = pd.MultiIndex.from_tuples([('wind_speed', 10),
                                                   ('temperature', 10),
                                                   ('pressure', 10),
                                                   ('roughness_length', 0),
                                                   ('wind_bearing', 10)])
        wind_forecast = pd.DataFrame(index=forecast.index.copy(),
                                     columns=columns_index)
        wind_forecast.loc[:, ('wind_speed', 10)] = forecast['windSpeed'].loc[:]
        wind_forecast.loc[:,
                          ('temperature', 10)] = forecast['temperature'].loc[:]
        wind_forecast.loc[:, ('pressure', 10)] = forecast['pressure'].loc[:]
        wind_forecast.loc[:, ('wind_bearing',
                              10)] = forecast['windBearing'].loc[:]
        wind_forecast.loc[:, ('roughness_length', 0)] = self.roughness_length

        return wind_forecast

    def predict_generation(self, reserved_wind_consumption=0) -> pd.DataFrame:
        """ Predict electricity generated from forecast

        Will use the timestamp index of the forecast property to estimate
        instantaneous electricity generation. Returns table giving amounts in
        kWh.

        Arguments:
            reserved_wind_consumption {float} - constant amount that is assumed
                to be required from wind generation to meet other local need
        """

        prediction = pd.DataFrame(index=self.pv_forecast.index.copy())

        # First up - PV

        # Create a total gen column of zeros
        prediction['PV_AC_TOTAL'] = 0

        for pv_array, pv_model in self.pv_modelchains.items():

            pv_model.run_model(prediction.index, self.pv_forecast)
            output_column_name = 'PV_AC_' + pv_array
            prediction[output_column_name] = pv_model.ac

            # Add to the total column
            prediction['PV_AC_TOTAL'] = prediction['PV_AC_TOTAL'] + pv_model.ac

        # Next - wind power.
        self.wind_modelchain.run_model(self.wind_forecast)

        prediction['WIND_AC'] = self.wind_modelchain.power_output

        # Convert everything into kWh
        prediction = prediction * 0.001

        prediction['available_wind'] = prediction[
            'WIND_AC'] - reserved_wind_consumption
        prediction.loc[prediction['available_wind'] < 0, 'available_wind'] = 0
        prediction['total'] = prediction['WIND_AC'] + prediction['PV_AC_TOTAL']
        prediction['surplus'] = prediction['available_wind'] + prediction[
            'PV_AC_TOTAL']
        prediction.loc[prediction['surplus'] < 0, 'surplus'] = 0

        return prediction
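The DHI line in _make_pv_forecast above uses the closure relation GHI = DHI + DNI * cos(zenith), rearranged for DHI. A small numeric check with illustrative values:

import numpy as np

zenith = 60.0            # solar zenith angle in degrees
ghi, dni = 500.0, 600.0  # illustrative irradiance values, W/m^2
dhi = ghi - dni * np.cos(np.radians(zenith))
# reassembling GHI from its components recovers the input
assert abs((dhi + dni * np.cos(np.radians(zenith))) - ghi) < 1e-9
print(dhi)  # 200.0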
Example #17
 
base_rad['null_1'] = np.where(base_rad.rad_1.isnull(), 1, 0)

# Drop the values that are out of range

base_rad['range'] = np.where((base_rad.rad_1 >= -1) & (base_rad.rad_1 < 1500), 0, 1)

# The difference between consecutive readings must not exceed the limit;
# if a reading is NaN, treat it as a data point

base_rad['diff_0'] = (abs((base_rad[~base_rad.rad_1.isnull()].rad_1) - (base_rad[~base_rad.rad_1.isnull()].rad_1.shift(1))) < 555.)
base_rad['diff_1'] = np.where(base_rad.diff_0, 0, 1)

# Validate that the data do not exceed the maximum (clear-sky) values

tus = Location(float(estaciones[estaciones.cod == int(i[0:8])].LATITUD), float(estaciones[estaciones.cod == int(i[0:8])].LONGITUD), 'America/Bogota')
base_rad['sky_0'] = tus.get_clearsky(pd.DatetimeIndex(base_rad.date, tz='America/Bogota'))['ghi'].reset_index(drop=True)  # ineichen with climatology table by default

base_rad['sky_1'] = np.where(((base_rad.sky_0 + 10) > base_rad.rad_1), 0, 1)


## Summary table for extracting the statistics

#if (n_datos - len(base_rad[base_rad.null_1 == 0])) < 0:
n_datos = len(base_rad)

rad_tabla = rad_tabla.append(pd.DataFrame([{'cod': i[0:8],  # station code
        'isnull': (n_datos - len(base_rad[base_rad.null_1 == 0])),
        'total_isnull': n_datos,
        'range': base_rad[~base_rad.rad_1.isnull()].range.sum(),
        'diff': base_rad[~base_rad.rad_1.isnull()].diff_1.sum(),
Example #18
class ForecastModel(object):
    """
    An object for querying and holding forecast model information for
    use within the pvlib library.

    Simplifies use of siphon library on a THREDDS server.

    Parameters
    ----------
    model_type: string
        UNIDATA category in which the model is located.
    model_name: string
        Name of the UNIDATA forecast model.
    set_type: string
        Model dataset type.

    Attributes
    ----------
    access_url: string
        URL specifying the dataset from which data will be retrieved.
    base_tds_url : string
        The top level server address
    catalog_url : string
        The url path of the catalog to parse.
    data: pd.DataFrame
        Data returned from the query.
    data_format: string
        Format of the forecast data being requested from UNIDATA.
    dataset: Dataset
        Object containing information used to access forecast data.
    dataframe_variables: list
        Model variables that are present in the data.
    datasets_list: list
        List of all available datasets.
    fm_models: Dataset
        TDSCatalog object containing all available
        forecast models from UNIDATA.
    fm_models_list: list
        List of all available forecast models from UNIDATA.
    latitude: list
        A list of floats containing latitude values.
    location: Location
        A pvlib Location object containing geographic quantities.
    longitude: list
        A list of floats containing longitude values.
    lbox: boolean
        Indicates the use of a location bounding box.
    ncss: NCSS object
        NCSS
    model_name: string
        Name of the UNIDATA forecast model.
    model: Dataset
        A dictionary of Dataset objects, whose keys are the dataset names.
    model_url: string
        The url path of the dataset to parse.
    modelvariables: list
        Common variable names that correspond to queryvariables.
    query: NCSS query object
        NCSS object used to complete the forecast data retrieval.
    queryvariables: list
        Variables that are used to query the THREDDS Data Server.
    time: DatetimeIndex
        Time range.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    vert_level: float or integer
        Vertical altitude for query data.
    """

    access_url_key = 'NetcdfSubset'
    catalog_url = 'http://thredds.ucar.edu/thredds/catalog.xml'
    base_tds_url = catalog_url.split('/thredds/')[0]
    data_format = 'netcdf'
    vert_level = 100000

    units = {
        'temp_air': 'C',
        'wind_speed': 'm/s',
        'ghi': 'W/m^2',
        'ghi_raw': 'W/m^2',
        'dni': 'W/m^2',
        'dhi': 'W/m^2',
        'total_clouds': '%',
        'low_clouds': '%',
        'mid_clouds': '%',
        'high_clouds': '%'}

    def __init__(self, model_type, model_name, set_type):
        self.model_type = model_type
        self.model_name = model_name
        self.set_type = set_type
        self.catalog = TDSCatalog(self.catalog_url)
        self.fm_models = TDSCatalog(self.catalog.catalog_refs[model_type].href)
        self.fm_models_list = sorted(list(self.fm_models.catalog_refs.keys()))

        try:
            model_url = self.fm_models.catalog_refs[model_name].href
        except ParseError:
            raise ParseError(self.model_name + ' model may be unavailable.')

        try:
            self.model = TDSCatalog(model_url)
        except HTTPError:
            try:
                self.model = TDSCatalog(model_url)
            except HTTPError:
                raise HTTPError(self.model_name + ' model may be unavailable.')

        self.datasets_list = list(self.model.datasets.keys())
        self.set_dataset()

    def __repr__(self):
        return '{}, {}'.format(self.model_name, self.set_type)

    def set_dataset(self):
        '''
        Retrieves the designated dataset, creates NCSS object, and
        creates a NCSS query object.
        '''

        keys = list(self.model.datasets.keys())
        labels = [item.split()[0].lower() for item in keys]
        if self.set_type == 'best':
            self.dataset = self.model.datasets[keys[labels.index('best')]]
        elif self.set_type == 'latest':
            self.dataset = self.model.datasets[keys[labels.index('latest')]]
        elif self.set_type == 'full':
            self.dataset = self.model.datasets[keys[labels.index('full')]]

        self.access_url = self.dataset.access_urls[self.access_url_key]
        self.ncss = NCSS(self.access_url)
        self.query = self.ncss.query()

    def set_query_latlon(self):
        '''
        Sets the NCSS query location latitude and longitude.
        '''

        if (isinstance(self.longitude, list) and
            isinstance(self.latitude, list)):
            self.lbox = True
            # west, east, south, north
            self.query.lonlat_box(self.longitude[0], self.longitude[1],
                                  self.latitude[0], self.latitude[1])
        else:
            self.lbox = False
            self.query.lonlat_point(self.longitude, self.latitude)

    def set_location(self, time, latitude, longitude):
        '''
        Sets the location for the query.

        Parameters
        ----------
        time: datetime or DatetimeIndex
            Time range of the query.
        '''
        if isinstance(time, datetime.datetime):
            tzinfo = time.tzinfo
        else:
            tzinfo = time.tz

        if tzinfo is None:
            self.location = Location(latitude, longitude)
        else:
            self.location = Location(latitude, longitude, tz=tzinfo)

    def get_data(self, latitude, longitude, start, end,
                 vert_level=None, query_variables=None,
                 close_netcdf_data=True):
        """
        Submits a query to the UNIDATA servers using Siphon NCSS and
        converts the netcdf data to a pandas DataFrame.

        Parameters
        ----------
        latitude: float
            The latitude value.
        longitude: float
            The longitude value.
        start: datetime or timestamp
            The start time.
        end: datetime or timestamp
            The end time.
        vert_level: None, float or integer
            Vertical altitude of interest.
        variables: None or list
            If None, uses self.variables.
        close_netcdf_data: bool
            Controls if the temporary netcdf data file should be closed.
            Set to False to access the raw data.

        Returns
        -------
        forecast_data : DataFrame
            column names are the weather model's variable names.
        """
        if vert_level is not None:
            self.vert_level = vert_level

        if query_variables is None:
            self.query_variables = list(self.variables.values())
        else:
            self.query_variables = query_variables

        self.latitude = latitude
        self.longitude = longitude
        self.set_query_latlon()  # modifies self.query
        self.set_location(start, latitude, longitude)

        self.start = start
        self.end = end
        self.query.time_range(self.start, self.end)

        self.query.vertical_level(self.vert_level)
        self.query.variables(*self.query_variables)
        self.query.accept(self.data_format)

        self.netcdf_data = self.ncss.get_data(self.query)

        # might be better to go to xarray here so that we can handle
        # higher dimensional data for more advanced applications
        self.data = self._netcdf2pandas(self.netcdf_data, self.query_variables)

        if close_netcdf_data:
            self.netcdf_data.close()

        return self.data

    def process_data(self, data, **kwargs):
        """
        Defines the steps needed to convert raw forecast data
        into processed forecast data. Most forecast models implement
        their own version of this method which also call this one.

        Parameters
        ----------
        data: DataFrame
            Raw forecast data

        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        data = self.rename(data)
        return data

    def get_processed_data(self, *args, **kwargs):
        """
        Get and process forecast data.

        Parameters
        ----------
        *args: positional arguments
            Passed to get_data
        **kwargs: keyword arguments
            Passed to get_data and process_data

        Returns
        -------
        data: DataFrame
            Processed forecast data
        """
        return self.process_data(self.get_data(*args, **kwargs), **kwargs)

    def rename(self, data, variables=None):
        """
        Renames the columns according to the variable mapping.

        Parameters
        ----------
        data: DataFrame
        variables: None or dict
            If None, uses self.variables

        Returns
        -------
        data: DataFrame
            Renamed data.
        """
        if variables is None:
            variables = self.variables
        return data.rename(columns={y: x for x, y in variables.items()})

    def _netcdf2pandas(self, netcdf_data, query_variables):
        """
        Transforms data from netcdf to pandas DataFrame.

        Parameters
        ----------
        data: netcdf
            Data returned from UNIDATA NCSS query.
        query_variables: list
            The variables requested.

        Returns
        -------
        pd.DataFrame
        """
        # set self.time
        try:
            time_var = 'time'
            self.set_time(netcdf_data.variables[time_var])
        except KeyError:
            # which model does this dumb thing?
            time_var = 'time1'
            self.set_time(netcdf_data.variables[time_var])

        data_dict = {key: data[:].squeeze() for key, data in
                     netcdf_data.variables.items() if key in query_variables}

        return pd.DataFrame(data_dict, index=self.time)

    def set_time(self, time):
        '''
        Converts time data into a pandas date object.

        Parameters
        ----------
        time: netcdf
            Contains time information.

        Returns
        -------
        pandas.DatetimeIndex
        '''
        times = num2date(time[:].squeeze(), time.units)
        self.time = pd.DatetimeIndex(pd.Series(times), tz=self.location.tz)

    def cloud_cover_to_ghi_linear(self, cloud_cover, ghi_clear, offset=35,
                                  **kwargs):
        """
        Convert cloud cover to GHI using a linear relationship.

        0% cloud cover returns ghi_clear.

        100% cloud cover returns offset*ghi_clear.

        Parameters
        ----------
        cloud_cover: numeric
            Cloud cover in %.
        ghi_clear: numeric
            GHI under clear sky conditions.
        offset: numeric
            Determines the minimum GHI.
        kwargs
            Not used.

        Returns
        -------
        ghi: numeric
            Estimated GHI.

        References
        ----------
        Larson et. al. "Day-ahead forecasting of solar power output from
        photovoltaic plants in the American Southwest" Renewable Energy
        91, 11-20 (2016).
        """

        offset = offset / 100.
        cloud_cover = cloud_cover / 100.
        ghi = (offset + (1 - offset) * (1 - cloud_cover)) * ghi_clear
        return ghi

    def cloud_cover_to_irradiance_clearsky_scaling(self, cloud_cover,
                                                   method='linear',
                                                   **kwargs):
        """
        Estimates irradiance from cloud cover in the following steps:

        1. Determine clear sky GHI using Ineichen model and
           climatological turbidity.
        2. Estimate cloudy sky GHI using a function of
           cloud_cover e.g.
           :py:meth:`~ForecastModel.cloud_cover_to_ghi_linear`
        3. Estimate cloudy sky DNI using the DISC model.
        4. Calculate DHI from GHI and DNI.

        Parameters
        ----------
        cloud_cover : Series
            Cloud cover in %.
        method : str
            Method for converting cloud cover to GHI.
            'linear' is currently the only option.
        **kwargs
            Passed to the method that does the conversion

        Returns
        -------
        irrads : DataFrame
            Estimated GHI, DNI, and DHI.
        """
        solpos = self.location.get_solarposition(cloud_cover.index)
        cs = self.location.get_clearsky(cloud_cover.index, model='ineichen',
                                        solar_position=solpos)

        method = method.lower()
        if method == 'linear':
            ghi = self.cloud_cover_to_ghi_linear(cloud_cover, cs['ghi'],
                                                 **kwargs)
        else:
            raise ValueError('invalid method argument')

        dni = disc(ghi, solpos['zenith'], cloud_cover.index)['dni']
        dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))

        irrads = pd.DataFrame({'ghi': ghi, 'dni': dni, 'dhi': dhi}).fillna(0)
        return irrads

    def cloud_cover_to_transmittance_linear(self, cloud_cover, offset=0.75,
                                            **kwargs):
        """
        Convert cloud cover to atmospheric transmittance using a linear
        model.

        0% cloud cover returns offset.

        100% cloud cover returns 0.

        Parameters
        ----------
        cloud_cover : numeric
            Cloud cover in %.
        offset : numeric
            Determines the maximum transmittance.
        kwargs
            Not used.

        Returns
        -------
        ghi : numeric
            Estimated GHI.
        """
        transmittance = ((100.0 - cloud_cover) / 100.0) * offset

        return transmittance

    def cloud_cover_to_irradiance_liujordan(self, cloud_cover, **kwargs):
        """
        Estimates irradiance from cloud cover in the following steps:

        1. Determine transmittance using a function of cloud cover e.g.
           :py:meth:`~ForecastModel.cloud_cover_to_transmittance_linear`
        2. Calculate GHI, DNI, DHI using the
           :py:func:`pvlib.irradiance.liujordan` model

        Parameters
        ----------
        cloud_cover : Series

        Returns
        -------
        irradiance : DataFrame
            Columns include ghi, dni, dhi
        """
        # in principle, get_solarposition could use the forecast
        # pressure, temp, etc., but the cloud cover forecast is not
        # accurate enough to justify using these minor corrections
        solar_position = self.location.get_solarposition(cloud_cover.index)
        dni_extra = extraradiation(cloud_cover.index)
        airmass = self.location.get_airmass(cloud_cover.index)

        transmittance = self.cloud_cover_to_transmittance_linear(cloud_cover,
                                                                 **kwargs)

        irrads = liujordan(solar_position['apparent_zenith'],
                           transmittance, airmass['airmass_absolute'],
                           dni_extra=dni_extra)
        irrads = irrads.fillna(0)

        return irrads

    def cloud_cover_to_irradiance(self, cloud_cover, how='clearsky_scaling',
                                  **kwargs):
        """
        Convert cloud cover to irradiance. A wrapper method.

        Parameters
        ----------
        cloud_cover : Series
        how : str
            Selects the method for conversion. Can be one of
            clearsky_scaling or liujordan.
        **kwargs
            Passed to the selected method.

        Returns
        -------
        irradiance : DataFrame
            Columns include ghi, dni, dhi
        """

        how = how.lower()
        if how == 'clearsky_scaling':
            irrads = self.cloud_cover_to_irradiance_clearsky_scaling(
                cloud_cover, **kwargs)
        elif how == 'liujordan':
            irrads = self.cloud_cover_to_irradiance_liujordan(
                cloud_cover, **kwargs)
        else:
            raise ValueError('invalid how argument')

        return irrads

    def kelvin_to_celsius(self, temperature):
        """
        Converts Kelvin to Celsius.

        Parameters
        ----------
        temperature: numeric

        Returns
        -------
        temperature: numeric
        """
        return temperature - 273.15

    def isobaric_to_ambient_temperature(self, data):
        """
        Calculates temperature from isobaric temperature.

        Parameters
        ----------
        data: DataFrame
            Must contain columns pressure, temperature_iso,
            temperature_dew_iso. Input temperature in K.

        Returns
        -------
        temperature : Series
            Temperature in K
        """

        P = data['pressure'] / 100.0
        Tiso = data['temperature_iso']
        Td = data['temperature_dew_iso'] - 273.15

        # saturation water vapor pressure
        e = 6.11 * 10**((7.5 * Td) / (Td + 273.3))

        # saturation water vapor mixing ratio
        w = 0.622 * (e / (P - e))

        T = Tiso - ((2.501 * 10.**6) / 1005.7) * w

        return T

    def uv_to_speed(self, data):
        """
        Computes wind speed from wind components.

        Parameters
        ----------
        data : DataFrame
            Must contain the columns 'wind_speed_u' and 'wind_speed_v'.

        Returns
        -------
        wind_speed : Series
        """
        wind_speed = np.sqrt(data['wind_speed_u']**2 + data['wind_speed_v']**2)

        return wind_speed

    def gust_to_speed(self, data, scaling=1/1.4):
        """
        Computes standard wind speed from gust.
        Very approximate and location dependent.

        Parameters
        ----------
        data : DataFrame
            Must contain the column 'wind_speed_gust'.

        Returns
        -------
        wind_speed : Series
        """
        wind_speed = data['wind_speed_gust'] * scaling

        return wind_speed
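As a worked check of cloud_cover_to_ghi_linear above: with the default offset of 35, 50% cloud cover scales the clear-sky GHI by 0.35 + (1 - 0.35) * (1 - 0.5) = 0.675. The numbers below are illustrative:

offset = 35 / 100.
cloud_cover = 50 / 100.
ghi_clear = 800.0
ghi = (offset + (1 - offset) * (1 - cloud_cover)) * ghi_clear
assert abs(ghi - 540.0) < 1e-9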
Example #19
def test_get_clearsky_valueerror(times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    with pytest.raises(ValueError):
        clearsky = tus.get_clearsky(times, model='invalid_model')
Example #20
def find_clearsky_poa(df,
                      lat,
                      lon,
                      irradiance_poa_key='irradiance_poa_o_###',
                      mounting='fixed',
                      tilt=0,
                      azimuth=180,
                      altitude=0):
    loc = Location(lat, lon, altitude=altitude)

    CS = loc.get_clearsky(df.index)

    df['csghi'] = CS.ghi
    df['csdhi'] = CS.dhi
    df['csdni'] = CS.dni

    if mounting.lower() == "fixed":
        sun = get_solarposition(df.index, lat, lon)

        fixedpoa = get_total_irradiance(tilt, azimuth, sun.zenith, sun.azimuth,
                                        CS.dni, CS.ghi, CS.dhi)

        df['cspoa'] = fixedpoa.poa_global

    if mounting.lower() == "tracking":
        sun = get_solarposition(df.index, lat, lon)

        # default to axis_tilt=0 and axis_azimuth=180

        tracker_data = singleaxis(sun.apparent_zenith,
                                  sun.azimuth,
                                  axis_tilt=tilt,
                                  axis_azimuth=azimuth,
                                  max_angle=50,
                                  backtrack=True,
                                  gcr=0.35)

        track = get_total_irradiance(tracker_data['surface_tilt'],
                                     tracker_data['surface_azimuth'],
                                     sun.zenith, sun.azimuth, CS.dni, CS.ghi,
                                     CS.dhi)

        df['cspoa'] = track.poa_global

    # The code below assumes the clear-sky POA has been generated by pvlib,
    # aligned on the same datetime index, and that daylight saving or other
    # time shifts were corrected beforehand.
    # The inputs below were tuned for POA at a 15 minute frequency.
    # Note that detect_clearsky has a scaling factor, but scaling the
    # measured POA first still gave slightly different results.

    df['poa'] = df[irradiance_poa_key] / df[irradiance_poa_key].quantile(
        0.98) * df.cspoa.quantile(0.98)

    # inputs for detect_clearsky

    measured = df.poa.copy()
    clear = df.cspoa.copy()
    dur = 60
    lower_line_length = -41.416
    upper_line_length = 77.789
    var_diff = .00745
    mean_diff = 80
    max_diff = 90
    slope_dev = 3

    is_clear_results = detect_clearsky(measured.values,
                                       clear.values,
                                       df.index,
                                       dur,
                                       mean_diff,
                                       max_diff,
                                       lower_line_length,
                                       upper_line_length,
                                       var_diff,
                                       slope_dev,
                                       return_components=True)

    clearSeries = pd.Series(index=df.index, data=is_clear_results[0])

    clearSeries = clearSeries.reindex(index=df.index, method='ffill', limit=3)

    return clearSeries
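A hedged usage sketch for find_clearsky_poa above, assuming the pvlib imports used inside the function are in scope. The frame, column name, and site coordinates are hypothetical; the index should be tz-aware at roughly the 15 minute frequency the detect_clearsky thresholds were tuned for:

import numpy as np
import pandas as pd

# Hypothetical measured plane-of-array data; 'irradiance_poa_o_1' is a
# made-up column name standing in for real sensor output.
idx = pd.date_range('2020-06-01', '2020-06-08', freq='15min', tz='US/Arizona')
df = pd.DataFrame({'irradiance_poa_o_1': np.random.rand(len(idx)) * 1000},
                  index=idx)

is_clear = find_clearsky_poa(df, 32.2, -111.0,
                             irradiance_poa_key='irradiance_poa_o_1',
                             mounting='fixed', tilt=30, azimuth=180,
                             altitude=700)
print(is_clear.sum(), 'samples flagged clear')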
Example #21
#  TEST year 2015
times = pd.date_range(start='2015-01-01', end='2016-01-01', freq='1min', tz=bvl.tz)   # 12 months
# TEST year 2016
#times = pd.date_range(start='2016-01-01', end='2017-01-01', freq='1min', tz=bvl.tz)   # 12 months
# Test year 2017
#times = pd.date_range(start='2017-01-01', end='2018-01-01', freq='1min', tz=bvl.tz)   # 12 months




if run_train:
    # TRAIN set
    times2010and2011 = pd.date_range(start='2010-01-01', end='2012-01-01', freq='1min',
                                     tz=bvl.tz)   # 24 months of 2010 and 2011 - for training
    cs_2010and2011 = bvl.get_clearsky(times2010and2011)  # ineichen with climatology table by default
    cs_2010and2011.drop(['dni', 'dhi'], axis=1, inplace=True)  # update the same dataframe by dropping two columns
    cs_2010and2011.reset_index(inplace=True)

    cs_2010and2011['index'] = cs_2010and2011['index'].apply(lambda x: x.to_pydatetime())
    cs_2010and2011['year'] = cs_2010and2011['index'].apply(lambda x: x.year)
    cs_2010and2011['month'] = cs_2010and2011['index'].apply(lambda x: x.month)
    cs_2010and2011['day'] = cs_2010and2011['index'].apply(lambda x: x.day)
    cs_2010and2011['hour'] = cs_2010and2011['index'].apply(lambda x: x.hour)
    cs_2010and2011['min'] = cs_2010and2011['index'].apply(lambda x: x.minute)

    cs_2010and2011.drop(cs_2010and2011.index[-1], inplace=True)
    print(cs_2010and2011.shape)
    cs_2010and2011.head()
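If the to_pydatetime conversion is skipped, the 'index' column stays datetime64 and the repeated .apply calls above collapse to the vectorized .dt accessor; a sketch of the same feature extraction:

ts = cs_2010and2011['index']  # datetime64 column after reset_index
cs_2010and2011['year'] = ts.dt.year
cs_2010and2011['month'] = ts.dt.month
cs_2010and2011['day'] = ts.dt.day
cs_2010and2011['hour'] = ts.dt.hour
cs_2010and2011['min'] = ts.dt.minute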
Example #22
from conftest import (requires_ephem, requires_numba, needs_numpy_1_10,
                      pandas_0_22)

# setup times and location to be tested.
tus = Location(32.2, -111, 'US/Arizona', 700)

# must include night values
times = pd.date_range(start='20140624', freq='6H', periods=4, tz=tus.tz)

ephem_data = solarposition.get_solarposition(times,
                                             tus.latitude,
                                             tus.longitude,
                                             method='nrel_numpy')

irrad_data = tus.get_clearsky(times, model='ineichen', linke_turbidity=3)

dni_et = irradiance.extraradiation(times.dayofyear)

ghi = irrad_data['ghi']

# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203

Example #23
def get_pvlib_data(latitude, longitude, tz, altitude, city, start_time,
                   end_time):

    # getting turbidity tables
    pvlib_path = os.path.dirname(os.path.abspath(pvlib.clearsky.__file__))
    filepath = os.path.join(pvlib_path, 'data', 'LinkeTurbidities.h5')

    def plot_turbidity_map(month, vmin=1, vmax=100):
        plt.figure()
        with tables.open_file(filepath) as lt_h5_file:
            ltdata = lt_h5_file.root.LinkeTurbidity[:, :, month - 1]
        plt.imshow(ltdata, vmin=vmin, vmax=vmax)
        # data is in units of 20 x turbidity
        plt.title('Linke turbidity x 20, ' + calendar.month_name[month])
        plt.colorbar(shrink=0.5)
        plt.tight_layout()

    plot_turbidity_map(1)

    plot_turbidity_map(7)

    # getting clearsky estimates
    loc = Location(latitude, longitude, tz, altitude, city)
    times = pd.date_range(start=start_time, end=end_time, freq='H', tz=loc.tz)
    cs = loc.get_clearsky(times)

    # getting pvlib forecasted irradiance based on cloud_cover
    #irrad_vars = ['ghi', 'dni', 'dhi']
    model = GFS()
    raw_data = model.get_data(latitude, longitude, start_time, end_time)
    data = raw_data

    # rename the columns according the key/value pairs in model.variables.
    data = model.rename(data)

    # convert temperature
    data['temp_air'] = model.kelvin_to_celsius(data['temp_air'])

    # convert wind components to wind speed
    data['wind_speed'] = model.uv_to_speed(data)

    # calculate irradiance estimates from cloud cover.
    # uses a cloud_cover to ghi to dni model or a
    # uses a cloud cover to transmittance to irradiance model.
    irrad_data = model.cloud_cover_to_irradiance(data['total_clouds'])

    # correcting timezone
    data.index = data.index.tz_convert(loc.tz)
    irrad_data.index = irrad_data.index.tz_convert(loc.tz)

    # joining cloud_cover and irradiance data frames
    data = data.join(irrad_data, how='outer')

    # renaming irradiance estimates
    cs.rename(columns={
        'ghi': 'GHI_clearsky',
        'dhi': 'DHI_clearsky',
        'dni': 'DNI_clearsky'
    },
              inplace=True)

    data.rename(columns={
        'ghi': 'GHI_pvlib',
        'dhi': 'DHI_pvlib',
        'dni': 'DNI_pvlib'
    },
                inplace=True)

    # joining clearsky with cloud_cover irradiances
    data = data.join(cs, how='outer')
    return (data)
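A hedged call sketch for get_pvlib_data above; the date window is illustrative, and the call needs network access for the GFS query plus matplotlib and the bundled turbidity file (start_time must fall inside the model's forecast horizon):

import datetime
import pandas as pd

start = pd.Timestamp(datetime.datetime.now(), tz='US/Arizona')
end = start + pd.Timedelta(days=3)
data = get_pvlib_data(32.2, -111, 'US/Arizona', 700, 'Tucson', start, end)
print(data.filter(like='GHI').head())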
Example #24
def test_get_clearsky_valueerror(times):
    tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
    with pytest.raises(ValueError):
        tus.get_clearsky(times, model='invalid_model')
Example #25
class ForecastModel(object):
    """
    An object for querying and holding forecast model information for
    use within the pvlib library.

    Simplifies use of siphon library on a THREDDS server.

    Parameters
    ----------
    model_type: string
        UNIDATA category in which the model is located.
    model_name: string
        Name of the UNIDATA forecast model.
    set_type: string
        Model dataset type.

    Attributes
    ----------
    access_url: string
        URL specifying the dataset from which data will be retrieved.
    base_tds_url : string
        The top level server address
    catalog_url : string
        The url path of the catalog to parse.
    data: pd.DataFrame
        Data returned from the query.
    data_format: string
        Format of the forecast data being requested from UNIDATA.
    dataset: Dataset
        Object containing information used to access forecast data.
    dataframe_variables: list
        Model variables that are present in the data.
    datasets_list: list
        List of all available datasets.
    fm_models: Dataset
        TDSCatalog object containing all available
        forecast models from UNIDATA.
    fm_models_list: list
        List of all available forecast models from UNIDATA.
    latitude: list
        A list of floats containing latitude values.
    location: Location
        A pvlib Location object containing geographic quantities.
    longitude: list
        A list of floats containing longitude values.
    lbox: boolean
        Indicates the use of a location bounding box.
    ncss: NCSS object
        NCSS
    model_name: string
        Name of the UNIDATA forecast model.
    model: Dataset
        A dictionary of Dataset objects, whose keys are the dataset names.
    model_url: string
        The url path of the dataset to parse.
    modelvariables: list
        Common variable names that correspond to queryvariables.
    query: NCSS query object
        NCSS object used to complete the forecast data retrieval.
    queryvariables: list
        Variables that are used to query the THREDDS Data Server.
    time: DatetimeIndex
        Time range.
    variables: dict
        Defines the variables to obtain from the weather
        model and how they should be renamed to common variable names.
    units: dict
        Dictionary containing the units of the standard variables
        and the model specific variables.
    vert_level: float or integer
        Vertical altitude for query data.
    """

    access_url_key = 'NetcdfSubset'
    catalog_url = 'https://thredds.ucar.edu/thredds/catalog.xml'
    base_tds_url = catalog_url.split('/thredds/')[0]
    data_format = 'netcdf'

    units = {
        'temp_air': 'C',
        'wind_speed': 'm/s',
        'ghi': 'W/m^2',
        'ghi_raw': 'W/m^2',
        'dni': 'W/m^2',
        'dhi': 'W/m^2',
        'total_clouds': '%',
        'low_clouds': '%',
        'mid_clouds': '%',
        'high_clouds': '%'
    }

    def __init__(self, model_type, model_name, set_type, vert_level=None):
        self.model_type = model_type
        self.model_name = model_name
        self.set_type = set_type
        self.connected = False
        self.vert_level = vert_level

    def connect_to_catalog(self):
        self.catalog = TDSCatalog(self.catalog_url)
        self.fm_models = TDSCatalog(
            self.catalog.catalog_refs[self.model_type].href)
        self.fm_models_list = sorted(list(self.fm_models.catalog_refs.keys()))

        try:
            model_url = self.fm_models.catalog_refs[self.model_name].href
        except ParseError:
            raise ParseError(self.model_name + ' model may be unavailable.')

        try:
            self.model = TDSCatalog(model_url)
        except HTTPError:
            try:
                self.model = TDSCatalog(model_url)
            except HTTPError:
                raise HTTPError(self.model_name + ' model may be unavailable.')

        self.datasets_list = list(self.model.datasets.keys())
        self.set_dataset()
        self.connected = True

    def __repr__(self):
        return '{}, {}'.format(self.model_name, self.set_type)

    def set_dataset(self):
        '''
        Retrieves the designated dataset, creates NCSS object, and
        creates a NCSS query object.
        '''

        keys = list(self.model.datasets.keys())
        labels = [item.split()[0].lower() for item in keys]
        if self.set_type == 'best':
            self.dataset = self.model.datasets[keys[labels.index('best')]]
        elif self.set_type == 'latest':
            self.dataset = self.model.datasets[keys[labels.index('latest')]]
        elif self.set_type == 'full':
            self.dataset = self.model.datasets[keys[labels.index('full')]]

        self.access_url = self.dataset.access_urls[self.access_url_key]
        self.ncss = NCSS(self.access_url)
        self.query = self.ncss.query()

    def set_query_time_range(self, start, end):
        """
        Parameters
        ----------
        start : datetime.datetime, pandas.Timestamp
            Must be tz-localized.
        end : datetime.datetime, pandas.Timestamp
            Must be tz-localized.

        Notes
        -----
        Assigns ``self.start``, ``self.end``. Modifies ``self.query``
        """
        self.start = pd.Timestamp(start)
        self.end = pd.Timestamp(end)
        if self.start.tz is None or self.end.tz is None:
            raise TypeError('start and end must be tz-localized')
        self.query.time_range(self.start, self.end)

    def set_query_latlon(self):
        '''
        Sets the NCSS query location latitude and longitude.
        '''

        if (isinstance(self.longitude, list)
                and isinstance(self.latitude, list)):
            self.lbox = True
            # west, east, south, north
            self.query.lonlat_box(self.longitude[0], self.longitude[1],
                                  self.latitude[0], self.latitude[1])
        else:
            self.lbox = False
            self.query.lonlat_point(self.longitude, self.latitude)

    def set_location(self, tz, latitude, longitude):
        '''
        Sets the location for the query.

        Parameters
        ----------
        tz: tzinfo
            Timezone of the query
        latitude: float
            Latitude of the query
        longitude: float
            Longitude of the query

        Notes
        -----
        Assigns ``self.location``.
        '''
        self.location = Location(latitude, longitude, tz=tz)

    def get_data(self,
                 latitude,
                 longitude,
                 start,
                 end,
                 vert_level=None,
                 query_variables=None,
                 close_netcdf_data=True,
                 **kwargs):
        """
        Submits a query to the UNIDATA servers using Siphon NCSS and
        converts the netcdf data to a pandas DataFrame.

        Parameters
        ----------
        latitude: float
            The latitude value.
        longitude: float
            The longitude value.
        start: datetime or timestamp
            The start time.
        end: datetime or timestamp
            The end time.
        vert_level: None, float or integer, default None
            Vertical altitude of interest.
        query_variables: None or list, default None
            If None, uses self.variables.
        close_netcdf_data: bool, default True
            Controls if the temporary netcdf data file should be closed.
            Set to False to access the raw data.
        **kwargs:
            Additional keyword arguments are silently ignored.

        Returns
        -------
        forecast_data : DataFrame
            column names are the weather model's variable names.
        """

        if not self.connected:
            self.connect_to_catalog()

        if vert_level is not None:
            self.vert_level = vert_level

        if query_variables is None:
            self.query_variables = list(self.variables.values())
        else:
            self.query_variables = query_variables

        self.set_query_time_range(start, end)

        self.latitude = latitude
        self.longitude = longitude
        self.set_query_latlon()  # modifies self.query
        self.set_location(self.start.tz, latitude, longitude)

        if self.vert_level is not None:
            self.query.vertical_level(self.vert_level)

        self.query.variables(*self.query_variables)
        self.query.accept(self.data_format)

        self.netcdf_data = self.ncss.get_data(self.query)

        # might be better to go to xarray here so that we can handle
        # higher dimensional data for more advanced applications
        self.data = self._netcdf2pandas(self.netcdf_data, self.query_variables,
                                        self.start, self.end)

        if close_netcdf_data:
            self.netcdf_data.close()

        return self.data

    def process_data(self, data, **kwargs):
        """
        Defines the steps needed to convert raw forecast data
        into processed forecast data. Most forecast models implement
        their own version of this method, which also calls this one.

        Parameters
        ----------
        data: DataFrame
            Raw forecast data

        Returns
        -------
        data: DataFrame
            Processed forecast data.
        """
        data = self.rename(data)
        return data

    def get_processed_data(self, *args, **kwargs):
        """
        Get and process forecast data.

        Parameters
        ----------
        *args: positional arguments
            Passed to get_data
        **kwargs: keyword arguments
            Passed to get_data and process_data

        Returns
        -------
        data: DataFrame
            Processed forecast data
        """
        return self.process_data(self.get_data(*args, **kwargs), **kwargs)

    def rename(self, data, variables=None):
        """
        Renames the columns according to the variable mapping.

        Parameters
        ----------
        data: DataFrame
        variables: None or dict, default None
            If None, uses self.variables

        Returns
        -------
        data: DataFrame
            Renamed data.
        """
        if variables is None:
            variables = self.variables
        return data.rename(columns={y: x for x, y in variables.items()})
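
    # Illustrative example (hypothetical mapping): with
    #   variables = {'temp_air': 'Temperature_surface'}
    # a raw column named 'Temperature_surface' is renamed to 'temp_air'.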

    def _netcdf2pandas(self, netcdf_data, query_variables, start, end):
        """
        Transforms data from netcdf to pandas DataFrame.

        Parameters
        ----------
        netcdf_data: netcdf
            Data returned from UNIDATA NCSS query.
        query_variables: list
            The variables requested.
        start: Timestamp
            The start time.
        end: Timestamp
            The end time.

        Returns
        -------
        pd.DataFrame
        """
        # set self.time
        try:
            time_var = 'time'
            self.set_time(netcdf_data.variables[time_var])
        except KeyError:
            # some models name the time variable 'time1' instead of 'time'
            time_var = 'time1'
            self.set_time(netcdf_data.variables[time_var])

        data_dict = {}
        for key, data in netcdf_data.variables.items():
            # skip any extra variables returned beyond those requested
            if key not in query_variables:
                continue
            squeezed = data[:].squeeze()

            # If the data is big endian, swap the byte order to make it
            # little endian
            if squeezed.dtype.byteorder == '>':
                squeezed = squeezed.byteswap().newbyteorder()
            if squeezed.ndim == 1:
                data_dict[key] = squeezed
            elif squeezed.ndim == 2:
                for num, data_level in enumerate(squeezed.T):
                    data_dict[key + '_' + str(num)] = data_level
            else:
                raise ValueError('cannot parse ndim > 2')

        data = pd.DataFrame(data_dict, index=self.time)
        # sometimes data is returned as hours since T0
        # where T0 is before start. Then the hours between
        # T0 and start are added *after* end. So sort and slice
        # to remove the garbage
        data = data.sort_index().loc[start:end]
        return data

    def set_time(self, time):
        '''
        Converts netcdf time data into a pandas DatetimeIndex.

        Parameters
        ----------
        time: netcdf
            Contains time information.

        Notes
        -----
        Assigns ``self.time``.
        '''
        times = num2date(time[:].squeeze(),
                         time.units,
                         only_use_cftime_datetimes=False,
                         only_use_python_datetimes=True)
        self.time = pd.DatetimeIndex(pd.Series(times), tz=self.location.tz)

    def cloud_cover_to_ghi_linear(self,
                                  cloud_cover,
                                  ghi_clear,
                                  offset=35,
                                  **kwargs):
        """
        Convert cloud cover to GHI using a linear relationship.

        0% cloud cover returns ghi_clear.

        100% cloud cover returns offset*ghi_clear.

        Parameters
        ----------
        cloud_cover: numeric
            Cloud cover in %.
        ghi_clear: numeric
            GHI under clear sky conditions.
        offset: numeric, default 35
            Determines the minimum GHI.
        kwargs
            Not used.

        Returns
        -------
        ghi: numeric
            Estimated GHI.

        References
        ----------
        Larson et. al. "Day-ahead forecasting of solar power output from
        photovoltaic plants in the American Southwest" Renewable Energy
        91, 11-20 (2016).
        """

        offset = offset / 100.
        cloud_cover = cloud_cover / 100.
        ghi = (offset + (1 - offset) * (1 - cloud_cover)) * ghi_clear
        return ghi
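
    # Worked example of the linear relationship above: with the default
    # offset of 35 and 50% cloud cover,
    #   ghi = (0.35 + 0.65 * 0.5) * ghi_clear = 0.675 * ghi_clear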

    def cloud_cover_to_irradiance_clearsky_scaling(self,
                                                   cloud_cover,
                                                   method='linear',
                                                   **kwargs):
        """
        Estimates irradiance from cloud cover in the following steps:

        1. Determine clear sky GHI using Ineichen model and
           climatological turbidity.
        2. Estimate cloudy sky GHI using a function of cloud_cover, e.g.,
           :py:meth:`~ForecastModel.cloud_cover_to_ghi_linear`
        3. Estimate cloudy sky DNI using the DISC model.
        4. Calculate DHI from DNI and GHI.

        Parameters
        ----------
        cloud_cover : Series
            Cloud cover in %.
        method : str, default 'linear'
            Method for converting cloud cover to GHI.
            'linear' is currently the only option.
        **kwargs
            Passed to the method that does the conversion.

        Returns
        -------
        irrads : DataFrame
            Estimated GHI, DNI, and DHI.
        """
        solpos = self.location.get_solarposition(cloud_cover.index)
        cs = self.location.get_clearsky(cloud_cover.index,
                                        model='ineichen',
                                        solar_position=solpos)

        method = method.lower()
        if method == 'linear':
            ghi = self.cloud_cover_to_ghi_linear(cloud_cover, cs['ghi'],
                                                 **kwargs)
        else:
            raise ValueError('invalid method argument')

        dni = disc(ghi, solpos['zenith'], cloud_cover.index)['dni']
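        # dhi from the closure relation ghi = dhi + dni * cos(zenith)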
        dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))

        irrads = pd.DataFrame({'ghi': ghi, 'dni': dni, 'dhi': dhi}).fillna(0)
        return irrads

    def cloud_cover_to_transmittance_linear(self,
                                            cloud_cover,
                                            offset=0.75,
                                            **kwargs):
        """
        Convert cloud cover to atmospheric transmittance using a linear
        model.

        0% cloud cover returns offset.

        100% cloud cover returns 0.

        Parameters
        ----------
        cloud_cover : numeric
            Cloud cover in %.
        offset : numeric, default 0.75
            Determines the maximum transmittance.
        kwargs
            Not used.

        Returns
        -------
        ghi : numeric
            Estimated GHI.
        """
        transmittance = ((100.0 - cloud_cover) / 100.0) * offset

        return transmittance
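
    # Worked example: with the default offset of 0.75 and 40% cloud cover,
    #   transmittance = ((100 - 40) / 100) * 0.75 = 0.45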

    def cloud_cover_to_irradiance_liujordan(self, cloud_cover, **kwargs):
        """
        Estimates irradiance from cloud cover in the following steps:

        1. Determine transmittance using a function of cloud cover, e.g.,
           :py:meth:`~ForecastModel.cloud_cover_to_transmittance_linear`
        2. Calculate GHI, DNI, DHI using the
           :py:func:`pvlib.irradiance.liujordan` model

        Parameters
        ----------
        cloud_cover : Series
            Cloud cover in %.

        Returns
        -------
        irradiance : DataFrame
            Columns include ghi, dni, dhi
        """
        # in principle, get_solarposition could use the forecast
        # pressure, temp, etc., but the cloud cover forecast is not
        # accurate enough to justify using these minor corrections
        solar_position = self.location.get_solarposition(cloud_cover.index)
        dni_extra = get_extra_radiation(cloud_cover.index)
        airmass = self.location.get_airmass(cloud_cover.index)

        transmittance = self.cloud_cover_to_transmittance_linear(
            cloud_cover, **kwargs)

        irrads = liujordan(solar_position['apparent_zenith'],
                           transmittance,
                           airmass['airmass_absolute'],
                           dni_extra=dni_extra)
        irrads = irrads.fillna(0)

        return irrads

    def cloud_cover_to_irradiance(self,
                                  cloud_cover,
                                  how='clearsky_scaling',
                                  **kwargs):
        """
        Convert cloud cover to irradiance. A wrapper method.

        Parameters
        ----------
        cloud_cover : Series
            Cloud cover in %.
        how : str, default 'clearsky_scaling'
            Selects the method for conversion. Can be one of
            'clearsky_scaling' or 'liujordan'.
        **kwargs
            Passed to the selected method.

        Returns
        -------
        irradiance : DataFrame
            Columns include ghi, dni, dhi
        """

        how = how.lower()
        if how == 'clearsky_scaling':
            irrads = self.cloud_cover_to_irradiance_clearsky_scaling(
                cloud_cover, **kwargs)
        elif how == 'liujordan':
            irrads = self.cloud_cover_to_irradiance_liujordan(
                cloud_cover, **kwargs)
        else:
            raise ValueError('invalid how argument')

        return irrads
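
    # Illustrative usage (hedged; `model` is any concrete ForecastModel and
    # `cloud_cover` a tz-aware Series in %):
    #   irrads = model.cloud_cover_to_irradiance(cloud_cover, how='liujordan')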

    def kelvin_to_celsius(self, temperature):
        """
        Converts Kelvin to celsius.

        Parameters
        ----------
        temperature: numeric

        Returns
        -------
        temperature: numeric
        """
        return temperature - 273.15
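
    # e.g. kelvin_to_celsius(300.0) returns 26.85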

    def isobaric_to_ambient_temperature(self, data):
        """
        Calculates temperature from isobaric temperature.

        Parameters
        ----------
        data: DataFrame
            Must contain columns 'pressure', 'temperature_iso',
            'temperature_dew_iso'. Temperatures in K; pressure in Pa.

        Returns
        -------
        temperature : Series
            Temperature in K
        """

        P = data['pressure'] / 100.0  # noqa: N806
        Tiso = data['temperature_iso']  # noqa: N806
        Td = data['temperature_dew_iso'] - 273.15  # noqa: N806

        # saturation water vapor pressure in hPa
        # (Magnus/Tetens formula; Td in deg C)
        e = 6.11 * 10**((7.5 * Td) / (Td + 237.3))

        # saturation water vapor mixing ratio
        w = 0.622 * (e / (P - e))

        temperature = Tiso - ((2.501 * 10.**6) / 1005.7) * w

        return temperature
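
    # Rough numeric check of the formula above: with pressure = 85000 Pa
    # (P = 850 hPa) and a dew point of 10 C, e ~= 12.3 hPa and w ~= 0.0091,
    # so about (2.501e6 / 1005.7) * 0.0091 ~= 22.7 K is subtracted from
    # temperature_iso.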

    def uv_to_speed(self, data):
        """
        Computes wind speed from wind components.

        Parameters
        ----------
        data : DataFrame
            Must contain the columns 'wind_speed_u' and 'wind_speed_v'.

        Returns
        -------
        wind_speed : Series
        """
        wind_speed = np.sqrt(data['wind_speed_u']**2 + data['wind_speed_v']**2)

        return wind_speed

    def gust_to_speed(self, data, scaling=1 / 1.4):
        """
        Computes sustained wind speed from wind gust; very approximate
        and location dependent.

        Parameters
        ----------
        data : DataFrame
            Must contain the column 'wind_speed_gust'.
        scaling : numeric, default 1/1.4
            Factor applied to the gust to estimate sustained speed.

        Returns
        -------
        wind_speed : Series
        """
        wind_speed = data['wind_speed_gust'] * scaling

        return wind_speed
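
# Usage sketch for the class above (hedged): pvlib provides concrete
# subclasses such as GFS in pvlib.forecast (deprecated/removed in recent
# pvlib releases):
#   from pvlib.forecast import GFS
#   start = pd.Timestamp.now(tz='US/Arizona')
#   end = start + pd.Timedelta(days=3)
#   forecast = GFS().get_processed_data(32.2, -110.9, start, end)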
Example #26
0
drk = Location(36.621, -116.043, 'US/Pacific', 1010.1072, 'Desert Rock')

# In[6]:

times2009 = pd.date_range(start='2009-01-01',
                          end='2010-01-01',
                          freq='1min',
                          tz=drk.tz)  # 12 months of 2009 - for testing
times2010and2011 = pd.date_range(
    start='2010-01-01', end='2012-01-01', freq='1min',
    tz=drk.tz)  # 24 months of 2010 and 2011 - for training

# In[7]:

cs_2009 = drk.get_clearsky(times2009)
cs_2010and2011 = drk.get_clearsky(
    times2010and2011)  # Ineichen with climatological turbidity by default
# cs_2011 = bvl.get_clearsky(times2011)

# In[8]:

cs_2009.drop(
    ['dni', 'dhi'], axis=1,
    inplace=True)  # drop dni/dhi in place, keeping only ghi
cs_2010and2011.drop(
    ['dni', 'dhi'], axis=1,
    inplace=True)  # drop dni/dhi in place, keeping only ghi
# cs_2011.drop(['dni', 'dhi'], axis=1, inplace=True)

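# A possible next step (hedged sketch): with a measured GHI series ghi_2009
# (hypothetical) aligned to times2009, a clear-sky index for training and
# testing could be formed as
#   kcs_2009 = ghi_2009 / cs_2009['ghi']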
Example #27
0
class PVSim:
    """ This PV Simulator calculates the output of a PV panel based on its parameters such as location, orientation,
        and rated power using the pvlib module.
        The output corresponds to the PV production in kilowatts, producers are assumed to have positive values."""
    system: PVSystem
    location: Location
    mc: ModelChain
    subscriber: Subscriber
    logger: Logger

    def __init__(self):
        self.system = PVSystem(
            module_parameters={
                'pdc0': _PV_SYS_CONFIG['pdc0'],
                'gamma_pdc': _PV_SYS_CONFIG['gamma_pdc']
            },
            inverter_parameters={'pdc0': _PV_SYS_CONFIG['pdc0']},
            temperature_model_parameters=_TEMP_MODEL_PARAMS,
            surface_tilt=_PV_SYS_CONFIG['surface_tilt'],
            surface_azimuth=_PV_SYS_CONFIG['surface_azimuth'])
        self.location = Location(latitude=_PV_SYS_CONFIG['latitude'],
                                 longitude=_PV_SYS_CONFIG['longitude'])
        self.mc = ModelChain(self.system,
                             self.location,
                             aoi_model='physical',
                             spectral_model='no_loss')

        self.logger = Logger(_LOG_FILEPATH)

        self.subscriber = Subscriber(callback_ctrl=self._on_new_meter_ctrl,
                                     callback_data=self._on_new_meter_data)

    def run(self):
        self.logger.writerow(
            ('Datetime', 'Pac_HH[kW]', 'Pac_PV[kW]', 'Pac_sum[kW]'))
        self.subscriber.run()

    def get_pac_kw(self, times: pd.DatetimeIndex):
        weather = self.location.get_clearsky(times=times)
        self.mc.run_model(weather)
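        # note: newer pvlib (>= 0.9) exposes this result as mc.results.ac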
        return self.mc.ac / 1e3

    def _on_new_meter_data(self, timestamp, meter_pac_kw):
        times = pd.date_range(start=timestamp,
                              end=timestamp,
                              tz=_PV_SYS_CONFIG['timezone'])
        pv_pac_kw = self.get_pac_kw(times).values[0]
        sum_pac_kw = meter_pac_kw + pv_pac_kw
        data = (timestamp, meter_pac_kw, pv_pac_kw, sum_pac_kw)
        # print(data)
        self.logger.writerow(data)

    def _on_new_meter_ctrl(self, msg):
        # print(msg)
        if msg == "done":
            self._cleanup()

    def _cleanup(self):
        self.subscriber.close()
        self.logger.close()
        self._plot_results_to_file()

    def _plot_results_to_file(self):
        df = pd.read_csv(_LOG_FILEPATH, index_col=0)

        df.plot()
        plt.xlabel('Time')
        plt.ylabel('Power in kW')
        plt.setp(plt.xticks()[1], rotation=30, ha='right')
        plt.grid()
        plt.subplots_adjust(left=0.2, bottom=0.25)
        plt.savefig(f'{_RESULTS_DIR_PATH}/plot.png', dpi=150)
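
# Usage sketch (hedged; _PV_SYS_CONFIG keys as assumed by the constructor):
#   sim = PVSim()
#   times = pd.date_range('2021-06-21 12:00', periods=1, freq='1min',
#                         tz=_PV_SYS_CONFIG['timezone'])
#   print(sim.get_pac_kw(times))
#   sim.run()  # blocks, handling meter messages until a "done" ctrl message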