Example #1
    def test_tcell_filter(self):
        filtered = tcell_filter(self.tcell,
                                temperature_cell_low=-50,
                                temperature_cell_high=110)

        # Expected high and low tcell cutoffs to be non-inclusive.
        expected_result = np.array([False, True, True, True, False])
        self.assertListEqual(filtered.tolist(), expected_result.tolist())
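
This snippet is taken from a larger test class; a minimal fixture along these lines is assumed (the class name and setUp values are illustrative, mirroring Example #2, not taken from the source):

import unittest

import numpy as np
from rdtools import tcell_filter


class TestFiltering(unittest.TestCase):  # hypothetical class name
    def setUp(self):
        # Temperatures at and around the -50 / 110 cutoffs used in the test
        self.tcell = np.array([-50, -49, 0, 109, 110])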
Example #2
def test_tcell_filter():
    '''Unit tests for the cell temperature filter.'''

    tcell = np.array([-50, -49, 0, 109, 110])
    filtered = tcell_filter(tcell,
                            temperature_cell_low=-50,
                            temperature_cell_high=110)

    # Expected high and low tcell cutoffs to be non-inclusive.
    expected_result = np.array([False, True, True, True, False])
    assert filtered.tolist() == expected_result.tolist()
Example #3
# Plot the normalized power time series
fig, ax = plt.subplots()
ax.plot(normalized.index, normalized, 'o', alpha=0.05)
ax.set_ylim(0, 2)
fig.autofmt_xdate()
ax.set_ylabel('Normalized energy')

# 2: Filter
# Data filtering is used to exclude data points that represent invalid data, would create bias in the analysis, or would introduce significant noise.
# It can also be useful for removing outages and outliers. Outages sometimes appear as low but non-zero yield; automatic functions for detecting these are not yet included in `rdtools`, so such filters should be implemented by the analyst if needed (see the sketch below).
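
# Not an rdtools function: a minimal sketch of an analyst-implemented
# outage filter, assuming outages show up as low-but-nonzero daily yield.
# The 10%-of-median threshold is an illustrative assumption, not from the source.
daily_yield = df['normalized'].resample('D').sum()
outage_days = daily_yield[daily_yield < 0.1 * daily_yield.median()].index
outage_mask = pd.Series(~df.index.normalize().isin(outage_days),
                        index=df.index)
# outage_mask could be combined with the masks below via logical AND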

# Calculate a collection of boolean masks that can be used
# to filter the time series
nz_mask = (df['normalized'] > 0)
poa_mask = rdtools.poa_filter(df['poa'])
tcell_mask = rdtools.tcell_filter(df['Tcell'])
clip_mask = rdtools.clip_filter(df['power'])

# filter the time series and keep only the columns needed for the
# remaining steps
filtered = df[nz_mask & poa_mask & tcell_mask & clip_mask]
filtered = filtered[['insolation', 'normalized']]

fig, ax = plt.subplots()
ax.plot(filtered.index, filtered.normalized, 'o', alpha=0.05)
ax.set_ylim(0, 2)
fig.autofmt_xdate()
ax.set_ylabel('Normalized energy')

# 3: Aggregate
# Data is aggregated with an irradiance-weighted average. This can be useful, for example with daily aggregation, to reduce the impact of high-error data points in the morning and evening. A sketch of this step follows.
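
# A minimal sketch of this step, using `filtered` from the previous cell.
# rdtools.aggregation_insol computes the insolation-weighted average,
# i.e. sum(normalized * insolation) / sum(insolation) within each period.
daily = rdtools.aggregation_insol(filtered.normalized,
                                  filtered.insolation,
                                  frequency='D')
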
Example #4
# Assumes numpy, pandas, pvlib, and rdtools are available, and that a
# `config` dict of site metadata and file paths has been defined elsewhere.
import numpy as np
import pandas as pd
import pvlib
import rdtools


def average_degradation():
    file_name = config['real_time_data']

    df = pd.read_csv(file_name)
    df = df.rename(
        columns={
            u'12 BP Solar - Active Power (kW)': 'power',
            u'12 BP Solar - Wind Speed (m/s)': 'wind',
            u'12 BP Solar - Weather Temperature Celsius (\xb0C)': 'Tamb',
            u'12 BP Solar - Global Horizontal Radiation (W/m\xb2)': 'ghi',
            u'12 BP Solar - Diffuse Horizontal Radiation (W/m\xb2)': 'dhi'
        })

    # Specify the Metadata
    meta = config

    df.index = pd.to_datetime(df.Timestamp)
    # TZ is required for irradiance transposition
    df.index = df.index.tz_localize(meta['timezone'], ambiguous='infer')

    # Explicitly trim the dates so that runs of this example notebook
    # are comparable when the source dataset has been downloaded at different times
    df = df[config['start_date']:config['end_date']]

    # Change power from kilowatts to watts
    df['power'] = df.power * 1000.0
    # There is some missing data, but we can infer the frequency from the first several data points
    freq = pd.infer_freq(df.index[:10])

    # And then set the frequency of the dataframe
    df = df.resample(freq).median()

    # Calculate energy yield in Wh
    df['energy'] = df.power * pd.to_timedelta(
        df.power.index.freq).total_seconds() / (3600.0)

    # Calculate POA irradiance from DHI, GHI inputs
    loc = pvlib.location.Location(meta['latitude'],
                                  meta['longitude'],
                                  tz=meta['timezone'])
    sun = loc.get_solarposition(df.index)

    # calculate the POA irradiance
    sky = pvlib.irradiance.isotropic(meta['tilt'], df.dhi)
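    # Back out DNI from the closure relation GHI = DHI + DNI * cos(solar zenith)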
    df['dni'] = (df.ghi - df.dhi) / np.cos(np.deg2rad(sun.zenith))
    beam = pvlib.irradiance.beam_component(meta['tilt'], meta['azimuth'],
                                           sun.zenith, sun.azimuth, df.dni)
    df['poa'] = beam + sky

    # Calculate cell temperature
    df_temp = pvlib.pvsystem.sapm_celltemp(df.poa,
                                           df.wind,
                                           df.Tamb,
                                           model=meta['temp_model'])
    df['Tcell'] = df_temp.temp_cell

    # Specify the keywords for the pvwatts model
    pvwatts_kws = {
        "poa_global": df.poa,
        "P_ref": meta['pdc'],
        "T_cell": df.Tcell,
        "G_ref": 1000,  # reference irradiance
        "T_ref": 25,  # reference temperature
        "gamma_pdc": meta['tempco']
    }

    # Calculate the normalization; the function also returns the relevant
    # insolation for each point in the normalized PV energy time series
    normalized, insolation = rdtools.normalize_with_pvwatts(
        df.energy, pvwatts_kws)

    df['normalized'] = normalized
    df['insolation'] = insolation

    # Calculate a collection of boolean masks that can be used
    # to filter the time series
    nz_mask = (df['normalized'] > 0)
    poa_mask = rdtools.poa_filter(df['poa'])
    tcell_mask = rdtools.tcell_filter(df['Tcell'])
    clip_mask = rdtools.clip_filter(df['power'])

    # filter the time series and keep only the columns needed for the
    # remaining steps
    filtered = df[nz_mask & poa_mask & tcell_mask & clip_mask]
    filtered = filtered[['insolation', 'normalized']]

    daily = rdtools.aggregation_insol(filtered.normalized,
                                      filtered.insolation,
                                      frequency='D')

    # Calculate the degradation rate using the YoY method
    yoy_rd, yoy_ci, yoy_info = rdtools.degradation_year_on_year(
        daily, confidence_level=68.2)
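    # confidence_level=68.2 roughly corresponds to a +/- 1-sigma
    # uncertainty interval on the year-on-year rate distribution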

    # yoy_rd is the degradation rate in %/yr; convert to a fraction per year
    return (df, 0.01 * yoy_rd)
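
A minimal usage sketch, assuming `config` has been loaded elsewhere (the loading step is not shown in the source):

df, rd = average_degradation()
print('Estimated degradation rate: {:.2%} per year'.format(rd))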