Example #1
    def test_compute_gross_energy(self):
        # Set some test values
        net = np.array([[1, 1, 1],
                        [0, 1, 1],
                        [1, 1, 1]])
        avail = np.array([[0.05, 0.08, 0.2],
                          [0.05, .9, 0.2],
                          [0.05, .9, 0]])
        curt = np.array([[0.05, 0.05, 0.05],
                         [0.05, 0.2, 0.05],
                         [0.05, -0.1, 0.05]])

        # Make sure the different combinations of 'frac' and 'energy' based inputs give the expected gross energy
        nptest.assert_almost_equal(
            unit_conversion.compute_gross_energy(net[0, :], avail[0, :], curt[0, :], 'frac', 'frac'),
            [1.1111, 1.1494, 1.3333],
            decimal=4)
        nptest.assert_almost_equal(
            unit_conversion.compute_gross_energy(net[0, :], avail[0, :], curt[0, :], 'energy', 'frac'),
            [1.1026, 1.1326, 1.2526],
            decimal=4)
        nptest.assert_almost_equal(
            unit_conversion.compute_gross_energy(net[0, :], avail[0, :], curt[0, :], 'frac', 'energy'),
            [1.1026, 1.1370, 1.3000],
            decimal=4)
        nptest.assert_almost_equal(
            unit_conversion.compute_gross_energy(net[0, :], avail[0, :], curt[0, :], 'energy', 'energy'),
            [1.1000, 1.1300, 1.2500],
            decimal=4)

        # Make sure exceptions are thrown when bad input data is identified
        with self.assertRaises(Exception):  # Row 1 includes a month with combined fractional losses above 100%
            unit_conversion.compute_gross_energy(net[1, :], avail[1, :], curt[1, :], 'frac', 'frac')

        with self.assertRaises(Exception):  # Row 2 includes a negative curtailment fraction
            unit_conversion.compute_gross_energy(net[2, :], avail[2, :], curt[2, :], 'frac', 'frac')
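In this test, np, nptest, and unit_conversion are assumed to be numpy, numpy.testing, and the project's unit conversion toolkit, imported at module level. The expected values imply the arithmetic behind compute_gross_energy: with both losses given as fractions, gross energy is net / (1 - avail - curt); with both given in energy units, it is net + avail + curt; in the mixed modes the fractional loss is divided out and the energy-based loss added back. A minimal sketch of that arithmetic (gross_from_losses is a hypothetical helper, not part of unit_conversion):

import numpy as np

def gross_from_losses(net, avail, curt, avail_type='frac', curt_type='frac'):
    # Illustrative re-derivation of the values the test above expects;
    # this is not the library implementation.
    net, avail, curt = (np.asarray(x, dtype=float) for x in (net, avail, curt))
    if avail_type == 'frac' and curt_type == 'frac':
        return net / (1.0 - avail - curt)      # e.g. 1 / (1 - 0.05 - 0.05) = 1.1111
    if avail_type == 'energy' and curt_type == 'energy':
        return net + avail + curt              # e.g. 1 + 0.05 + 0.05 = 1.1000
    if avail_type == 'energy':                 # energy-based availability, fractional curtailment
        return net / (1.0 - curt) + avail      # e.g. 1 / 0.95 + 0.05 = 1.1026
    return net / (1.0 - avail) + curt          # fractional availability, energy-based curtailment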
Example #2
    def process_loss_estimates(self):
        """
        Append availability and curtailment losses to monthly data frame

        Args:
            (None)

        Returns:
            (None)

        """
        df = getattr(self._plant, 'curtail').df

        curt_monthly = np.divide(df.resample('MS')[['availability_kwh', 'curtailment_kwh']].sum(),
                                 1e6)  # Get sum of avail and curt losses in GWh
        curt_monthly.rename(columns={'availability_kwh': 'availability_gwh', 'curtailment_kwh': 'curtailment_gwh'},
                            inplace=True)

        # Merge with revenue meter monthly data
        self._monthly.df = self._monthly.df.join(curt_monthly)

        # Add gross energy field
        self._monthly.df['gross_energy_gwh'] = un.compute_gross_energy(self._monthly.df['energy_gwh'],
                                                                       self._monthly.df['availability_gwh'],
                                                                       self._monthly.df['curtailment_gwh'], 'energy',
                                                                       'energy')

        # Calculate percentage-based losses
        self._monthly.df['availability_pct'] = np.divide(self._monthly.df['availability_gwh'],
                                                         self._monthly.df['gross_energy_gwh'])
        self._monthly.df['curtailment_pct'] = np.divide(self._monthly.df['curtailment_gwh'],
                                                        self._monthly.df['gross_energy_gwh'])

        self._monthly.df['avail_nan_perc'] = df.resample('MS')['availability_kwh'].apply(
            tm.percent_nan)  # Percentage of 10-min availability data that were NaN when summing to monthly
        self._monthly.df['curt_nan_perc'] = df.resample('MS')['curtailment_kwh'].apply(
            tm.percent_nan)  # Percentage of 10-min curtailment data that were NaN when summing to monthly

        self._monthly.df['nan_flag'] = False  # Set flag to false by default
        self._monthly.df.loc[(self._monthly.df['energy_nan_perc'] > self.uncertainty_nan_energy) |
                             (self._monthly.df['avail_nan_perc'] > self.uncertainty_nan_energy) |
                             (self._monthly.df['curt_nan_perc'] > self.uncertainty_nan_energy), 'nan_flag'] \
            = True  # Flag months where more data than the allowed fraction (1% by default) were NaN

        # By default, assume all reported losses are representative of long-term operation
        self._monthly.df['availability_typical'] = True
        self._monthly.df['curtailment_typical'] = True

        # By default, assume combined availability and curtailment losses are below the threshold to be considered valid
        self._monthly.df['combined_loss_valid'] = True
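The NaN-percentage step relies on tm.percent_nan, applied to each resampled month of 10-minute data. Assuming it simply reports the fraction of missing samples in a month (a guess at its behavior, not the library source), a sketch would look like:

import pandas as pd

def percent_nan(s: pd.Series) -> float:
    # Fraction of entries in one resampled month that are NaN;
    # an empty month is treated as fully missing.
    return float(s.isna().sum()) / len(s) if len(s) > 0 else 1.0

Each monthly fraction is then compared against uncertainty_nan_energy (1% per the comment above) to set nan_flag.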
Example #3
    def prepare(self):
        """
        Do all loading and preparation of the data for this plant.
        """

        # Set time frequencies of data
        self._meter_freq = '1MS'  # Monthly meter data
        self._curtail_freq = '1MS'  # Monthly curtailment data
        
        ######################
        # MONTHLY METER DATA #
        ######################
        
        self._meter.load(self._path, 'plant_data', 'csv')  # Load monthly data
        
        # Get 'time' field in datetime format
        self._meter.df['time'] = pd.to_datetime(self._meter.df['year_month'], format='%Y %m')
        self._meter.df.set_index('time', inplace=True, drop=False) # Set datetime as index
        
        # Rename variables
        self._meter.df['energy_kwh'] = self._meter.df['net_energy_mwh']*1000. # convert MWh to kWh
        
        # Remove the fields we are not yet interested in
        self._meter.df.drop(['year_month', 'net_energy_mwh', 'availability_pct', 'curtailment_pct'], axis=1, inplace=True)
        
        #############################################
        # MONTHLY CURTAILMENT AND AVAILABILITY DATA #
        #############################################
        
        self._curtail.load(self._path, 'plant_data', 'csv')  # Load monthly data
        
        # Get 'time' field in datetime format
        self._curtail.df['time'] = pd.to_datetime(self._curtail.df['year_month'], format='%Y %m') 
        self._curtail.df.set_index('time', inplace=True, drop=False) # Set datetime as index
        
        # Get losses in energy units
        gross_energy = un.compute_gross_energy(self._curtail.df['net_energy_mwh'],
                                               self._curtail.df['availability_pct'],
                                               self._curtail.df['curtailment_pct'], 'frac', 'frac')
        self._curtail.df['curtailment_kwh'] = self._curtail.df['curtailment_pct'] * gross_energy * 1000
        self._curtail.df['availability_kwh'] = self._curtail.df['availability_pct'] * gross_energy * 1000
           
        # Remove the fields we are not yet interested in
        self._curtail.df.drop(['net_energy_mwh', 'year_month'], axis=1, inplace=True)
                
        ###################
        # REANALYSIS DATA #
        ###################       
       
        # merra2
        self._reanalysis._product['merra2'].load(self._path, "merra2_data", "csv")
        self._reanalysis._product['merra2'].rename_columns({"time": "datetime",
                                                            "windspeed_ms": "ws_50m",
                                                            "rho_kgm-3": "dens_50m",
                                                            "winddirection_deg": "wd_50m"})
        self._reanalysis._product['merra2'].normalize_time_to_datetime("%Y-%m-%d %H:%M:%S")
        self._reanalysis._product['merra2'].df.set_index('time', inplace=True, drop=False)

        # ncep2
        self._reanalysis._product['ncep2'].load(self._path, "ncep2_data", "csv")
        self._reanalysis._product['ncep2'].rename_columns({"time": "datetime",
                                                           "windspeed_ms": "ws_10m",
                                                           "rho_kgm-3": "dens_10m",
                                                           "winddirection_deg": "wd_10m"})
        self._reanalysis._product['ncep2'].normalize_time_to_datetime("%Y%m%d %H%M")
        self._reanalysis._product['ncep2'].df.set_index('time', inplace=True, drop=False)

        # erai
        self._reanalysis._product['erai'].load(self._path, "erai_data", "csv")
        self._reanalysis._product['erai'].rename_columns({"time": "datetime",
                                                          "windspeed_ms": "ws_58",
                                                          "rho_kgm-3": "dens_58",
                                                          "winddirection_deg": "wd_58"})
        self._reanalysis._product['erai'].normalize_time_to_datetime("%Y-%m-%d %H:%M:%S")
        self._reanalysis._product['erai'].df.set_index('time', inplace=True, drop=False)
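The curtailment block in this example recovers gross energy from fractional losses and then converts each loss back to kWh. A worked example with illustrative numbers (not taken from the plant data):

# net_energy_mwh = 900, availability_pct = 0.05, curtailment_pct = 0.05
# gross_energy     = 900 / (1 - 0.05 - 0.05)  = 1000 MWh   ('frac', 'frac' mode)
# availability_kwh = 0.05 * 1000 * 1000       = 50,000 kWh
# curtailment_kwh  = 0.05 * 1000 * 1000       = 50,000 kWh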