예제 #1
0
 def grow_drop_data(self, years, frequency, load_growth):
     """Grow site thermal loads into the analysis years and drop extra data.

     Args:
         years (List): list of years for which analysis will occur on
         frequency (str): period frequency of the timeseries data
         load_growth (float): growth rate applied when filling in new years
     """
     if not self.site_thermal_load_exists:
         return
     # Steam load: extend into the analysis years, then trim anything outside them.
     grown = Lib.fill_extra_data(self.site_steam_load, years, load_growth,
                                 frequency)
     self.site_steam_load = Lib.drop_extra_data(grown, years)
     # Hot-water load: same treatment.
     grown = Lib.fill_extra_data(self.site_hotwater_load, years, load_growth,
                                 frequency)
     self.site_hotwater_load = Lib.drop_extra_data(grown, years)
예제 #2
0
 def grow_drop_data(self, years, frequency, load_growth):
     """Grow the optional hot-water and steam loads, then drop out-of-range years.

     Args:
         years (List): list of years for which analysis will occur on
         frequency (str): period frequency of the timeseries data
         load_growth (float): growth rate applied when filling in new years
     """
     # Each load is optional; process only the ones that were provided.
     for attr in ('site_hotwater_load', 'site_steam_load'):
         series = getattr(self, attr)
         if series is None:
             continue
         series = Lib.fill_extra_data(series, years, load_growth, frequency)
         setattr(self, attr, Lib.drop_extra_data(series, years))
예제 #3
0
    def grow_drop_data(self, years, frequency, load_growth):
        """ Adds data by growing the given data OR drops any extra data that might have slipped in.
        Update variable that hold timeseries data after adding growth data. These method should be called after
        add_growth_data and before the optimization is run.

        Args:
            years (List): list of years for which analysis will occur on
            frequency (str): period frequency of the timeseries data
            load_growth (float): percent/ decimal value of the growth rate of loads in this simulation

        """
        # Monthly percentage data does not grow (0% rate) — just align it
        # to the analysis years and discard anything outside them.
        padded = Lib.fill_extra_data(self.vars_percent, years, 0, 'M')
        self.vars_percent = Lib.drop_extra_data(padded, years)
예제 #4
0
    def __init__(self, constraint_type, limit_type, years_of_analysis,
                 datetime_freq):
        """Initialize a constraint.

        Args:
            constraint_type (str): state of energy, charge, or discharge (not case sensitive)
            limit_type (str): maximum limit or minimum (not case sensitive)
            years_of_analysis (list): list of years that should be included in the returned Index
            datetime_freq (str): the pandas frequency in string representation -- required to create dateTime rang

        Raises:
            SyntaxWarning: if limit_type is not 'max' or 'min' (case-insensitive)

        """
        self.type = constraint_type.lower()
        # Lower once; reuse for the flags and the validity check.
        limit = limit_type.lower()
        self.is_max = limit == 'max'
        self.is_min = limit == 'min'
        # FIX: compare the lowered value so 'Max'/'MIN' are accepted, matching
        # the documented case-insensitivity (the original compared the raw string).
        if limit not in ('max', 'min'):
            raise SyntaxWarning("limit_type can be 'max' or 'min'")

        index = Lib.create_timeseries_index(years_of_analysis, datetime_freq)
        size = len(index)
        self.parents = pd.DataFrame(columns=[
            'DateTime', 'Parent'
        ])  # records which valuestreams have had a non-base value
        self.owner = pd.Series(
            np.repeat('null', size),
            index=index)  # records which valuestream(s) set the current VALUE

        # records the value that would ensure all past updated requirements would also be met
        if self.is_max:
            # a max constraint starts unbounded-above (effectively no limit yet)
            self.value = pd.Series(np.repeat(VERY_LARGE_NUMBER, size),
                                   index=index)
        if self.is_min:
            # a min constraint starts at zero
            self.value = pd.Series(np.zeros(size), index=index)
예제 #5
0
    def calc_retail_energy_price(tariff, freq, analysis_yr, non_zero=True):
        """ transforms tariff data file into time series dataFrame

        Args:
            tariff (DataFrame): raw tariff dataframe.
            freq (str): the frequency of the timeseries data we are working with
            analysis_yr (float): Year for which to build the tariff for
            non_zero (bool): when True, raise if any timestep ends up with no
                billing period or a zero energy price (i.e. the tariff does not
                partition the year)

        Returns: a DataFrame with the index beginning at hour 0, with columns
            'he' (hour ending), 'p_energy' (summed energy price per timestep),
            and 'billing_period' (list of demand billing periods per timestep)

        Raises:
            TariffError: when non_zero is True and the tariff leaves a timestep
                without a billing period or with a zero energy price

        """
        temp = pd.DataFrame(
            index=Lib.create_timeseries_index([analysis_yr], freq))
        size = len(temp)

        # Build Energy Price Vector
        # 'he' = hour ending; the 1-second shift maps a timestamp at the top of
        # the hour into the hour it ends.
        temp['he'] = (temp.index + pd.Timedelta('1s')).hour + 1
        temp.loc[:, 'p_energy'] = np.zeros(shape=size)

        # one (initially empty) list of applicable demand periods per timestep
        billing_period = [[] for _ in range(size)]

        for p in tariff.index:
            # edit the pricedf energy price and period values for all of the periods defined
            # in the tariff input file
            bill = tariff.loc[p, :]
            mask = Financial.create_bill_period_mask(bill, temp.index.month,
                                                     temp['he'],
                                                     temp.index.weekday)
            if bill['Charge'].lower() == 'energy':
                current_energy_prices = temp.loc[mask, 'p_energy'].values
                if np.any(np.greater(current_energy_prices, 0)):
                    # More than one energy price applies to the same time step
                    TellUser.warning(
                        'More than one energy price applies to the same time step.'
                    )
                # Add energy prices
                temp.loc[mask, 'p_energy'] += bill['Value']
            elif bill['Charge'].lower() == 'demand':
                # record billing period
                for i, true_false in enumerate(mask):
                    if true_false:
                        billing_period[i].append(p)
        # dtype='object' keeps the per-timestep python lists intact in the column
        billing_period = pd.DataFrame({'billing_period': billing_period},
                                      dtype='object')
        temp.loc[:, 'billing_period'] = billing_period.values

        # ADD CHECK TO MAKE SURE ENERGY PRICES ARE THE SAME FOR EACH OVERLAPPING BILLING PERIOD
        # Check to see that each timestep has a period assigned to it

        if (not billing_period.apply(len).all() or np.any(
                np.equal(temp.loc[:, 'p_energy'].values, 0))) and non_zero:
            TellUser.error(
                'The billing periods in the input file do not partition the year, '
                + 'please check the tariff input file')
            raise TariffError('Please check the retail tariff')
        return temp
예제 #6
0
    def grow_drop_data(self, years, frequency, load_growth):
        """ Adds data by growing the given data OR drops any extra data that might have slipped in.
        Update variable that hold timeseries data after adding growth data. These method should be called after
        add_growth_data and before the optimization is run.

        Args:
            years (List): list of years for which analysis will occur on
            frequency (str): period frequency of the timeseries data
            load_growth (float): percent/ decimal value of the growth rate of loads in this simulation

        """
        # timeseries data: the system load grows at the user-supplied rate
        grown = Lib.fill_extra_data(self.system_load, years, load_growth,
                                    frequency)
        self.system_load = Lib.drop_extra_data(grown, years)

        if 'active hours' in self.idmode:
            # active-hours flags do not grow (0% rate); re-cast to boolean
            # after filling/trimming
            flags = Lib.fill_extra_data(self.active, years, 0, frequency)
            flags = Lib.drop_extra_data(flags, years)
            self.active = flags == 1

        # monthly data: capacity rate does not grow
        rate = Lib.fill_extra_data(self.capacity_rate, years, 0, 'M')
        self.capacity_rate = Lib.drop_extra_data(rate, years)
예제 #7
0
    def grow_drop_data(self, years, frequency, load_growth):
        """ Adds data by growing the given data OR drops any extra data that might have slipped in.
        Update variable that hold timeseries data after adding growth data. These method should be called after
        add_growth_data and before the optimization is run.

        Args:
            years (List): list of years for which analysis will occur on
            frequency (str): period frequency of the timeseries data
            load_growth (float): percent/ decimal value of the growth rate of loads in this simulation

        """
        data_year = self.price.index.year.unique()
        # analysis years for which the price data has no coverage
        no_data_year = {pd.Period(year)
                        for year in years} - {
                            pd.Period(year)
                            for year in data_year
                        }  # which years do we not have data for

        if len(no_data_year) > 0:
            # FIX: hoisted out of the loop — the growth source (most recent
            # year of existing data) is the same on every iteration.
            source_year = pd.Period(max(data_year))
            for yr in no_data_year:
                # FIX: renamed from `years`, which shadowed and clobbered the
                # method parameter inside the loop.
                years_forward = yr.year - source_year.year

                # Build Energy Price Vector based on the new year
                new_index = Lib.create_timeseries_index([yr.year], frequency)
                temp = pd.DataFrame(index=new_index)
                weekday = (new_index.weekday < SATURDAY).astype('int64')
                # hour-ending: the 1-second shift maps top-of-hour stamps into
                # the hour they end
                he = (new_index + pd.Timedelta('1s')).hour + 1
                temp['price'] = np.zeros(len(new_index))

                for p in range(len(self.tariff)):
                    # edit the pricedf energy price and period values for all of the periods defined
                    # in the tariff input file
                    bill = self.tariff.iloc[p, :]
                    mask = Financial.create_bill_period_mask(
                        bill, temp.index.month, he, weekday)

                    current_energy_prices = temp.loc[mask, 'price'].values
                    if np.any(np.greater(current_energy_prices, 0)):
                        # More than one energy price applies to the same time step
                        TellUser.warning(
                            'More than one energy price applies to the same time step.'
                        )
                    # Add energy prices
                    temp.loc[mask, 'price'] += bill['Value']
                # apply growth to new energy rate
                new_p_energy = temp['price'] * (1 + self.growth)**years_forward
                self.price = pd.concat([self.price, new_p_energy],
                                       sort=True)  # add to existing
예제 #8
0
    def grow_drop_data(self, years, frequency, load_growth):
        """ Update variable that hold timeseries data after adding growth data. These method should be called after
        add_growth_data and before the optimization is run.

        Args:
            years (List): list of years for which analysis will occur on
            frequency (str): period frequency of the timeseries data
            load_growth (float): percent/ decimal value of the growth rate of loads in this simulation

        """
        # timeseries data: only the system load grows at the supplied rate
        filled = Lib.fill_extra_data(self.system_load, years, load_growth,
                                     frequency)
        self.system_load = Lib.drop_extra_data(filled, years)

        # remaining timeseries attributes grow at 0%
        for attr in ('months', 'cap_commitment'):
            filled = Lib.fill_extra_data(getattr(self, attr), years, 0,
                                         frequency)
            setattr(self, attr, Lib.drop_extra_data(filled, years))

        # monthly data (0% growth, monthly frequency)
        for attr in ('cap_monthly', 'cap_price', 'ene_price'):
            filled = Lib.fill_extra_data(getattr(self, attr), years, 0, 'M')
            setattr(self, attr, Lib.drop_extra_data(filled, years))
예제 #9
0
    def grow_drop_data(self, years, frequency, load_growth):
        """ Adds data by growing the given data OR drops any extra data that
        might have slipped in. Update variable that hold timeseries data
        after adding growth data. These method should be called after
        add_growth_data and before the optimization is run.

        Args:
            years (List): list of years for which analysis will occur on
            frequency (str): period frequency of the timeseries data
            load_growth (float): percent/ decimal value of the growth rate of
                loads in this simulation

        """
        super().grow_drop_data(years, frequency, load_growth)

        def _refresh(series):
            # zero-growth fill into the analysis years, then trim extras
            return Lib.drop_extra_data(
                Lib.fill_extra_data(series, years, 0, frequency), years)

        self.eou_avg = _refresh(self.eou_avg)
        self.eod_avg = _refresh(self.eod_avg)

        if self.u_ts_constraints:
            # regulation-up timeseries limits
            self.regu_max = _refresh(self.regu_max)
            self.regu_min = _refresh(self.regu_min)

        if self.d_ts_constraints:
            # regulation-down timeseries limits
            self.regd_max = _refresh(self.regd_max)
            self.regd_min = _refresh(self.regd_min)
예제 #10
0
 def grow_drop_data(self, years, frequency, load_growth):
     """Grow the optional site cooling load into the analysis years, then trim.

     Args:
         years (List): list of years for which analysis will occur on
         frequency (str): period frequency of the timeseries data
         load_growth (float): growth rate applied when filling in new years
     """
     cooling = self.site_cooling_load
     if cooling is None:
         return
     cooling = Lib.fill_extra_data(cooling, years, load_growth, frequency)
     self.site_cooling_load = Lib.drop_extra_data(cooling, years)
예제 #11
0
    def assign_optimization_level(analysis_years, control_horizon,
                                  predictive_horizon, frequency, dt):
        """ creates an index based on the opt_years presented and then

         Args:
            analysis_years (list): List of Period years where we need data for
            control_horizon (str, int): optimization window length from the user
            predictive_horizon (str, int): mcp horizon input from the user
                (if 0, then assume the same as CONTROL_HORIZON)
                should be greater than or equal to CONTROL_HORIZON value
            frequency (str): time step in string form
            dt (float): time step

        Return:
            opt_agg (DataFrame): 1 column, all indexes with the same value will be in one
            optimization problem together

        """
        # create dataframe to fill
        level_index = Lib.create_timeseries_index(analysis_years, frequency)
        level_df = pd.DataFrame({'control': np.zeros(len(level_index))},
                                index=level_index)
        current_control_level = 0
        # control level should not overlap multiple years & there is only one per timestep
        for yr in level_index.year.unique():
            sub = copy.deepcopy(level_df[level_df.index.year == yr])
            if control_horizon == 'year':
                # continue counting from previous year opt_agg
                level_df.loc[level_df.index.year == yr,
                             'control'] = current_control_level + 1
            elif control_horizon == 'month':
                # continue counting from previous year opt_agg
                level_df.loc[
                    level_df.index.year == yr,
                    'control'] = current_control_level + sub.index.month
            else:
                # n is number of hours
                control_horizon = int(control_horizon)
                sub['ind'] = range(len(sub))
                # split year into groups of n days
                ind = (sub.ind // (control_horizon / dt)).astype(int) + 1
                # continue counting from previous year opt_agg
                level_df.loc[level_df.index.year == yr,
                             'control'] = ind + current_control_level
            current_control_level = max(level_df.control)

        # predictive level can overlap multiple years & there can be 1+ per timestep
        if not predictive_horizon:
            # set to be the control horizon
            level_df['predictive'] = level_df.loc[:, 'control']
        else:
            # TODO this has not been tested yet -- HN (sorry hmu and I will help)
            # create a list of lists
            max_index = len(level_df['control'])
            # NOTE(review): np.repeat([], max_index) repeats zero elements and
            # yields an EMPTY array — likely intended
            # [[] for _ in range(max_index)]; verify before enabling this path.
            predictive_level = np.repeat([], max_index)
            current_predictive_level_beginning = 0
            current_predictive_level = 0

            for control_level in level_df.control.unique():
                if predictive_horizon == 'year':
                    # needs to be a year from the beginning of the current predictive level, determine
                    # length of the year based on first index in subset
                    # NOTE(review): level_index[i] is a single timestamp, so
                    # `.year[0]` looks like it would fail — confirm intent.
                    start_year = level_index[
                        current_predictive_level_beginning].year[0]
                    f_date = date(start_year, 1, 1)
                    l_date = date(start_year + 1, 1, 1)
                    delta = l_date - f_date
                    current_predictive_level_end = int(delta.days * dt)

                elif predictive_horizon == 'month':
                    # needs to be a month from the beginning of the current predictive level, determine
                    # length of the month based on first index in subset
                    start_index = level_index[
                        current_predictive_level_beginning]
                    # NOTE(review): calendar.monthrange returns a
                    # (first_weekday, num_days) tuple, not an int — the later
                    # min(...) comparison would fail; probably wants [1].
                    current_predictive_level_end = calendar.monthrange(
                        start_index.year, start_index.month)
                else:
                    current_predictive_level_end = predictive_horizon * dt
                # make sure that CURRENT_PREDICTIVE_LEVEL_END stays less than or equal to MAX_INDEX
                current_predictive_level_end = min(
                    current_predictive_level_end, max_index)
                # add CURRENT_PREDICTIVE_LEVEL to lists between CURRENT_PREDICTIVE_LEVEL_BEGINNING and CURRENT_PREDICTIVE_LEVEL_END
                # NOTE(review): tuple indexing [a, b] is not a range — a slice
                # [a:b] looks intended here and in the write-back below.
                update_levels = predictive_level[
                    current_predictive_level_beginning,
                    current_predictive_level_end]
                # NOTE(review): list.append returns None, so this comprehension
                # builds a list of Nones — mutation happens via the append side
                # effect only; confirm this is intentional.
                update_levels = [
                    dt_level.append(current_predictive_level)
                    for dt_level in update_levels
                ]
                predictive_level[current_predictive_level_beginning,
                                 current_predictive_level_end] = update_levels
                # NOTE(review): this assigns the COUNT of timesteps in the
                # current control level rather than advancing the running
                # beginning index (+=?) — verify against intended semantics.
                current_predictive_level_beginning = np.sum(
                    level_df.control == control_level)
                # increase CURRENT_PREDICTIVE_LEVEL
                current_predictive_level += 1
            level_df['predictive'] = predictive_level
        return level_df