Example no. 1
0
    entries = ['db', 'metric', 'run_name', 'folder_results']
    tasks = pd.DataFrame(columns=entries)
    for db in dbs:

        # metrics available
        # metrics = ['costs_yearly', 'emissions_yearly', 'activity_by_fuel', 'activity_by_tech', 'capacity_by_fuel',
        #            'capacity_by_tech']

        # For our analysis we only use the following metrics
        if db == "T_0.sqlite" or db == "U_0.sqlite" or db == "V_0.sqlite":
            metrics = [
                'costs_yearly', 'emissions_yearly', 'activity_by_fuel',
                'activity_by_tech', 'capacity_by_fuel', 'capacity_by_tech'
            ]
        else:
            metrics = ['costs_yearly', 'emissions_yearly']

        for metric in metrics:
            t = pd.Series(index=entries)
            t['db'] = db
            t['metric'] = metric
            t['run_name'] = tt.remove_ext(db)
            t['folder_results'] = os.path.join(result_folder, t['run_name'])
            tasks = tasks.append(t, ignore_index=True)

    # Perform simulations in parallel
    with parallel_backend('multiprocessing', n_jobs=ncpus):
        Parallel(n_jobs=ncpus, verbose=5)(delayed(analyze_results)(
            task, db_folder, all_dbs_dict, db_shift, node_prob,
            tech_group_dict, prob_type_dict, infra_dict, carbon_tax_dict)
                                          for index, task in tasks.iterrows())
    os.mkdir(stochdir)
os.chdir(stochdir)

n_cases = 1
for case in range(n_cases):
    # historical probabilities
    if case == 0:
        probabilities = probabilities_hist

    # climate change probabilities
    else:  # case == 1:
        probabilities = probabilities_climate_change

    # Iterate through each database for each case
    for db in dbs:
        db_name = tt.remove_ext(db)

        # ====================================
        # Stochastic input file
        # ====================================
        # Write File
        filename = "stoch_" + db_name + "_" + str(case) + ".py"
        # Open File
        f = open(filename, "w")
        f.write(
            "# Automatically generated stochastic input file from temoatools github.com/EnergyModels/temoatools\n\n"
        )
        f.write("verbose = True\n")
        f.write("force = True\n")
        f.write("\n")
        f.write("dirname = '" + db_name + "_" + str(case) + "'\n")  # Update
Example no. 3
0
def CreateConfigFile(model_directory,
                     model_filename,
                     saveEXCEL=False,
                     saveTEXTFILE=False,
                     keep_pyomo_lp_file=False,
                     debug=False,
                     solver=''):
    # Locate Database
    full_filename = tt.remove_ext(model_filename) + '.sqlite'
    dBpath = os.path.join(model_directory, full_filename)

    # Write Config File
    config_file = "config_" + tt.remove_ext(model_filename) + ".txt"
    if debug == True:
        print("config_file: " + str(config_file))
    f = open(config_file, "w")
    # ---
    f.write("#-----------------------------------------------------\n")
    f.write(
        "# This is an automatically generated configuration file for Temoa using"
    )
    f.write(" temoatools github.com/EnergyModels/temoatools\n")
    f.write(
        "# It allows you to specify (and document) all run-time model options\n"
    )
    f.write("# Legal chars in path: a-z A-Z 0-9 - _ \' / . :\n")
    f.write("# Comment out non-mandatory options to omit them\n")
    f.write("#-----------------------------------------------------\n")
    f.write("\n")
    f.write("# Input File (Mandatory)\n")
    f.write("# Input can be a .sqlite or .dat file\n")
    f.write("# Both relative path and absolute path are accepted\n")
    f.write("--input=" + dBpath + "\n")
    f.write("\n")
    f.write("# Output File (Mandatory)\n")
    f.write("# The output file must be a existing .sqlite file\n")
    f.write("--output=" + dBpath + "\n")
    f.write("\n")
    f.write("# Scenario Name (Mandatory)\n")
    f.write(
        "# This scenario name is used to store results within the output .sqlite file\n"
    )
    f.write("--scenario=" + "solve" + "\n")
    f.write("\n")
    f.write("# Path to the \"db_io\" folder (Mandatory)\n")
    f.write("# This is the location where database files reside\n")
    f.write("--path_to_db_io=" + model_directory + "\n")
    f.write("\n")
    f.write("# Spreadsheet Output (Optional)\n")
    f.write("# Direct model output to a spreadsheet\n")
    f.write(
        "# Scenario name specified above is used to name the spreadsheet\n")
    # ---
    # Option - saveExcel file, Turn off to save HD space
    # ---
    if saveEXCEL:
        f.write("--saveEXCEL\n")
    else:
        f.write("#--saveEXCEL\n")
    # ---
    f.write("\n")
    f.write("# Save the log file output (Optional)\n")
    f.write("# This is the same output provided to the shell\n")
    # ---
    # Option - saveTEXTFILE file, Turn off to save HD space
    # ---
    if saveTEXTFILE == True:  # Turn off to save HD space
        f.write("--saveTEXTFILE\n")
    else:
        f.write("#--saveTEXTFILE\n")
    # ---
    f.write("\n")
    f.write("# Solver-related arguments (Optional)\n")
    if len(solver) > 0:
        f.write("--solver=" + solver +
                "                    # Optional, indicate the solver\n")
    else:
        f.write(
            "#--solver=cplex                    # Optional, indicate the solver\n"
        )
    # ---
    # Option - keep_pyomo_lp_file file, Turn off to save HD space
    # ---
    if keep_pyomo_lp_file:  # Turn off to save HD space
        f.write(
            "--keep_pyomo_lp_file             # Optional, generate Pyomo-compatible LP file\n"
        )
    else:
        f.write(
            "#--keep_pyomo_lp_file             # Optional, generate Pyomo-compatible LP file\n"
        )
    # ---
    f.write("\n")
    f.write("# Modeling-to-Generate Alternatives (Optional)\n")
    f.write(
        "# Run name will be automatically generated by appending '_mga_' and iteration number to scenario name\n"
    )
    f.write("#--mga {\n")
    f.write(
        "#	slack=0.1                     # Objective function slack value in MGA runs\n"
    )
    f.write("#	iteration=4                   # Number of MGA iterations\n")
    f.write(
        "#	weight=integer                # MGA objective function weighting method, currently)"
    )
    f.write("'integer' or 'normalized'\n")
    f.write("#}\n")
    f.write("\n")
    f.close()
    # ---
    return config_file
def getActivityTOD(folders,
                   dbs,
                   switch='fuel',
                   sector_name='electric',
                   save_data='N',
                   create_plots='N',
                   conversion=277.777778,
                   run_name=''):
    #    inputs:
    #    1) folders         - paths containing dbs (list or single string if all in the same path)
    #    2) dbs             - names of databases (list)
    #    3) switch          - 'fuel' or 'tech', basis of categorization
    #    4) sectorName      - name of temoa sector to be analyzed
    #    5) saveData         - 'Y' or 'N', default is 'N'
    #    6) createPlots     - 'Y' or 'N', default is 'N'
    #    7) conversion      - conversion to GWh, default is 277.778 (from PJ)
    #    8) run_name         - Used for saving results in dedicated folder

    #    outputs:
    #    1) activity
    #    2) plots - optional
    #    3) Data  - optional
    # ==============================================================================
    print("Analyzing activity by time of day (TOD)")

    # Save original directory
    wrkdir = os.getcwd()

    # If only a single db and folder provided, change to a list
    if type(dbs) == str and type(folders) == str:
        dbs = [dbs]
        folders = [folders]
    # If a list of folders is provided with one database, only use first folder
    elif type(dbs) == str:
        dbs = [dbs]
        folders = [folders[0]]
    # If only a single folder provided, create a list of the same folder
    elif type(folders) == str:
        fldrs = []
        for db in dbs:
            fldrs.append(folders)
        folders = fldrs

    # Create dataframe to hold each capacity_single series
    activity = pd.DataFrame(dtype='float64')

    # Iterate through each db
    for folder, db in zip(folders, dbs):
        activity_single = SingleDB(folder,
                                   db,
                                   switch=switch,
                                   sector_name=sector_name,
                                   conversion=conversion)
        activity = pd.concat([activity, activity_single])

    # Reset index (remove multi-level indexing, easier to use in Excel)
    activity = activity.reset_index()

    # Directory to hold results
    if save_data == 'Y' or create_plots == 'Y':
        tt.create_results_dir(wrkdir=wrkdir, run_name=run_name)

    # Save results to CSV
    if save_data == 'Y':
        # Create savename based on switch
        if switch == 'fuel':
            savename = 'activityTOD_by_fuel.csv'
        else:
            savename = 'activityTOD_by_tech.csv'
        activity.to_csv(savename)

    if create_plots == 'Y':

        df = activity.reset_index()

        import matplotlib.pyplot as plt
        import seaborn as sns
        plt.rcParams.update({'figure.max_open_warning': 0})  # ignore warning

        for database in df.database.unique():
            # new figure
            plt.figure()
            # set aesthetics
            sns.set_style(
                "white", {
                    "font.family": "serif",
                    "font.serif": ["Times", "Palatino", "serif"]
                })
            sns.set_context("talk")

            # select relevant database
            df2 = df[(df.database == database)]
            # plot
            sns.relplot(x='tod',
                        y='value',
                        hue='fuelOrTech',
                        row='year',
                        col='season',
                        data=df2,
                        kind='line')

            # save
            if switch == 'fuel':
                savename = 'yearlyActivityTOD_byFuel' + tt.remove_ext(
                    database) + '.pdf'
            else:
                savename = 'yearlyActivityTOD_byTech' + tt.remove_ext(
                    database) + '.pdf'
            plt.savefig(savename, dpi=resolution)
            # close the figure
            plt.close()

    # Return to original directory
    os.chdir(wrkdir)

    return activity
Example no. 5
0
def move_data_to_db(XLSX, path=os.path.normcase('.')):
    # =============================================================================
    # Begin Function
    # =============================================================================
    data_path = os.path.join(path, 'data')
    print(data_path)

    workDir = os.getcwd()
    os.chdir(data_path)

    # Empty db with set schema (expected to be within the same folder)

    emptydB = os.path.join(tt.resource_path, "db_schema_universal.db")

    # Create output filename using inputfilename
    outputdB = tt.remove_ext(XLSX) + ".db"

    # Keep track of sheet_names and corresponding number of columns to read-in
    sheets = [("representativeDays", 3), ("timesOfDay", 2), ("Connections", 18), ("ConnectionsExisting", 4),
              ("Demand", 4), ("DemandTOD", 3), ("DiscountRateGlobal", 2), ("Emission", 5), ("Fuels", 19),
              ("FuelsExisting", 4), ("PowerPlants", 12),
              ("PowerPlantsPerformance", 10), ("PowerPlantsCosts", 12), ("PowerPlantsConstraints", 10),
              ("PowerPlantsExisting", 4), ("MinCapacity", 4), ("ReserveMargin", 2), ("capacityFactorTOD", 5),
              ("ref", 6)]

    # ----------
    # sqlite file prep
    # ----------
    # Delete old *.sqlite file (if it already exists) and copy/rename copy of temoa_schema.sqlite
    if os.path.isfile(outputdB):
        os.remove(outputdB)
    shutil.copyfile(emptydB, outputdB)

    # Set-up sqlite connection
    conn = sqlite3.connect(outputdB)
    c = conn.cursor()

    # ----------
    # sqlite file prep
    # ----------
    for sheet in sheets:

        # Extract sheet_name and number of columns for each sheet:
        sheet_name = sheet[0]
        sheet_col = sheet[1]

        # Read XLS sheet
        df = pd.read_excel(XLSX, sheet_name=sheet_name)
        df = df.drop([0])  # Remove first row (units)

        # Create SQL command based on number of entries
        command = 'INSERT INTO ' + sheet_name + ' VALUES (?'
        for i in range(sheet_col - 1):
            command = command + ',?'
        command = command + ')'

        # Execute SQL command
        try:
            c.executemany(command, np.array(df))
        except:
            print(command)
            print(np.array(df))
            c.executemany(command, np.array(df))

    # ----------
    # Save(commit) the changes and close sqlite file
    # ----------
    conn.commit()
    conn.close()

    os.chdir(workDir)
    return outputdB
    # new figure
    plt.figure()

    # set aesthetics
    sns.set_style("white", {
        "font.family": "serif",
        "font.serif": ["Times", "Palatino", "serif"]
    })
    sns.set_context("talk")

    # select relevant database
    df2 = df[(df.database == database)]

    # plot
    sns.relplot(x='Hour (-)',
                y='Activity (GWh)',
                hue='Fuel',
                row='Year',
                col='Season',
                data=df2,
                kind='line',
                palette='bright')

    # save
    savename = 'plot_yearlyActivityTOD_byFuel_' + tt.remove_ext(
        database) + '.pdf'
    plt.savefig(savename, dpi=600)

# return to original directory
os.chdir(wrkdir)
Example no. 7
0
    def test_remove_ext(self):

        filename = "longFileName1234.sqlite"
        result = tt.remove_ext(filename)
        expected = "longFileName1234"
        self.assertEqual(result, expected)