Example #1
0
def test_download(caplog, monkeypatch):
    """Check that ``tools.download`` fetches a file once and skips re-downloads.

    The first call must log the target file name and create the file; a
    second call for an already existing file must not emit a new
    "Downloading" log message.  The ``monkeypatch`` fixture is part of the
    test signature but unused here.
    """
    from deflex import tools

    url = ("https://files.de-1.osf.io/v1/resources/a5xrj/providers/osfstorage/"
           "5fdc7e3df0df5405452ef7ae?action=download&direct&version=1")
    fn = os.path.join(os.path.expanduser("~"), ".tmp_test_x456FG6")
    caplog.set_level(logging.DEBUG)
    assert not os.path.isfile(fn)
    try:
        tools.download(fn, url)
        assert ".tmp_test_x456FG6" in caplog.text
        assert os.path.isfile(fn)
        caplog.clear()
        # A second call for an existing file must be a no-op (no download).
        tools.download(fn, url)
        assert "Downloading" not in caplog.text
    finally:
        # Remove the temp file even if an assertion above failed, so the
        # test does not leak files into the user's home directory.
        if os.path.isfile(fn):
            os.remove(fn)
def get_price_from_opsd(path, year=2014):
    """Get day-ahead electricity prices for Germany from the OPSD time series.

    Downloads the OPSD csv file into *path* (``tools.download`` skips the
    download if the file already exists), converts the UTC index to
    Europe/Berlin local time and returns the ``DE_price_day_ahead`` column
    for the given calendar *year*.

    Parameters
    ----------
    path : str
        Directory in which the OPSD csv file is stored.
    year : int
        Calendar year to slice from the series.  Defaults to 2014, which
        preserves the original behaviour.

    Returns
    -------
    pandas.Series
        Hourly day-ahead prices for Germany in local (CET/CEST) time.
    """
    fn = os.path.join(path, "opsd_day_ahead_prices.csv")
    tools.download(fn, OPSD_URL)

    # Parse the index after reading: the ``date_parser`` argument of
    # ``read_csv`` is deprecated (removed in pandas 2.x); converting the
    # index with ``pd.to_datetime(..., utc=True)`` is equivalent.
    de_ts = pd.read_csv(fn, index_col="utc_timestamp")
    de_ts.index = pd.to_datetime(de_ts.index, utc=True)
    de_ts.index = de_ts.index.tz_convert("Europe/Berlin")
    de_ts.index.rename("cet_timestamp", inplace=True)
    berlin = pytz.timezone("Europe/Berlin")
    start_date = berlin.localize(datetime(year, 1, 1, 0, 0, 0))
    end_date = berlin.localize(datetime(year, 12, 31, 23, 0, 0))
    return de_ts.loc[start_date:end_date, "DE_price_day_ahead"]
Example #3
0
# OSF download link for the v0.3.x scenario examples.
url = (
    "https://files.de-1.osf.io/v1/resources/a5xrj/providers/osfstorage"
    "/605c566be12b600065aa635f?action=download&direct&version=1"
)

# !!! ADAPT THE PATH !!!
path = "your/path"

# Initialise the logger.
logger.define_logging()

# Fetch the zipped scenarios once (skipped if the zip-file already exists)
# and unpack them into ``path``.
os.makedirs(path, exist_ok=True)
fn = os.path.join(path, "deflex_scenario_examples_v03.zip")
if not os.path.isfile(fn):
    tools.download(fn, url)
with ZipFile(fn, "r") as zip_ref:
    zip_ref.extractall(path)
logging.info("All v0.3.x scenarios examples extracted to %s.", path)

# Have a look at the folder above: it now contains the scenario files.
# The csv and the xlsx scenarios hold identical data.  The csv directories
# can be read faster by the computer, while the xlsx files are easier for
# humans to read because all sheets live in a single file.

# NOTE: Large models may need up to 24 GB of RAM, so start with small
# models and increase the size step by step.  Large models can also be run
# with fewer time steps, but then the annual limits have to be adapted.

# Pick one example; we start with a small one:
file = "deflex_2014_de02_no-heat_csv"
    end_date = berlin.localize(datetime(2014, 12, 31, 23, 0, 0))
    return de_ts.loc[start_date:end_date, "DE_price_day_ahead"]


# !!! ADAPT THE PATH !!!
my_path = "your/path"

# Set logger
logger.define_logging()

# Download and unzip the result examples (only if the zip-file is missing).
# A single ``makedirs`` call is enough: the zip-file lives directly in
# ``my_path``, so creating the directory of ``my_fn`` again was redundant.
os.makedirs(my_path, exist_ok=True)
my_fn = os.path.join(my_path, "deflex_result_examples_v03.zip")
if not os.path.isfile(my_fn):
    tools.download(my_fn, OSF_URL)
    with ZipFile(my_fn, "r") as zip_ref:
        zip_ref.extractall(my_path)
    logging.info("All v0.3.x result examples extracted to %s.", my_path)

# Search for all de02 result files.
result_files = pp.search_results(path=my_path, map=["de02"])

# Restore the results for all found files.
results = pp.restore_results(result_files)

# Create a table with the key values of the restored results.
key_values = analyses.get_key_values_from_results(results)

# Store the table with the key values.
key_values_file = os.path.join(my_path, "key_values_v03.xlsx")