Code Example #1
File: config_reader_ex.py Project: bsgip/c3x-data
"""
    Extracts each block of a config file
"""

from c3x.data_loaders import configfileparser

# Reads a config file to produce a dictionary that can be handed over to functions
config = configfileparser.ConfigFileParser("config/example_full_config.ini")

data_paths = config.read_data_path()
batch_info = config.read_batches()
time = config.read_time_filters()
signs = config.read_sign_correction()
duplicates = config.read_duplicate_removal()
data_usage = config.read_data_usage()
nan_handling = config.read_nan_handeling()
resampling = config.read_resampling()
refill = config.read_refill()
optimiser_objectives_set = config.read_optimiser_objective_set()
optimiser_objectives = config.read_optimiser_objectives()
inverter = config.read_inverter()
energy_storage = config.read_energy_storage()
energy_system = config.read_energy_system()
tariff_factors = config.read_tariff_factors()
scenario = config.read_scenario_info()
measurement_types = config.read_measurement_types()
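
# Each read_* call above returns a plain dictionary. An optional way to see what
# a given config file actually provides is to print a few sections; the sections
# chosen below are just examples, and their keys depend entirely on the ini file
# being parsed.
for name, section in [("data_paths", data_paths),
                      ("batch_info", batch_info),
                      ("resampling", resampling)]:
    print(f"[{name}]")
    for key, value in section.items():
        print(f"  {key} = {value!r}")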
Code Example #2
File: demonstrate_FoMs.py Project: bsgip/c3x-data
"""
    This example shows how cleaned data can be read for further use
"""

import os
import pandas
import numpy
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import datetime

# BSGIP specific tools
from c3x.data_loaders import configfileparser, nextgen_loaders, tariff_loaders
from c3x.data_cleaning import cleaners
from c3x.data_statistics import figure_of_merit

config = configfileparser.ConfigFileParser("config/example_for_FoMs.ini")

data_usage = config.read_data_usage()
measurement_types = config.read_measurement_types()
data_paths = config.read_data_path()
data_files = []

# Create a nextGen data object that has working paths and can be sliced using batches
next_gen = nextgen_loaders.NextGenData('FoM', source=data_paths['source'],
                                       batteries=data_paths["batteries"],
                                       solar=data_paths["solar"],
                                       loads=data_paths["loads"],
                                       node=data_paths["node"],
                                       results=data_paths["results"])
cleaned_data = next_gen.read_clean_data(measurement_types["loads"], measurement_types["solar"],
                                        measurement_types["batteries"])
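
# Optional sketch of plotting one cleaned series, using the matplotlib imports
# above. It assumes cleaned_data maps node ids to pandas DataFrames indexed by
# timestamp; that structure is an assumption, not something this snippet confirms.
for node_id, frame in list(cleaned_data.items())[:1]:
    fig, ax = plt.subplots()
    ax.plot(frame.index, frame.iloc[:, 0])
    ax.set_title(f"Node {node_id}")
    ax.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))
    plt.show()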
Code Example #3
"""
    The data from those connection points is saved as evolve core network objects in pickled numpy files.

    Each node is saved separately in a file named node_$ID.npy
"""
import os
import pandas
import pickle
from time import mktime

# BSGIP specific tools
from c3x.data_loaders import configfileparser, nextgen_loaders
from c3x.data_cleaning import cleaners

##################### Load and check data #####################

config = configfileparser.ConfigFileParser(
    "./scripts/config/example_for_cleaning.ini")

data_paths = config.read_data_path()
batch_info = config.read_batches()
data_usage = config.read_data_usage()

# Create a nextGen data object that has working paths and can be sliced using batches
next_gen = nextgen_loaders.NextGenData(
    data_name='NextGen',
    source=data_paths["source"],
    batteries=data_paths["batteries"],
    solar=data_paths["solar"],
    node=data_paths["node"],
    loads=data_paths["loads"],
    results=data_paths["results"],
    number_of_batches=batch_info["number_of_batches"],
    files_per_batch=batch_info["files_per_batch"])
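
# Once cleaning has produced the node_$ID.npy files described in the docstring,
# they can be read back with plain numpy. This is only a sketch: it assumes the
# files live in data_paths["node"] and hold pickled objects (hence allow_pickle).
import numpy

node_dir = data_paths["node"]
for file_name in sorted(os.listdir(node_dir)):
    if file_name.startswith("node_") and file_name.endswith(".npy"):
        node_obj = numpy.load(os.path.join(node_dir, file_name), allow_pickle=True)
        print(file_name, type(node_obj))
        break  # inspect only the first node file in this sketch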
Code Example #4
"""
    This leads to NaN blocks in the data, and the data refiller can attempt to refill those blocks with
    data. After that, each node is saved separately in a file named node_$ID.npy
"""
import os
import pandas
from time import mktime

# BSGIP specific tools
from c3x.data_loaders import configfileparser, nextgen_loaders
from c3x.data_cleaning import cleaners

##################### Load and check data #####################

# config = configfileparser.ConfigFileParser("config/cleaning_for_dan.ini")
config = configfileparser.ConfigFileParser("config/example_for_refill.ini")

data_paths = config.read_data_path()
batch_info = config.read_batches()
measurement_types = config.read_data_usage()

# Create a nextGen data object that has working paths and can be sliced using batches
next_gen = nextgen_loaders.NextGenData(
    data_name='NextGen',
    source=data_paths["source"],
    batteries=data_paths["batteries"],
    solar=data_paths["solar"],
    node=data_paths["node"],
    loads=data_paths["loads"],
    results=data_paths["results"],
    number_of_batches=batch_info["number_of_batches"],
    files_per_batch=batch_info["files_per_batch"])
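
# The actual gap refilling lives in c3x.data_cleaning; the lines below only
# illustrate the idea with plain pandas and are not the library's implementation.
demo = pandas.Series([1.0, float("nan"), float("nan"), 4.0],
                     index=pandas.date_range("2019-01-01", periods=4, freq="30min"))
refilled = demo.interpolate(method="time")  # fill the NaN block linearly in time
print(refilled)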
Code Example #5
"""
    data. After that, each node is saved separately in a file named node_$ID.npy
"""
import os
import pandas
import pickle
from time import mktime

# BSGIP specific tools
from c3x.data_loaders import configfileparser, nextgen_loaders
from c3x.data_cleaning import cleaners

##################### Load and check data #####################

# config = configfileparser.ConfigFileParser("config/cleaning_for_dan.ini")
config = configfileparser.ConfigFileParser("./scripts/config/example_for_refill.ini")

data_paths = config.read_data_path()
batch_info = config.read_batches()
data_usage = config.read_data_usage()

# Create a nextGen data object that has working paths and can be sliced using batches
next_gen = nextgen_loaders.NextGenData(data_name='NextGen',
                                       source=data_paths["source"],
                                       batteries=data_paths["batteries"],
                                       solar=data_paths["solar"],
                                       node=data_paths["node"],
                                       loads=data_paths["loads"],
                                       results=data_paths["results"],
                                       number_of_batches=batch_info["number_of_batches"],
                                       files_per_batch=batch_info["files_per_batch"],
                                       concat_batches_start=batch_info["concat_batches_start"],
                                       concat_batches_end=batch_info["concat_batches_end"])
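
# mktime is imported above; if the time filters in the config are given as Unix
# timestamps (an assumption, not confirmed here), a datetime converts like this.
import datetime

start = datetime.datetime(2019, 1, 1)
start_epoch = mktime(start.timetuple())
print(start_epoch)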
Code Example #6
File: statistics_ex.py Project: bsgip/c3x-data
import os
import pandas

from c3x.data_loaders import configfileparser, nextgen_loaders
from c3x.data_statistics import statistics as stats

# Reads a config file to produce a dictionary that can be handed over to functions
config = configfileparser.ConfigFileParser("config/config_nextGen_stats.ini")
data_paths = config.read_data_path()
batch_info = config.read_batches()
measurement_types = config.read_data_usage()

# Create a nextGen data object that has working paths and can be sliced using batches
# It might be appropriate for this example to make the batches smaller, although
# that may increase computing time.
# The next statement can be commented out if the data was already processed
# before running this script.

nextgen = nextgen_loaders.NextGenData(
    data_name='NextGen',
    source=data_paths["source"],
    batteries=data_paths["batteries"],
    solar=data_paths["solar"],
    node=data_paths["node"],
    loads=data_paths["loads"],
    results=data_paths["results"],
    stats=data_paths["stats"],
    number_of_batches=batch_info["number_of_batches"],
    files_per_batch=batch_info["files_per_batch"],
    concat_batches_start=batch_info["concat_batches_start"],
    concat_batches_end=batch_info["concat_batches_end"])
# Now we have a folder structure with many batch-numbered files
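
# To see what that batch output looks like on disk, the results directory can
# simply be listed. The exact file naming is up to NextGenData; this sketch only
# enumerates whatever was written to data_paths["results"].
results_dir = data_paths["results"]
for file_name in sorted(os.listdir(results_dir)):
    print(os.path.join(results_dir, file_name))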