Example #1
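    # Fixture: builds an Incidenti instance pointing at fake file paths, injects an
    # in-memory test dataframe, and wraps it in the IncidentsDataframeAggregator
    # under test.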
    def setUp(self) -> None:
        self.__log = logging.getLogger('IncidentsDataframeAggregator')

        s_config = '{"data_structure":{"columns":[], ' \
                    '"convert_to_number": [],' \
                    '"incident_outcome_columns":["COL01","COL02","COL03"],' \
                    '"other_injured_columns": ["OINJ01", "OINJ02"],' \
                    '"other_deadh_columns": ["ODEA01"]' \
                    '}}'
        config = json.loads(s_config)
        support_dataframes = SupportDecodeDataframes()
        support_dataframes.load_dataframes()

        self.incidenti = Incidenti(
            file_incidenti=os.path.join("/fake_dir/", "fake_file_name.csv"),
            anagrafica_comuni=AnagraficaComuni("/fake_dir/fake_filename.csv"),
            support_dataframes=support_dataframes,
            data_file_configurations=config["data_structure"])
        test_df = {
            "Comune": [
                "Milano", "Como", "Como", "Cernusco Sul Naviglio", "Roncolate",
                "Vimodrone", "Almenno"
            ],
            "Provincia": ["MI", "CO", "CO", "MI", "BG", "MI", "AL"],
            "comune": ["1001", "1000", "1000", "1002", "1003", "1004", "1005"],
            "veicolo__a___sesso_conducente": [1, 1, 2, 0, 1, 1, 0],
        }
        self.incidenti.df_incidenti = pd.DataFrame.from_records(test_df)

        self.aggregator = IncidentsDataframeAggregator(self.incidenti)
Example #2
    def setUp(self) -> None:
        self.__log = logging.getLogger('IncidentsDataframeAggregator')

        s_config = '{"data_structure":{"columns":[], ' \
                    '"convert_to_number": [],' \
                    '"incident_outcome_columns":["COL01","COL02","COL03"],' \
                    '"other_injured_columns": ["OINJ01", "OINJ02"],' \
                    '"other_deadh_columns": ["ODEA01"]' \
                    '}}'
        config = json.loads(s_config)
        support_dataframes = SupportDecodeDataframes()
        support_dataframes.load_dataframes()

        self.incidenti = Incidenti(
            file_incidenti=os.path.join("/fake_dir/", "fake_file_name.csv"),
            anagrafica_comuni=AnagraficaComuni("/fake_dir/fake_filename.csv"),
            support_dataframes=support_dataframes,
            data_file_configurations=config["data_structure"])
        test_df = {
            "Comune":          ["Milano", "Como", "Como", "Cernusco Sul Naviglio","Roncolate", "Vimodrone"],
            "comune":          ["1001",   "1000", "1000", "1002",                 "1003",      "1004"],
            "Provincia":       ["MI",     "CO",   "CO",   "MI",                    "BG",       "MI"],

            "COL01":           [ 0,        1,      1,      1,                       1,          4], # Incolume: 4, Morto: 1
            "COL02":           [ 4,        3,      1,      2,                       2,          1], # Incolume: 2, Morto: 2, Ferito: 2
            "COL03":           [ 1,        1,      2,      2,                       4,          1], # Incolume: 3, Morto: 1, Ferito: 2

            "ODEA01":          [ 0,        0,      2,      1,                       0,          1], # 4

            "OINJ01":          [ 3,        0,      0,      0,                       5,          0], # 8
            "OINJ02":          [ 6,        0,      1,      1,                       0,          1], # 9
            "natura_incidente":[11,        1,      1,      2,                       2,          1]
        }
        self.incidenti.df_incidenti = pd.DataFrame.from_records(test_df)

        self.aggregator = IncidentsDataframeAggregator(self.incidenti)
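    # A hypothetical test built on this fixture (the return value of
    # calculate_total_incident_outcome() is not documented in these snippets,
    # so the assertion below is only illustrative):
    def test_calculate_total_incident_outcome_returns_data(self):
        totals = self.aggregator.calculate_total_incident_outcome()
        self.assertIsNotNone(totals)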
Example #3
def main(args):
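    """Run the pipeline described by the JSON config at args.config_file.

    Depending on args.operation ("article" or "debug"), either build the
    article artifacts or run a set of debug aggregations and charts.
    Returns True on success, False otherwise.
    """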
    log = logging.getLogger('RunIncidenti')
    log.info("************> INIZIO <************")
    rv = False
    try:
        if not os.path.isfile(args.config_file):
            log.error("Invalid config file parameter: {fi}".format(
                fi=args.config_file))
            return rv
        with open(args.config_file) as fconfig:
            config = json.load(fconfig)
            base_dir = config['data_files']['data_path']
            incidenti_fn = config['data_files']['file_incidenti']
            com_anagr_fn = config['data_files']['anagrafica_comuni']

            anagrafica_comuni = AnagraficaComuni(
                file_comuni_anagrafica=os.path.join(base_dir, com_anagr_fn))
            support_dataframes = SupportDecodeDataframes()

            incidenti = Incidenti(
                file_incidenti=os.path.join(base_dir, incidenti_fn),
                anagrafica_comuni=anagrafica_comuni,
                support_dataframes=support_dataframes,
                data_file_configurations=config["data_structure"])
            rv = incidenti.load_data_files()

            aggregator = IncidentsDataframeAggregator(incidenti)

            if not rv:
                log.error("Fallita lettura dei datafile.")
            else:
                if args.operation == "article":
                    rv = article_artifacts_maker(aggregator, incidenti, config)
                elif args.operation == "debug":
                    aggregator.get_incidents_outcome_by_typology('nation_wide')
                    aggregator.calculate_total_incident_outcome()
                    chart_male_female(aggregator, config)
                    chart_incident_typology(aggregator, config)
                    chart_hourly_incidents(aggregator, config)
                    passengers_outcome("province", "Milano", aggregator,
                                       incidenti, config)

    except Exception as ex:
        log.error("The job returned an error - {ex}".format(ex=str(ex)))

    if not rv:
        log.error("Operation failed - calculated data is not valid.")

    log.info("************> FINE <************")
    return rv
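# Minimal entry-point sketch (not part of the original module): main(args) only
# needs an object exposing `config_file` and `operation` ("article" or "debug"),
# so an argparse wrapper along these lines is enough to run it.
if __name__ == "__main__":
    import argparse
    import sys

    parser = argparse.ArgumentParser(description="Run the incidents data pipeline")
    parser.add_argument("--config-file", required=True,
                        help="path to the JSON configuration file")
    parser.add_argument("--operation", choices=["article", "debug"],
                        default="debug", help="kind of run to perform")
    logging.basicConfig(level=logging.INFO)
    sys.exit(0 if main(parser.parse_args()) else 1)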
Example #4
    def setUp(self) -> None:
        self.__log = logging.getLogger('IncidentsDataframeAggregator')

        s_config = '{"data_structure":{"columns":[], "convert_to_number": []}}'
        config = json.loads(s_config)

        incidenti = Incidenti(
            file_incidenti=os.path.join("/fake_dir/", "fake_file_name.csv"),
            anagrafica_comuni=AnagraficaComuni("/fake_dir/fake_filename.csv"),
            support_dataframes=SupportDecodeDataframes(),
            data_file_configurations=config["data_structure"])
        test_df = {
            "Comune":         ["Milano", "Como", "Como", "Cernusco Sul Naviglio", "Roncolate"],
            "comune":         ["1001",   "1000", "1000", "1002",                  "1003" ],
            "provincia":      ["MI",     "CO",   "CO",   "MI",                    "BG"],
            "tipo_veicolo_a": [ 1,        1,      2,      1,                      2]
        }
        incidenti.df_incidenti = pd.DataFrame.from_records(test_df)
        self.aggregator = IncidentsDataframeAggregator(incidenti)
Example #5
    def setUp(self) -> None:
        self.__log = logging.getLogger('IncidentsDataframeAggregator')

        s_config = '{"data_structure":{"columns":[], ' \
                    '"convert_to_number": [],' \
                    '"incident_outcome_columns":["COL01","COL02","COL03"],' \
                    '"other_injured_columns": ["OINJ01", "OINJ02"],' \
                    '"other_deadh_columns": ["ODEA01"]' \
                    '}}'
        config = json.loads(s_config)
        support_dataframes = SupportDecodeDataframes()
        support_dataframes.load_dataframes()

        self.incidenti = Incidenti(
            file_incidenti=os.path.join("/fake_dir/", "fake_file_name.csv"),
            anagrafica_comuni=AnagraficaComuni("/fake_dir/fake_filename.csv"),
            support_dataframes=support_dataframes,
            data_file_configurations=config["data_structure"])
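        # The "esito" columns below appear to follow the outcome coding listed in
        # self.outcome_labels (1=Incolume, 2=Ferito, 3=Morto nelle 24 ore,
        # 4=Morto entro il trentesimo giorno, 0=Non dato); this mapping is an
        # assumption inferred from the labels, not stated in the snippet itself.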
        test_df = {
            "Comune": [
                "Milano", "Como", "Como", "Cernusco Sul Naviglio", "Roncolate",
                "Vimodrone", "Peschiera Borromeo"
            ],
            "comune": ["1001", "1000", "1000", "1002", "1003", "1004", "1005"],
            "Provincia": ["MI", "CO", "CO", "MI", "BG", "MI", "MI"],
            "veicolo__a___esito_conducente": [1, 1, 3, 0, 1, 0, 0],
            "veicolo__a___passeggeri_an35": [1, 2, 1, 0, 1, 0, 0],
            "veicolo__a___esito_passegg38": [2, 1, 0, 0, 3, 0, 0],
            "veicolo__a___esito_passegg41": [2, 1, 2, 0, 4, 0, 0],
            "veicolo__a___esito_passegg44": [3, 3, 4, 0, 0, 0, 0]
        }
        self.incidenti.df_incidenti = pd.DataFrame.from_records(test_df)

        self.aggregator = IncidentsDataframeAggregator(self.incidenti)
        self.outcome_labels = [
            "Incolume", "Ferito", "Morto nelle 24 ore",
            "Morto entro il trentesimo giorno", "Non dato"
        ]
Example #6
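# Notebook-style snippet: load the JSON config, build the Incidenti and aggregator
# objects, then print the class versions and the number of rows loaded.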
import json
import logging
import os

from src.ClassIncidenti import Incidenti
from src.ClassAnagraficaComuni import AnagraficaComuni
from src.ClassSupportDecodeDataframes import SupportDecodeDataframes
from src.ClassIncidentsDataframeAggregator import IncidentsDataframeAggregator

# Path to the JSON configuration file (placeholder value, adjust as needed).
config_file = "config.json"

config = None
with open(config_file) as fconfig:
    config = json.load(fconfig)

# init_logger is assumed to come from the project's logging utilities.
init_logger('/tmp', log_level=logging.FATAL, std_out_log_level=logging.FATAL)

base_dir = config['data_files']['data_path']
print(base_dir)
incidenti_fn = config['data_files']['file_incidenti']
incidenti_cols = config['data_structure']['columns']
cols_to_be_converted = config["data_structure"]["convert_to_number"]

com_anagr_fn = config['data_files']['anagrafica_comuni']
anagrafica_comuni = AnagraficaComuni(
    file_comuni_anagrafica=os.path.join(base_dir, com_anagr_fn))
support_dataframes = SupportDecodeDataframes()

incidenti = Incidenti(file_incidenti=os.path.join(base_dir, incidenti_fn),
                      anagrafica_comuni=anagrafica_comuni,
                      support_dataframes=support_dataframes,
                      data_file_configurations=config["data_structure"])
incidenti.load_data_files()
aggregator = IncidentsDataframeAggregator(incidenti)

("Vers. Incidenti:", Incidenti.get_version(), "Vers. Aggregator:",
 IncidentsDataframeAggregator.get_version(), "Numerosita' df:",
 incidenti.df_incidenti.shape[0])