def make_problems_for_BritSiv():
    """Brit Siv ønsket oversikt over varslede skredprobelemr og faregrader for Indre Fjordane of Fjordane
    de to siste årene (2015-2017).    """

    output_filename = '{0}Skredproblemer Indre Fjordane for BritSiv.csv'.format(
        env.output_folder)
    pickle_file_name = '{0}runavalancheproblems_britsiv.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Fjordane 2015-16
        region_id = 121
        from_date, to_date = gm.get_forecast_dates('2015-16')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        # Get Indre fjordane 2016-17
        region_id = 3027
        from_date, to_date = gm.get_forecast_dates('2016-17')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    all_problems = []
    for d in all_dangers:
        all_problems += d.avalanche_problems
    all_problems.sort(key=lambda AvalancheProblem: AvalancheProblem.date)

    _save_problems(all_problems, output_filename)
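
# _save_problems is a module helper defined elsewhere. A minimal sketch of what
# such a writer might look like, assuming the ' ;'-separated layout and the
# problem attributes used by the other writers in this module (illustration
# only, not the actual helper):
#
#     def _save_problems(problems, file_name):
#         with open(file_name, 'w', encoding='utf-8') as f:
#             f.write('Date ;Region ;Problem order ;Problem\n')
#             for p in problems:
#                 f.write(' ;'.join([fe.make_str(v) for v in
#                                    [p.date, p.region_name, p.order, p.problem]]) + '\n')
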
def make_forecasts_for_Christian():
    """Christian Jaedicke ønsker oversikt over varsel og skredproblemer siste tre år i Narvik."""

    pickle_file_name = '{0}forecasts_ofoten_christian.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Narvik 2014-15 and 2015-16
        region_id = 114

        from_date, to_date = gm.get_forecast_dates('2014-15')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        from_date, to_date = gm.get_forecast_dates('2015-16')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        # Get Ofoten 2016-17 (region 3015 replaced Narvik from november 2016)
        region_id = 3015
        from_date, to_date = gm.get_forecast_dates('2016-17')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}Varsel Ofoten for Christian.csv'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')

def make_forecasts_for_Sander():
    """August 2018: Hi again Ragnar. Do you have statistics on the forecasted danger levels
    over a whole season for Norway? An average: XX days with danger level 1, XX days with
    danger level 2, XX days with danger level 3 ... distributed over XX forecast days.

    :return:
    """

    pickle_file_name = '{0}201808_avalanche_forecasts_sander.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:

        years = ['2012-13', '2013-14', '2014-15', '2015-16', '2016-17', '2017-18']
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            region_ids = gm.get_forecast_regions(y, get_b_regions=True)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201808 Faregrader for Sander.txt'.format(
        env.output_folder)

    import pandas as pd

    # AvalancheDanger objects expose their attributes via __dict__, so the list
    # loads directly into a DataFrame.
    all_dangers_df = pd.DataFrame([a.__dict__ for a in all_dangers])

    # Count forecast days per danger level and write a ';'-separated summary to
    # file, answering the request in the docstring. Assumes the danger_level
    # attribute used on these objects elsewhere in this module.
    danger_level_counts = all_dangers_df['danger_level'].value_counts().sort_index()
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        f.write('Danger level;Number of forecast days\n')
        for level, count in danger_level_counts.items():
            f.write('{0};{1}\n'.format(level, count))

def get_data(from_date, to_date, region_ids, pickle_file_name_1, get_new):
    """Gets both forecasts and incidents for the given regions and dates.
    Time consuming and inefficient. Not proud of it.

    :param from_date:           [date] start of the period
    :param to_date:             [date] end of the period
    :param region_ids:          [int or list of ints] forecast region ids
    :param pickle_file_name_1:  [str] full path of the local pickle file
    :param get_new:             [bool] if True, request new data; if False, load the pickle
    :return all_forecasts, all_incidents:
    """

    if get_new:
        # get all data and save to pickle
        all_incidents = go.get_incident(from_date,
                                        to_date,
                                        region_ids=region_ids,
                                        geohazard_tids=10)
        all_forecasts = gd.get_forecasted_dangers(region_ids, from_date,
                                                  to_date)
        mp.pickle_anything([all_forecasts, all_incidents], pickle_file_name_1)
    else:
        # load data from pickle
        all_forecasts, all_incidents = mp.unpickle_anything(pickle_file_name_1)

    return all_forecasts, all_incidents

def get_2016_17_warnings(how_to_get_data='Get new and dont pickle', pickle_file_name=None):
    """
    :param how_to_get_data:     'Get new and dont pickle', 'Get new and save pickle' or 'Load pickle'
    :param pickle_file_name:    Not needed if no pickles are involved
    :return:
    """

    if 'Get new' in how_to_get_data:

        from_date = dt.date(2016, 11, 30)
        #to_date = dt.date.today()
        to_date = dt.date(2017, 5, 31)

        #region_ids = [3012, 3013]
        region_ids = gm.get_forecast_regions(year='2016-17')

        all_warnings = []
        for region_id in region_ids:
            all_warnings += gd.get_forecasted_dangers(region_id, from_date, to_date, include_ikke_vurdert=False)

        # Sort by date
        all_warnings = sorted(all_warnings, key=lambda AvalancheDanger: AvalancheDanger.date)

        if 'and save pickle' in how_to_get_data:
            mp.pickle_anything(all_warnings, pickle_file_name)

    elif 'Load pickle' in how_to_get_data:
        all_warnings = mp.unpickle_anything(pickle_file_name)

    else:
        all_warnings = 'No valid data retrieval method given to get_2016_17_warnings.'

    return all_warnings
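
# A minimal usage sketch for get_2016_17_warnings (the pickle file name below is
# an assumption for illustration, not a name used elsewhere in this module):
#
#     pickle_warnings_file_name = '{0}warnings_2016_17.pickle'.format(env.local_storage)
#     all_warnings = get_2016_17_warnings('Get new and save pickle', pickle_warnings_file_name)
#     all_warnings = get_2016_17_warnings('Load pickle', pickle_warnings_file_name)
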
def get_all_ofoten():
    """Dangers and problems for Ofoten (former Narvik). Writes file to .csv"""

    get_new = True
    get_observations = False
    write_csv = True
    plot_dangerlevels_simple = False

    select_years = [
        '2012-13', '2013-14', '2014-15', '2015-16', '2016-17', '2017-18'
    ]
    region_id_Narvik = 114  # Narvik used from 2012 until nov 2016
    region_id_Ofoten = 3015  # Ofoten introduced in november 2016

    warnings_pickle = '{0}allforecasteddangerlevels_Ofoten_201218.pickle'.format(
        env.local_storage)
    warnings_csv = '{0}Faregrader Ofoten 2012-18.csv'.format(env.output_folder)
    warnings_plot = '{0}Faregrader Ofoten 2012-18.png'.format(
        env.output_folder)

    if get_new:
        all_warnings = []
        all_evaluations = []

        for y in select_years:

            if y in ['2016-17', '2017-18']:
                region_id = region_id_Ofoten
            else:
                region_id = region_id_Narvik

            from_date, to_date = gm.get_forecast_dates(year=y)

            all_warnings += gd.get_forecasted_dangers(region_id, from_date,
                                                      to_date)
            if get_observations:
                all_evaluations += go.get_avalanche_evaluation_3(
                    from_date, to_date, region_id)

        mp.pickle_anything([all_warnings, all_evaluations], warnings_pickle)

    else:
        [all_warnings, all_evaluations] = mp.unpickle_anything(warnings_pickle)

    if write_csv:
        # write to csv files
        _save_danger_and_problem_to_file(all_warnings, warnings_csv)

    elif plot_dangerlevels_simple:
        # Make simple plot
        from_date = gm.get_forecast_dates(select_years[0])[0]
        to_date = gm.get_forecast_dates(select_years[-1])[1]
        _make_plot_dangerlevels_simple(all_warnings, all_evaluations,
                                       warnings_plot, from_date, to_date)

    else:
        print("No output selected")

    return all_warnings, all_evaluations

def get_varsom_incidents(add_forecast_regions=False, add_observations=False, add_forecasts=False):
    """Returns the incidents shown on varsom.no in a list of VarsomIncident objects.
    Data input is a utf-8 formatted csv file in input folder. Original file might have newlines and
    semicolons (;) in the cells. These need to be removed before saving as csv.

    :param add_forecast_regions:    [bool] If true the regid is used to get coordinates and the forecast region at the
                                    observation date is added. Note, if true, some time is to be expected getting data.
    :param add_observations:        [bool] If true the observation is added when looking up the region name. This
                                    option is only taken into account if add_forecast_regions is true.
    :param add_forecasts:           [bool] If true the forecast at that time and place is added to the incident. This
                                    option is only taken into account if add_forecast_regions is true.
    """

    # incidents_file = '{}varsomsineskredulykker.csv'.format(env.varsom_incidents)
    incidents_file = '{}varsomincidents3.csv'.format(env.varsom_incidents)
    varsom_incidents = rf.read_csv_file(incidents_file, VarsomIncident)

    # map incident to forecast region
    if add_forecast_regions:
        for i in varsom_incidents:
            if not i.regid:
                ml.log_and_print("[warning] getmisc.py -> get_varsom_incidents: No regid on incident on {}. No forecast region found.".format(i.date))
            else:
                region_id, region_name, observation = get_forecast_region_for_regid(i.regid[0])
                i.add_forecast_region(region_id, region_name)
                print("regid {}: {}".format(i.regid[0], i.date))

                if add_observations:
                    i.add_observation(observation[0])
                    if len(i.regid) > 1:
                        observations = go.get_all_observations(reg_ids=i.regid[1:])
                        for o in observations:
                            i.add_observation(o)

        if add_forecasts:
            years = ['2014-15', '2015-16', '2016-17', '2017-18', '2018-19']        # the years with data

            all_forecasts = []
            for y in years:
                region_ids = get_forecast_regions(year=y)
                from_date, to_date = get_forecast_dates(y)
                all_forecasts += gd.get_forecasted_dangers(region_ids, from_date, to_date)

            for i in varsom_incidents:
                print("{}: {}".format(i.location, i.date))
                for f in all_forecasts:
                    if i.date == f.date and i.region_id == f.region_regobs_id:
                        i.add_forecast(f)

    return varsom_incidents
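
# A minimal usage sketch (the flags follow the docstring above; with
# add_forecasts=True several seasons of forecasts are downloaded, so it is slow):
#
#     incidents = get_varsom_incidents(add_forecast_regions=True,
#                                      add_observations=True,
#                                      add_forecasts=True)
#     for i in incidents:
#         print(i.date, i.location)
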
def make_forecasts_for_Thea():
    """July 2018: Make a list of avalanche forecast danger levels for the regions Voss, Romsdalen,
    Svartisen and Salten (and their predecessors) for Thea Møllerhaug Lunde (Jernbanedirektoratet).

    The Voss-Bergen line lies mostly in our Voss region.
    Mo i Rana-Fauske lies in Svartisen and Salten.
    Åndalsnes-Bjorli lies in the forecast region Romsdalen."""

    pickle_file_name = '{0}201807_avalanche_forecasts_thea.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Voss. ForecastRegionTID 124 from 2012-2016 and 3031 since.
        # Get Romsdalen. ForecastRegionTID 118 from 2012-2016 and 3023 since.
        # Get Svartisen. ForecastRegionTID 131 from 2012-2016 and 3017 since.
        # Get Salten. ForecastRegionTID 133 from 2012-2016 and 3016 since.

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [124, 118, 131, 133]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        years = ['2016-17', '2017-18']
        region_ids = [3031, 3023, 3017, 3016]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201807 Snøskredvarsel for Thea.txt'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

def make_forecasts_for_Heidi():
    """July 2018: Make list of avalanche forecasts for regions Voss, Svartisen og Fauske (and those before them)
    for Heidi Bjordal SVV"""

    pickle_file_name = '{0}201807_avalanche_forecasts_heidi.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Voss. ForecastRegionTID 124 from 2012-2016 and 3031 since.
        # Get Svartisen. ForecastRegionTID 131 from 2012-2016 and 3017 since.
        # Get Salten. ForecastRegionTID 133 from 2012-2016 and 3016 since.

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [124, 131, 133]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        years = ['2016-17', '2017-18']
        region_ids = [3031, 3017, 3016]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201807 Snøskredvarsel for Heidi.txt'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')

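# The ' ;'-separated header/row writer above is repeated in several of the
# functions in this module. A minimal helper sketch that could centralize the
# pattern (hypothetical; not a helper defined in this module):
#
#     def _write_semicolon_row(f, out_data, make_header):
#         """Write one OrderedDict as a ' ;'-separated row, header first if asked."""
#         if make_header:
#             f.write(' ;'.join([fe.make_str(k) for k in out_data.keys()]) + '\n')
#         f.write(' ;'.join([fe.make_str(v) for v in out_data.values()]) + '\n')
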
def make_avalanche_problemes_for_techel():
    """Gets forecasted and observed avalanche problems and dangers for Frank Techel.

    Takes 20-30 min per year of data to run.

    :return:
    """

    pickle_file_name = '{0}runavalancheproblems_techel.pickle'.format(
        env.local_storage)

    years = ['2014-15', '2015-16', '2016-17', '2017-18']
    get_new = False

    if get_new:
        forecast_problems = []
        forecast_dangers = []
        observed_dangers = []
        observed_problems = []

        for y in years:
            # Get forecast data. Different region ids from year to year.
            region_ids = gm.get_forecast_regions(year=y)
            from_date, to_date = gm.get_forecast_dates(y)
            forecast_problems += gp.get_forecasted_problems(region_ids,
                                                            from_date,
                                                            to_date,
                                                            lang_key=2)
            forecast_dangers += gd.get_forecasted_dangers(region_ids,
                                                          from_date,
                                                          to_date,
                                                          lang_key=2)

            # Get observed data. All older data in regObs have been mapped to new regions.
            region_ids = gm.get_forecast_regions(year='2016-17')
            from_date, to_date = gm.get_forecast_dates(
                y, padding=dt.timedelta(days=20))
            this_years_observed_dangers = gd.get_observed_dangers(region_ids,
                                                                  from_date,
                                                                  to_date,
                                                                  lang_key=2)
            this_years_observed_problems = gp.get_observed_problems(region_ids,
                                                                    from_date,
                                                                    to_date,
                                                                    lang_key=2)

            # Update observations with the forecast region ids and names used in the respective years
            for od in this_years_observed_dangers:
                utm33x = od.metadata['Original data'].UTMEast
                utm33y = od.metadata['Original data'].UTMNorth
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                od.region_regobs_id = region_id
                od.region_name = region_name

            for op in this_years_observed_problems:
                utm33x = op.metadata['Original data']['UtmEast']
                utm33y = op.metadata['Original data']['UtmNorth']
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                op.region_regobs_id = region_id
                op.region_name = region_name

            observed_dangers += this_years_observed_dangers
            observed_problems += this_years_observed_problems

        mp.pickle_anything([
            forecast_problems, forecast_dangers, observed_dangers,
            observed_problems
        ], pickle_file_name)

    else:
        [
            forecast_problems, forecast_dangers, observed_dangers,
            observed_problems
        ] = mp.unpickle_anything(pickle_file_name)

    # Run EAWS mapping on all problems
    for p in forecast_problems:
        p.map_to_eaws_problems()

    for p in observed_problems:
        p.map_to_eaws_problems()

    output_forecast_problems = '{0}Techel forecast problems.csv'.format(
        env.output_folder)
    output_forecast_dangers = '{0}Techel forecast dangers.csv'.format(
        env.output_folder)
    output_observed_problems = '{0}Techel observed problems.csv'.format(
        env.output_folder)
    output_observed_dangers = '{0}Techel observed dangers.csv'.format(
        env.output_folder)

    import collections as coll

    # Write observed dangers to file
    with open(output_observed_dangers, 'w', encoding='utf-8') as f:
        make_header = True
        for d in observed_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Reg time',
                 dt.datetime.strftime(d.registration_time, '%Y-%m-%d %H:%M')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('Municipal', d.municipal_name),
                ('Nick', d.nick),
                ('Competence', d.competence_level),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
                ('Forecast correct', d.forecast_correct),
                # ('Table', d.data_table),
                # ('URL', d.url),
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write forecasted dangers to file
    with open(output_forecast_dangers, 'w', encoding='utf-8') as f:
        make_header = True
        for d in forecast_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('Nick', d.nick),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
                # ('Table', d.data_table),
                # ('URL', d.url),
                ('Main message', ' '.join(d.main_message_en.split()))
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write forecasted problems to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for p in forecast_problems:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                ('Region id', p.region_regobs_id),
                ('Region', p.region_name),
                ('Nick', p.nick_name),
                ('Problem order', p.order),
                ('Problem', p.problem),
                ('EAWS problem', p.eaws_problem),
                ('Cause/ weaklayer', p.cause_name),
                # ('TypeTID', p.aval_type_tid),
                ('Type', p.aval_type),
                ('Size', p.aval_size),
                ('Trigger', p.aval_trigger),
                ('Probability', p.aval_probability),
                ('Distribution', p.aval_distribution),
                ('DL', p.danger_level),
                ('Danger level', p.danger_level_name),
                # ('Table', p.regobs_table),
                # ('URL', p.url)
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write observed problems to file
    with open(output_observed_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for p in observed_problems:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                ('Reg time',
                 dt.datetime.strftime(p.registration_time, '%Y-%m-%d %H:%M')),
                ('Region id', p.region_regobs_id),
                ('Region', p.region_name),
                ('Municipal', p.municipal_name),
                ('Nick', p.nick_name),
                ('Competence', p.competence_level),
                # ('Problem order', p.order),
                ('EAWS problem', p.eaws_problem),
                ('Cause/ weaklayer', p.cause_name),
                # ('TypeTID', p.aval_type_tid),
                ('Type', p.aval_type),
                ('Catch 1', p.cause_attribute_crystal),
                ('Catch 2', p.cause_attribute_light),
                ('Catch 3', p.cause_attribute_soft),
                ('Catch 4', p.cause_attribute_thin),
                ('Size', p.aval_size),
                ('Trigger', p.aval_trigger),
                # ('Probability', p.aval_probability),
                # ('Distribution', p.aval_distribution),
                # ('RegID', p.regid),
                # ('Table', p.regobs_table),
                # ('URL', p.url)
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

def make_dl_incident_markus():
    """From the beginning of time:

    Get all forecasts, and count how many are at danger level 3.

    Get all incidents, except those from Elrapp, keeping all in the back country.

    Of these, get all on days and in regions with danger level 3,
    and get all with a serious character on days and in regions with danger level 3.

    :return:
    """

    pickle_file_name = '{0}incident_on_dl3_for_markus.pickle'.format(
        env.local_storage)
    years = ['2012-13', '2013-14', '2014-15', '2015-16', '2016-17']
    get_new = False

    all_dangers = []
    all_incidents = []

    if get_new:
        for y in years:

            # get forecast regions used this year
            from_date, to_date = gm.get_forecast_dates(y)

            # get incidents for this year and map to this years forecast regions
            this_year_incidents = go.get_incident(from_date,
                                                  to_date,
                                                  geohazard_tids=10)
            for i in this_year_incidents:
                utm33x = i.UTMEast
                utm33y = i.UTMNorth
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                i.region_regobs_id = region_id
                i.region_name = region_name
            all_incidents += this_year_incidents

            # get regions and the forecasts used this year
            region_ids = gm.get_forecast_regions(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        # in the end, pickle it all
        mp.pickle_anything([all_dangers, all_incidents], pickle_file_name)

    else:
        [all_dangers, all_incidents] = mp.unpickle_anything(pickle_file_name)

    all_dl3 = []
    for d in all_dangers:
        if d.danger_level == 3:
            all_dl3.append(d)

    all_back_country_incidents = []
    for i in all_incidents:
        if 'drift@svv' not in i.NickName:
            # if the activity influenced is backcountry or snowmobile.
            # 100 (non-specified incidents) is included, so this dataset
            # also gets the observations where activity was not specified.
            if i.ActivityInfluencedTID in [
                    100, 110, 111, 112, 113, 114, 115, 116, 117, 130
            ]:
                all_back_country_incidents.append(i)

    all_back_country_incidents_with_consequence = []
    for i in all_back_country_incidents:
        # If damage extent is near-accident, people injured or people killed
        if i.DamageExtentTID > 28:
            all_back_country_incidents_with_consequence.append(i)

    # find incidents in regions on days with danger level 3
    all_back_country_incidents_on_region_dl3 = []
    all_back_country_incidents_with_consequence_on_region_dl3 = []

    for d in all_dl3:
        danger_date = d.date
        danger_region_id = d.region_regobs_id

        for i in all_back_country_incidents:
            incident_date = i.DtObsTime.date()
            incident_region_id = i.ForecastRegionTID
            if incident_date == danger_date and incident_region_id == danger_region_id:
                all_back_country_incidents_on_region_dl3.append(i)

        for i in all_back_country_incidents_with_consequence:
            incident_date = i.DtObsTime.date()
            incident_region_id = i.ForecastRegionTID
            if incident_date == danger_date and incident_region_id == danger_region_id:
                all_back_country_incidents_with_consequence_on_region_dl3.append(
                    i)

    print('Total number of forecasts made since the beginning of time: {}'.format(
        len(all_dangers)))
    print('Total number of forecasts at danger level 3: {}'.format(len(all_dl3)))
    print('Total number of back country incidents: {}'.format(
        len(all_back_country_incidents)))
    print('Total number of back country incidents with consequence: {}'.format(
        len(all_back_country_incidents_with_consequence)))
    print('Total number of back country incidents in regions on days with danger level 3: {}'.format(
        len(all_back_country_incidents_on_region_dl3)))
    print('Total number of back country incidents with consequence in regions on days with danger level 3: {}'.format(
        len(all_back_country_incidents_with_consequence_on_region_dl3)))

    return

def make_forecasts_for_Espen_at_sweco():
    """Hi. In connection with a project in Sørreisa in Troms, we wish to advise our client on the
    evacuation of buildings in avalanche-prone terrain. As part of our assessment it would be very
    useful to have statistics for the forecasting, that is, statistics on how often the different
    danger levels have been forecast. Is it possible to obtain such statistics?
    Preferably as far back in time as possible. Kind regards, Espen Eidsvåg"""

    pickle_file_name = '{0}forecasts_sorreisa_espen.pickle'.format(
        env.local_storage)

    get_new = True
    all_dangers = []

    if get_new:

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [110, 112]  # Senja, Bardu
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            for region_id in region_ids:
                all_dangers += gd.get_forecasted_dangers(
                    region_id, from_date, to_date)

        years = ['2016-17', '2017-18', '2018-19']
        region_ids = [3012, 3013]  # Sør Troms, Indre Troms
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            for region_id in region_ids:
                all_dangers += gd.get_forecasted_dangers(
                    region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}Varsel for Sørreisa.Espen Eidsvåg Sweco.csv'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')

# Fragment: the season loop from make_avalanche_problemes_for_techel above, here
# with lang_key=1 (Norwegian). The seasons list below is an assumption taken from
# that function; it was not defined in this fragment.
years = ['2014-15', '2015-16', '2016-17', '2017-18']

forecast_problems = []
forecast_dangers = []
observed_dangers = []
observed_problems = []

for y in years:
    # Get forecast data. Different region ids from year to year.
    region_ids = gm.get_forecast_regions(year=y)
    from_date, to_date = gm.get_forecast_dates(y)
    forecast_problems += gp.get_forecasted_problems(region_ids,
                                                    from_date,
                                                    to_date,
                                                    lang_key=1)
    forecast_dangers += gd.get_forecasted_dangers(region_ids,
                                                  from_date,
                                                  to_date,
                                                  lang_key=1)

    # Get observed data. All older data in regObs have been mapped to new regions.
    region_ids = gm.get_forecast_regions(year='2016-17')
    from_date, to_date = gm.get_forecast_dates(y,
                                               padding=dt.timedelta(days=20))
    current_years_observed_dangers = gd.get_observed_dangers(region_ids,
                                                             from_date,
                                                             to_date,
                                                             lang_key=1)
    current_years_observed_problems = gp.get_observed_problems(region_ids,
                                                               from_date,
                                                               to_date,
                                                               lang_key=1)

def step_1_make_data_set(region_ids, from_date, to_date):
    """Makes the data set of all observed avalanche activity (including signs and single avalanches obs) and maps
    to forecasts for the days they are observed.

    :param region_ids:  [int or list of ints]
    :param from_date:   [date]
    :param to_date:     [date]
    :return date_region, forecasted_dangers:    [], []
    """

    # get all data
    dangers = gd.get_forecasted_dangers(region_ids, from_date, to_date)
    avalanches = go.get_avalanche_activity(from_date, to_date, region_ids)
    avalanches += go.get_avalanche_activity_2(from_date, to_date, region_ids)
    single_avalanches = go.get_avalanche(from_date, to_date, region_ids)
    danger_signs = go.get_danger_sign(from_date, to_date, region_ids)

    # List of only forecasts
    forecasted_dangers = []
    for d in dangers:
        if 'Forecast' in d.data_table and d.danger_level != 0:
            forecasted_dangers.append(d)

    # List of only valid activity observations
    observed_activity = []
    for a in avalanches:
        if a.EstimatedNumName is not None:
            if 'Ikke gitt' not in a.EstimatedNumName:
                if a.DestructiveSizeName is None:
                    a.DestructiveSizeName = 'Ikke gitt'
                observed_activity.append(a)

    # list of relevant danger sign observations
    danger_sign_avalanches = []
    for ds in danger_signs:
        if 'Ferske skred' in ds.DangerSignName or 'Ingen faretegn observert' in ds.DangerSignName:
            danger_sign_avalanches.append(ds)

    # list of relevant single avalanches
    observed_avalanche = []
    for sa in single_avalanches:
        if sa.DestructiveSizeName is not None:
            if 'Ikke gitt' not in sa.DestructiveSizeName:
                observed_avalanche.append(sa)

    # Make a list of all regions per date and append forecasts and observations.
    data_date_region = []
    for d in forecasted_dangers:
        danger_date = d.date
        print('{0}'.format(danger_date))
        danger_region_name = d.region_name

        data = DataOnDateInRegion(danger_date, danger_region_name)
        data.forecast.append(d)

        for a in observed_activity:
            aval_date = a.DtObsTime.date()
            if isinstance(a, go.AvalancheActivityObs):
                if a.DtAvalancheTime is not None:
                    aval_date = a.DtAvalancheTime.date()
            if isinstance(a, go.AvalancheActivityObs2):
                if a.DtMiddleTime is not None:
                    aval_date = a.DtMiddleTime.date()

            aval_region_name = a.ForecastRegionName
            if aval_date == danger_date and aval_region_name == danger_region_name:
                data.avalanche_activity.append(a)

        for da in danger_sign_avalanches:
            aval_date = da.DtObsTime.date()
            aval_region_name = da.ForecastRegionName
            if aval_date == danger_date and aval_region_name == danger_region_name:
                data.danger_sign.append(da)

        for oa in observed_avalanche:
            aval_date = oa.DtObsTime.date()
            if oa.DtAvalancheTime is not None:
                aval_date = oa.DtAvalancheTime.date()
            aval_region_name = oa.ForecastRegionName
            if aval_date == danger_date and aval_region_name == danger_region_name:
                data.avalanche.append(oa)

        data_date_region.append(data)

    # discard days and regions where no observations are present
    date_region = []
    for d in data_date_region:
        if d.avalanche_activity or d.danger_sign or d.avalanche:
            date_region.append(d)

    return date_region, forecasted_dangers
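
# A minimal usage sketch for step_1_make_data_set, using the season helpers used
# elsewhere in this module (the 2016-17 season here is only an example):
#
#     region_ids = gm.get_forecast_regions(year='2016-17')
#     from_date, to_date = gm.get_forecast_dates('2016-17')
#     date_region, forecasted_dangers = step_1_make_data_set(region_ids, from_date, to_date)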