def make_problems_for_BritSiv():
    """Brit Siv ønsket oversikt over varslede skredprobelemr og faregrader for Indre Fjordane of Fjordane
    de to siste årene (2015-2017).    """

    output_filename = '{0}Skredproblemer Indre Fjordane for BritSiv.csv'.format(
        env.output_folder)
    pickle_file_name = '{0}runavalancheproblems_britsiv.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Fjordane 2015-16
        region_id = 121
        from_date, to_date = gm.get_forecast_dates('2015-16')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        # Get Indre fjordane 2016-17
        region_id = 3027
        from_date, to_date = gm.get_forecast_dates('2016-17')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    all_problems = []
    for d in all_dangers:
        all_problems += d.avalanche_problems
    all_problems.sort(key=lambda p: p.date)

    _save_problems(all_problems, output_filename)
def get_data(from_date, to_date, region_ids, pickle_file_name_1, get_new):
    """Time consuming and inefficient. Not proud..

    :param from_date:
    :param to_date:
    :param region_ids:
    :param pickle_file_name_1:
    :param get_new:
    :return:
    """

    if get_new:
        # get all data and save to pickle
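        # geohazard_tids=10 restricts the request to snow observations (10 is the snow geohazard in regObs)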
        all_incidents = go.get_incident(from_date,
                                        to_date,
                                        region_ids=region_ids,
                                        geohazard_tids=10)
        all_forecasts = gd.get_forecasted_dangers(region_ids, from_date,
                                                  to_date)
        mp.pickle_anything([all_forecasts, all_incidents], pickle_file_name_1)
    else:
        # load data from pickle
        all_forecasts, all_incidents = mp.unpickle_anything(pickle_file_name_1)

    return all_forecasts, all_incidents
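
# A minimal usage sketch of get_data above (dates, regions and the pickle path are illustrative assumptions only):
#   from_date, to_date = dt.date(2016, 12, 1), dt.date(2017, 5, 31)
#   forecasts, incidents = get_data(from_date, to_date, [3011, 3012],
#                                   '{0}forecasts_and_incidents.pickle'.format(env.local_storage),
#                                   get_new=True)
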
def get_all_ofoten():
    """Dangers and problems for Ofoten (former Narvik). Writes file to .csv"""

    get_new = True
    get_observations = False
    write_csv = True
    plot_dangerlevels_simple = False

    select_years = [
        '2012-13', '2013-14', '2014-15', '2015-16', '2016-17', '2017-18'
    ]
    region_id_Narvik = 114  # Narvik used from 2012 until nov 2016
    region_id_Ofoten = 3015  # Ofoten introduced in november 2016

    warnings_pickle = '{0}allforecasteddangerlevels_Ofoten_201218.pickle'.format(
        env.local_storage)
    warnings_csv = '{0}Faregrader Ofoten 2012-18.csv'.format(env.output_folder)
    warnings_plot = '{0}Faregrader Ofoten 2012-18.png'.format(
        env.output_folder)

    if get_new:
        all_warnings = []
        all_evaluations = []

        for y in select_years:

            if y in ['2016-17', '2017-18']:
                region_id = region_id_Ofoten
            else:
                region_id = region_id_Narvik

            from_date, to_date = gm.get_forecast_dates(year=y)

            all_warnings += gd.get_forecasted_dangers(region_id, from_date,
                                                      to_date)
            if get_observations:
                all_evaluations += go.get_avalanche_evaluation_3(
                    from_date, to_date, region_id)

        mp.pickle_anything([all_warnings, all_evaluations], warnings_pickle)

    else:
        [all_warnings, all_evaluations] = mp.unpickle_anything(warnings_pickle)

    if write_csv:
        # write to csv files
        _save_danger_and_problem_to_file(all_warnings, warnings_csv)

    elif plot_dangerlevels_simple:
        # Make simple plot
        from_date = gm.get_forecast_dates(select_years[0])[0]
        to_date = gm.get_forecast_dates(select_years[-1])[1]
        _make_plot_dangerlevels_simple(all_warnings, all_evaluations,
                                       warnings_plot, from_date, to_date)

    else:
        print("No output selected")

    return all_warnings, all_evaluations
def make_forecasts_for_Christian():
    """Christian Jaedicke ønsker oversikt over varsel og skredproblemer siste tre år i Narvik."""

    pickle_file_name = '{0}forecasts_ofoten_christian.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Narvik 2014-15 and 2015-16
        region_id = 114

        from_date, to_date = gm.get_forecast_dates('2014-15')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        from_date, to_date = gm.get_forecast_dates('2015-16')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        # Get Ofoten (3015) 2016-17
        region_id = 3015
        from_date, to_date = gm.get_forecast_dates('2016-17')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}Varsel Ofoten for Christian.csv'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')
Example #5
def get_kdv(x_kdv, get_new=False):
    """Imports a x_kdv view from regObs and returns a dictionary with <key, value> = <ID, Name>
    An x_kdv is requested from the regObs api if a pickle file newer than a week exists.

    :param x_kdv:   [string]    x_kdv view
    :return dict:   {}          x_kdv as a dictionary

    Ex of use: aval_cause_kdv = get_kdv('AvalCauseKDV')
    Ex of url for returning values for IceCoverKDV in norwegian:
    http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/ForecastRegionKDV?$filter=Langkey%20eq%201%20&$format=json
    """

    kdv_file = '{0}{1}.pickle'.format(se.kdv_elements_folder, x_kdv)
    kdv_dict = {}

    if get_new:
        url = 'http://api.nve.no/hydrology/regobs/{0}/OData.svc/{1}?$filter=Langkey%20eq%201%20&$format=json'\
            .format(se.odata_version, x_kdv)

        ml.log_and_print("getregobsdata -> get_kdv: Getting KDV from URL:{0}".format(url))

        kdv = requests.get(url).json()

        for a in kdv['d']['results']:
            try:
                if 'AvalCauseKDV' in url and a['ID'] > 9 and a['ID'] < 26:
                    # AvalCauseKDV ids 10-25 use the Description field rather than Name
                    kdv_dict[a["ID"]] = a["Description"]
                else:
                    kdv_dict[a["ID"]] = a["Name"]
            except (RuntimeError, TypeError, NameError):
                pass

        mp.pickle_anything(kdv_dict, kdv_file)

    else:
        if os.path.exists(kdv_file):

            # If the pickle is older than a week, request a new one.
            max_file_age = 7
            mtime = os.path.getmtime(kdv_file)
            last_modified_date = dt.datetime.fromtimestamp(mtime).date()
            date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)

            if last_modified_date < date_limit.date():
                kdv_dict = get_kdv(x_kdv, get_new=True)
            else:
                # ml.log_and_print("getregobsdata -> get_kdv: Getting KDV from pickle:{0}".format(kdv_file))
                kdv_dict = mp.unpickle_anything(kdv_file, print_message=False)

        else:
            kdv_dict = get_kdv(x_kdv, get_new=True)

    return kdv_dict
def make_forecasts_for_Sander():
    """2018 August: Hei igjen Ragnar.
    Har du statistikk på varsla faregrad over ein heil sesong for Noreg? Eit snitt. XX dagar med faregrad 1,
    XX dagar med faregrad 2, XX dagar med faregrad 3.... fordelt på XX varslingsdagar.

    :return:
    """

    pickle_file_name = '{0}201808_avalanche_forecasts_sander.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:

        years = ['2012-13', '2013-14', '2014-15', '2015-16', '2016-17', '2017-18']
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            region_ids = gm.get_forecast_regions(y, get_b_regions=True)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201808 Faregrader for Sander.txt'.format(
        env.output_folder)

    import pandas as pd

    all_dangers_dict = []
    for a in all_dangers:
        all_dangers_dict.append(a.__dict__)

    col_names = list(all_dangers_dict[0].keys())
    all_dangers_df = pd.DataFrame(all_dangers_dict,
                                  columns=col_names,
                                  index=range(len(all_dangers_dict)))

    # Write the assembled danger level table to the output file for Sander
    all_dangers_df.to_csv(output_forecast_problems, sep=';', index=False)
Example #7
def make_m3_figs(forecaster_dict, nick, path=''):
    """Makes m3 tables for each forecaster. Uses methods from the runmatrix module.

    :param forecaster_dict:
    :param nick:            is how I can select relevant warnings for this forecaster
    :param product_folder:  location where plots (endproduct) is saved
    :param project_folder:  many files generated; make project folder in product folder
    :return:
    """

    from varsomscripts import matrix as mx

    f = forecaster_dict[nick]
    # select only warnings for this forecaster
    one_forecaster_warnings = f.warnings

    # prepare dataset
    pickle_data_set_file_name = '{0}runforefollow data set {1}.pickle'.format(
        env.local_storage, f.observer_id)
    mx.pickle_data_set(one_forecaster_warnings,
                       pickle_data_set_file_name,
                       use_ikke_gitt=False)
    forecaster_data_set = mp.unpickle_anything(pickle_data_set_file_name)

    # prepare the m3 elements (cell contents)
    pickle_m3_v2_file_name = '{0}runforefollow m3 {1}.pickle'.format(
        env.local_storage, f.observer_id)
    mx.pickle_M3(forecaster_data_set, 'matrixconfiguration.v2.csv',
                 pickle_m3_v2_file_name)
    m3_v2_elements = mp.unpickle_anything(pickle_m3_v2_file_name)

    # plot
    plot_m3_v2_file_name = '{0}{1}_m3'.format(path, f.observer_id)
    mx.plot_m3_v2(m3_v2_elements, plot_m3_v2_file_name)

    return
Example #8
def get_data(region_id, start_date, end_date, get_new=True):
    """Gets all the data needed in the plots and pickles it so that I don't need to do requests to make plots.

    :param region_id:       [int] Region ID is an int as given i ForecastRegionKDV
    :param start_date:      [string] Start date.
    :param end_date:        [string] End date.
    :param get_new:         [bool] If true, new data is requested. If false, a local pickle is used for data.
    :return problems, dangers, aval_indexes:
    """

    file_name = "{3}plotdangerandproblem_region{0}_{1}{2}.pickle".format(
        region_id, start_date.strftime('%Y'), end_date.strftime('%y'),
        env.local_storage)

    if not get_new and not os.path.exists(file_name):
        get_new = True
        ml.log_and_print(
            "[info] {0}get_data: pickle missing, getting new data.".format(
                log_reference),
            print_it=True)

    if get_new:
        dangers = gd.get_all_dangers(region_id, start_date, end_date)

        # Early years don't have the avalanche problem we will be analyzing
        if start_date > dt.date(2014, 11, 1):
            problems = gp.get_all_problems(region_id,
                                           start_date,
                                           end_date,
                                           add_danger_level=False)
        else:
            problems = []

        aval_indexes = gm.get_avalanche_index(start_date,
                                              end_date,
                                              region_ids=region_id)
        mp.pickle_anything([problems, dangers, aval_indexes], file_name)

    else:
        problems, dangers, aval_indexes = mp.unpickle_anything(file_name)

    return problems, dangers, aval_indexes
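
# Illustrative call of get_data above (region id and dates are assumptions, not from the original script):
#   problems, dangers, aval_indexes = get_data(3011, dt.date(2017, 12, 1), dt.date(2018, 5, 31))
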
Example #9
def _map_obs_to_old_regions(obs, make_new=True):

    pickle_file_name = '{}all observations with old coords.pickle'.format(env.local_storage)

    if make_new:

        for o in obs:
            utm_n = o.UTMNorth
            utm_e = o.UTMEast
            date = o.DtObsTime
            year = gm.get_season_from_date(date)
            region_id, region_name = gm.get_forecast_region_for_coordinate(utm_e, utm_n, year)
            o.ForecastRegionName = region_name
            o.ForecastRegionTID = region_id

        mp.pickle_anything(obs, pickle_file_name)
        return obs

    else:
        obs_old_coords = mp.unpickle_anything(pickle_file_name)
        return obs_old_coords
def make_avalanche_problemes():
    """Writes all avalanche problems for selected regions (2012-2015) to a csv file."""

    data_output_filename = '{0}Alle skredproblemer.csv'.format(
        env.output_folder)
    pickle_file_name = '{0}runavalancheproblems.pickle'.format(
        env.local_storage)

    get_new = True

    if get_new:
        region_ids = [118, 128, 117]
        from_date = dt.date(2012, 12, 31)
        to_date = dt.date(2015, 7, 1)
        data = gp.get_all_problems(region_ids, from_date, to_date)

        mp.pickle_anything(data, pickle_file_name)

    else:
        data = mp.unpickle_anything(pickle_file_name)

    _save_problems_simple(data, data_output_filename)

    return
def get_incident_list(all_incidents, all_forecasts, desired_damage_extent_kdv,
                      pickle_file_name_2, make_new_incident_list):
    """Each row in the incident list contains Incident and Forecast objects where
    date and forecast region match AND where incidents match the damage extent we wish to study.

    :param all_incidents:
    :param all_forecasts:
    :param desired_damage_extent_kdv:
    :param pickle_file_name_2:
    :param make_new_incident_list:
    :return:
    """

    if make_new_incident_list:
        incident_list = []
        for incident in all_incidents:
            if incident.DamageExtentTID in desired_damage_extent_kdv.keys():
                incident_list.append(
                    IncidentAndForecasts(incident, all_forecasts))
        mp.pickle_anything(incident_list, pickle_file_name_2)
    else:
        incident_list = mp.unpickle_anything(pickle_file_name_2)

    return incident_list
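
# Sketch of how the helpers above are typically chained (the KDV view name and pickle paths are assumptions):
#   forecasts, incidents = get_data(from_date, to_date, region_ids,
#                                   '{0}forecasts_and_incidents.pickle'.format(env.local_storage), get_new=True)
#   damage_extent_kdv = get_kdv('DamageExtentKDV')  # optionally reduce to the damage extents of interest
#   incident_list = get_incident_list(incidents, forecasts, damage_extent_kdv,
#                                     '{0}incident_list.pickle'.format(env.local_storage), True)
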
Example #12
def _get_all_snow(get_new=False):

    file_name = '{}observations and forecasts 2012-17.pickle'.format(
        env.local_storage)

    if get_new:
        all_observations = go.get_all_registrations('2012-12-01',
                                                    '2017-07-01',
                                                    geohazard_tids=10)

        years = ['2012-13', '2013-14', '2014-15', '2015-16', '2016-17']
        all_forecasts = []
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            region_ids = gm.get_forecast_regions(y)
            all_forecasts += gfa.get_avalanche_warnings(
                region_ids, from_date, to_date)

        mp.pickle_anything([all_observations, all_forecasts], file_name)

    else:
        [all_observations, all_forecasts] = mp.unpickle_anything(file_name)

    return all_observations, all_forecasts
Example #13
    def add_Inflow_DOP(self, Inflow_DOP_inn):
        messages = we.test_for_missing_elements(Inflow_DOP_inn, self.from_date,
                                                self.to_date)
        self.metadata += messages
        self.Inflow_DOP = Inflow_DOP_inn

    def add_Inflow_Chla(self, Inflow_Chla_inn):
        messages = we.test_for_missing_elements(Inflow_Chla_inn,
                                                self.from_date, self.to_date)
        self.metadata += messages
        self.Inflow_Chla = Inflow_Chla_inn

    def add_Inflow_DOC(self, Inflow_DOC_inn):
        messages = we.test_for_missing_elements(Inflow_DOC_inn, self.from_date,
                                                self.to_date)
        self.metadata += messages
        self.Inflow_DOC = Inflow_DOC_inn


if __name__ == "__main__":

    yesterday = (dt.date.today() - dt.timedelta(days=1)).strftime("%Y-%m-%d")
    # harvest_and_save_blindern('2000-01-01', yesterday)
    # harvest_and_save_nordnesfjelet('2014-08-01', yesterday)

    data = harvest_for_mylake_hakkloa('2013-04-01', '2015-10-01')
    mp.pickle_anything(data, data.output_file_path + '.pickle')
    data2 = mp.unpickle_anything('{0}HAK_input'.format(env.data_path) +
                                 '.pickle')

    mfd.write_mylake_inputfile(data2)
Example #14
def calculate_and_plot9d_season(period='2018-19'):
    """Calculate ice columns for 9 days and make plots of all ice thickness for a given season or optionally 'Today'.

    The inner workings:
    1.1 Retrieves ice thickness observations from regObs. If period is given as a season, all observations for
        this season will be requested. All previous plots and local storage will be deleted.
    1.2 If period='Today' ice thickness observations from today will be requested and plotted. Older plots will be
        in the folder. Metadata dict will be merged.
    2.  Calculate the 9 day prognosis from the observation time and plots the result.
    3.  Make a metadata json for handling files on iskart.no. Only confirmed files in folder will be
        added to metadata json.

    :param period:    [String] Default is current season (2017-18).
    :return:
    """

    log_reference = 'calculateandplot.py -> calculate_and_plot9d_season'

    # File names
    regid_metadata_json = '{}regid_metadata.json'.format(
        se.ni_dogn_plots_folder)
    regid_metadata_pickle = '{}regid_metadata.pickle'.format(se.local_storage)

    if period == 'Today':
        ice_thicks = gro.get_ice_thickness_today()

    else:
        # Empty the 9dogn folder
        # for file in os.listdir(se.ni_dogn_plots_folder):
        #     file_path = os.path.join(se.ni_dogn_plots_folder, file)
        #     try:
        #         if os.path.isfile(file_path):
        #             os.unlink(file_path)
        #     except OSError:
        #         pass

        # remove pickle with metadata
        try:
            os.remove(regid_metadata_pickle)
        except OSError:
            pass

        # Get new observations
        ice_thicks = gro.get_ice_thickness_observations(period,
                                                        reset_and_get_new=True)

    # Calculate and plot
    for k, v in ice_thicks.items():

        # If the plot file is missing, make it. If the observation is older than 11 days it is
        # based on gridded data and no plot file is needed.
        make_plot = False
        max_file_age = 11
        date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)
        file_names = os.listdir(se.ni_dogn_plots_folder)
        plot_filename = '{0}.png'.format(k)
        if plot_filename not in file_names:
            make_plot = True
        else:
            if v.date.date() > date_limit.date():
                make_plot = True

        if make_plot:
            try:
                calculate_and_plot9d_regid(k,
                                           plot_folder=se.ni_dogn_plots_folder,
                                           observed_ice=v)
            except Exception:
                error_msg = sys.exc_info()[0]
                ml.log_and_print(
                    "[Error] {} Error making plot for {} {}".format(
                        log_reference, k, error_msg))

    # Make a json with metadata for using the files on iskart.no. Metadata is loaded from pickle if available,
    # and entries are added for new observations where a plot exists.
    if not os.path.exists(regid_metadata_pickle):
        regid_metadata = {}
    else:
        regid_metadata = mp.unpickle_anything(regid_metadata_pickle)

    list_of_plots = os.listdir(se.ni_dogn_plots_folder)

    for k, v in ice_thicks.items():
        # only add metadata on files that are in the folder
        if '{0}.png'.format(k) in list_of_plots:
            date = v.date.date()

            region_name = v.metadata['OriginalObject']['ForecastRegionName']
            if not region_name:
                region_name = 'Ukjent region'
            x, y = v.metadata['UTMEast'], v.metadata['UTMNorth']
            lake_id = v.metadata['LocationID']
            lake_name = v.metadata['LocationName']
            if not lake_name:
                lake_name = 'E{} N{}'.format(x, y)

            regid_metadata[k] = {
                'RegionName': region_name,
                'LakeID': lake_id,
                'LakeName': lake_name,
                'Date': '{}'.format(date)
            }

    mp.pickle_anything(regid_metadata, regid_metadata_pickle)

    json_string = json.dumps(regid_metadata,
                             ensure_ascii=False).encode('utf-8')
    with open(regid_metadata_json, 'wb') as f:
        f.write(json_string)
Example #15
def plot_season_for_all_regobs_locations(year='2018-19',
                                         calculate_new=False,
                                         get_new_obs=False,
                                         make_plots=False,
                                         delete_old_plots=False):
    """Method specialized for scheduled plotting for iskart.no.
    Method makes a season plot for all ObsLocations in regObs where we have a first ice date.

    It may take some time to plot. 250 lakes for a season and for each plot weather params are requested from the GTS.

    The workings of the method:
    1.  get all locations ids and belonging observations where we have first ice.
    2.1 if calculate new, empty sesong folder and pickle in local storage and calculate (and make plots if requested).
    2.2 Make metadata json for showing files on iskart.no
    3.  All calculations are compared to observed data in scatter plot.

    :param year:                [String] Season for plotting. eg: '2016-17'
    :param calculate_new:       [bool] Calculate new ice thicks. If false only make the seasonal scatter.
    :param get_new_obs:         [bool]
    :param make_plots:          [bool]  If False all calculations are made, but only the scatter comparison against observatiosn is ploted
    :param delete_old_plots:    [bool]  If True all former plots and pickles are removed.
    """

    pickle_file_name_and_path = '{0}all_calculated_ice_{1}.pickle'.format(
        se.local_storage, year)
    location_id_metadata_json = '{}location_id_metadata.json'.format(
        se.sesong_plots_folder)

    if calculate_new:
        if delete_old_plots:
            # Empty the sesong plot folder
            for file in os.listdir(se.sesong_plots_folder):
                file_path = os.path.join(se.sesong_plots_folder, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except OSError:
                    pass

            # remove the old data pickle - because we are getting new data
            try:
                os.remove(pickle_file_name_and_path)
            except OSError:
                pass

        all_observations = gro.get_all_season_ice(year, get_new=get_new_obs)
        from_date, to_date = gm.get_dates_from_year(year)
        all_calculated = {}
        all_observed = {}
        location_id_metadata = {}

        for location_id, observed_ice in all_observations.items():
            plot_filename = None
            try:
                calculated, observed, plot_filename = _plot_season(
                    location_id,
                    from_date,
                    to_date,
                    observed_ice,
                    make_plots=make_plots,
                    plot_folder=se.sesong_plots_folder)
                all_calculated[location_id] = calculated
                all_observed[location_id] = observed
            except Exception:
                error_msg = sys.exc_info()[0]
                ml.log_and_print(
                    "[error] calculateandplot.py -> plot_season_for_all_regobs_locations: Error making plot for {}. {}"
                    .format(location_id, error_msg))

            # Make the json with metadata needed for iskart.no. Add only if the plot was made and the file exists.
            if plot_filename and os.path.isfile(se.sesong_plots_folder + plot_filename):

                region_name = observed_ice[0].metadata['OriginalObject'][
                    'ForecastRegionName']
                if not region_name:
                    region_name = 'Ukjent region'
                lake_id = observed_ice[0].metadata['LocationID']
                x, y = observed_ice[0].metadata['UTMEast'], observed_ice[
                    0].metadata['UTMNorth']
                lake_name = observed_ice[0].metadata['LocationName']
                if not lake_name:
                    lake_name = 'E{} N{}'.format(x, y)

                location_id_metadata[location_id] = {
                    'RegionName': region_name,
                    'LakeID': lake_id,
                    'LakeName': lake_name,
                    'PlotFileName': plot_filename
                }

        mp.pickle_anything([all_calculated, all_observed],
                           pickle_file_name_and_path)

        try:
            json_string = json.dumps(location_id_metadata,
                                     ensure_ascii=False).encode('utf-8')
            with open(location_id_metadata_json, 'wb') as f:
                f.write(json_string)
        except Exception:
            error_msg = sys.exc_info()[0]
            ml.log_and_print(
                "[error] calculateandplot.py -> plot_season_for_all_regobs_locations: Can't write json. {}"
                .format(error_msg))

    else:
        [all_calculated,
         all_observed] = mp.unpickle_anything(pickle_file_name_and_path)

    try:
        pts.scatter_calculated_vs_observed(all_calculated, all_observed, year)
    except Exception:
        error_msg = sys.exc_info()[0]
        ml.log_and_print(
            "[error] calculateandplot.py -> plot_season_for_all_regobs_locations: {}. Could not plot scatter {}."
            .format(error_msg, year))
def incident_troms_winter_2018_for_markus():
    """Communication dated 2018-11-29

    Hei Ragnar og Jostein

    Kan en av dere hjelpe meg å ta ut et plott som viser antall registrerte ulykker og hendelser i
    varslingsregionene Tromsø, Lyngen, Sør-Troms og Indre-Troms for
    perioden 15.02 – 15.05.

    ...

    Er du interessert i det som ligger i registrert i
    regObs eller det som er kvalitetssikkert data  og ligger på varsom?

    Skal du ha hendelser som har hatt konsekvens?

    Skal hendelsene plottes i tid eller vises i kart?

    ...

    Varsom
    Ikke nødvendigvis konsekvens
    Tid

    :return:
    """

    pickle_file_name = '{0}incident_troms_winter_2018_for_markus.pickle'.format(
        env.local_storage)
    from_date = dt.date(2018, 2, 15)  # '2018-02-15'
    to_date = dt.date(2018, 5, 15)  # '2018-05-15'

    # Tromsø, Lyngen, Sør-Troms og Indre-Troms
    regions = [3011, 3010, 3012, 3013]

    get_new = False

    if get_new:
        all_varsom_incidents = gm.get_varsom_incidents(
            add_forecast_regions=True, add_observations=True)
        all_regobs_avalobs_and_incidents = go.get_data_as_class(
            from_date,
            to_date,
            registration_types=[11, 26],
            region_ids=regions,
            output='Nest')

        mp.pickle_anything(
            [all_varsom_incidents, all_regobs_avalobs_and_incidents],
            pickle_file_name)

    else:
        [all_varsom_incidents, all_regobs_avalobs_and_incidents
         ] = mp.unpickle_anything(pickle_file_name)

    varsom_incidents = mm.make_date_int_dict(start_date=from_date,
                                             end_date=to_date)
    regobs_avalobs_and_incidents = mm.make_date_int_dict(start_date=from_date,
                                                         end_date=to_date)
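    # make_date_int_dict is assumed to return a {date: 0} dict with one entry per day in the period,
    # so the loops below can count incidents per day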

    for i in all_varsom_incidents:
        if from_date <= i.date <= to_date:
            if i.region_id in regions:
                if i.date in varsom_incidents.keys():
                    varsom_incidents[i.date] += 1

    for i in all_regobs_avalobs_and_incidents:
        if from_date <= i.DtObsTime.date() <= to_date:
            if i.ForecastRegionTID in regions:
                if i.DtObsTime.date() in regobs_avalobs_and_incidents.keys():
                    regobs_avalobs_and_incidents[i.DtObsTime.date()] += 1

    sum_varsom = sum(varsom_incidents.values())
    sum_regobs = sum(regobs_avalobs_and_incidents.values())

    varsom_incident_troms_winter_2018_for_markus = '{0}varsom_incident_troms_winter_2018_for_markus.csv'.format(
        env.output_folder)
    regobs_incident_troms_winter_2018_for_markus = '{0}regobs_incident_troms_winter_2018_for_markus.csv'.format(
        env.output_folder)

    with open(varsom_incident_troms_winter_2018_for_markus,
              'w',
              encoding='utf-8') as f:
        make_header = True
        for k, v in varsom_incidents.items():
            if make_header:
                f.write('date; number\n')
                make_header = False
            f.write('{}; {}\n'.format(k, v))

    with open(regobs_incident_troms_winter_2018_for_markus,
              'w',
              encoding='utf-8') as f:
        make_header = True
        for k, v in regobs_avalobs_and_incidents.items():
            if make_header:
                f.write('date; number\n')
                make_header = False
            f.write('{}; {}\n'.format(k, v))

    pass
def make_forecasts_at_incidents_for_sander():
    """Lager csv med alle varsomhendelser sammen med faregrad og de aktuelle skredproblemene 
    (svakt lag, skredtype og skredproblemnavnert). Der det er gjort en regObs observasjon 
    med «hendelse/ulykke» skjema fylt ut har jeg også lagt på skadeomfangsvurderingen.

    August 2018: Hei Jostein.

    Som du veit skal eg skriva om: Skredulykker knytt til skredproblem
    Du snakka om at det var muleg å få ut data for dette frå NVE sin database. Kan du hjelpa meg med det?

    Mvh
    Sander
    """

    pickle_file_name = '{0}dl_inci_sander.pickle'.format(env.local_storage)
    output_incident_and_dl = '{0}Hendelse og faregrad til Sander.csv'.format(
        env.output_folder)
    get_new = False

    if get_new:
        varsom_incidents = gm.get_varsom_incidents(add_forecast_regions=True,
                                                   add_forecasts=True,
                                                   add_observations=True)
        mp.pickle_anything(varsom_incidents, pickle_file_name)
    else:
        varsom_incidents = mp.unpickle_anything(pickle_file_name)

    incident_and_dl = []

    for i in varsom_incidents:

        incident_date = i.date
        danger_level = None

        problem_1 = None
        problem_2 = None
        problem_3 = None

        avalanche_type_1 = None
        avalanche_type_2 = None
        avalanche_type_3 = None

        weak_layer_1 = None
        weak_layer_2 = None
        weak_layer_3 = None

        dato_regobs = None
        damage_extent = None

        if i.forecast:
            danger_level = i.forecast.danger_level
            for p in i.forecast.avalanche_problems:
                if p.order == 1:
                    problem_1 = p.problem
                    weak_layer_1 = p.cause_name
                    avalanche_type_1 = p.aval_type
                if p.order == 2:
                    problem_2 = p.problem
                    weak_layer_2 = p.cause_name
                    avalanche_type_2 = p.aval_type
                if p.order == 3:
                    problem_3 = p.problem
                    weak_layer_3 = p.cause_name
                    avalanche_type_3 = p.aval_type

            if i.observations:
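                # take the date of the first grouped observation and scan all regObs Incident forms
                # for a damage extent assessment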
                dato_regobs = i.observations[0].DtObsTime.date()
                for obs in i.observations:
                    for o in obs.Observations:
                        if isinstance(o, go.Incident):
                            damage_extent = o.DamageExtentName

        incident_and_dl.append({
            'Dato': incident_date,
            # 'Dato (regObs)': dato_regobs,
            'Region': i.region_name,
            'Kommune': i.municipality,
            'Dødsfall': i.fatalities,
            'Alvorsgrad': damage_extent,
            'Involverte': i.people_involved,
            'Aktivitet': i.activity,
            'Faregrad': danger_level,
            'Skredproblem 1': problem_1,
            'Skredtype 1': avalanche_type_1,
            'Svaktlag 1': weak_layer_1,
            'Skredproblem 2': problem_2,
            'Skredtype 2': avalanche_type_2,
            'Svaktlag 2': weak_layer_2,
            'Skredproblem 3': problem_3,
            'Skredtype 3': avalanche_type_3,
            'Svaktlag 3': weak_layer_3,
            'Kommentar': i.comment,
            'regObs': '{}'.format(i.regid)
        })

    # Write incidents with danger levels and problems to file
    with open(output_incident_and_dl, 'w', encoding='utf-8') as f:
        make_header = True
        for i in incident_and_dl:
            if make_header:
                f.write(' ;'.join([fe.make_str(d) for d in i.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d) for d in i.values()]).replace(
                '[', '').replace(']', '') + '\n')
def make_dl_incident_markus():
    """
    From the beginning of time:

    get all forecasts.
    and then get how many on dl 3.

    get all incidents,
    excpt elrapp, and all in back country

    all these, get all on days in regions of dl 3.
    get all with serious caracter on days and in regions on dl 3

    :return:
    """

    pickle_file_name = '{0}incident_on_dl3_for_markus.pickle'.format(
        env.local_storage)
    years = ['2012-13', '2013-14', '2014-15', '2015-16', '2016-17']
    get_new = False

    all_dangers = []
    all_incidents = []

    if get_new:
        for y in years:

            # get forecast dates for this year
            from_date, to_date = gm.get_forecast_dates(y)

            # get incidents for this year and map to this years forecast regions
            this_year_incidents = go.get_incident(from_date,
                                                  to_date,
                                                  geohazard_tids=10)
            for i in this_year_incidents:
                utm33x = i.UTMEast
                utm33y = i.UTMNorth
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                i.region_regobs_id = region_id
                i.region_name = region_name
            all_incidents += this_year_incidents

            # get regions and the forecasts used this year
            region_ids = gm.get_forecast_regions(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        # in the end, pickle it all
        mp.pickle_anything([all_dangers, all_incidents], pickle_file_name)

    else:
        [all_dangers, all_incidents] = mp.unpickle_anything(pickle_file_name)

    all_dl3 = []
    for d in all_dangers:
        if d.danger_level == 3:
            all_dl3.append(d)

    all_back_country_incidents = []
    for i in all_incidents:
        if 'drift@svv' not in i.NickName:
            # if the activity influenced is back country or scooter.
            # 100 (not specified) is also included, so incidents where
            # the activity was not given end up in this dataset as well
            if i.ActivityInfluencedTID in [
                    100, 110, 111, 112, 113, 114, 115, 116, 117, 130
            ]:
                all_back_country_incidents.append(i)

    all_back_country_incidents_with_consequence = []
    for i in all_back_country_incidents:
        # If the damage extent is a near miss, people injured or people killed
        if i.DamageExtentTID > 28:
            all_back_country_incidents_with_consequence.append(i)

    # find incidents in regions on days with danger level 3
    all_back_country_incidents_on_region_dl3 = []
    all_back_country_incidents_with_consequence_on_region_dl3 = []

    for d in all_dl3:
        danger_date = d.date
        danger_region_id = d.region_regobs_id

        for i in all_back_country_incidents:
            incident_date = i.DtObsTime.date()
            incident_region_id = i.ForecastRegionTID
            if incident_date == danger_date and incident_region_id == danger_region_id:
                all_back_country_incidents_on_region_dl3.append(i)

        for i in all_back_country_incidents_with_consequence:
            incident_date = i.DtObsTime.date()
            incident_region_id = i.ForecastRegionTID
            if incident_date == danger_date and incident_region_id == danger_region_id:
                all_back_country_incidents_with_consequence_on_region_dl3.append(
                    i)

    print('Totalt varsler laget siden tidenes morgen: {}'.format(
        len(all_dangers)))
    print('Totalt varsler på fg 3: {}'.format(len(all_dl3)))
    print('Totalt antall hendelser i baklandet: {}'.format(
        len(all_back_country_incidents)))
    print('Totalt antall hendelser i baklandet med konsekvens: {}'.format(
        len(all_back_country_incidents_with_consequence)))
    print(
        'Totalt antall hendelser i baklandet i regioner på dager med fg3: {}'.
        format(len(all_back_country_incidents_on_region_dl3)))
    print(
        'Totalt antall hendelser i baklandet i regioner på dager med fg3 med konsekvens: {}'
        .format(
            len(all_back_country_incidents_with_consequence_on_region_dl3)))

    return
Example #19
def get_all_observations(year,
                         output='Nest',
                         geohazard_tids=None,
                         lang_key=1,
                         max_file_age=23):
    """Specialized method for getting all observations for one season (1. sept to 31. august).
    For the current season (at the time of writing, 2018-19), if request has been made the last 23hrs,
    data is retrieved from a locally stored pickle, if not, new request is made to the regObs api. Previous
    seasons are not requested if a pickle is found in local storage.

    :param year:                [string] Eg. season '2017-18' (sept-sept) or one single year '2018'
    :param output:              [string] 'Nest' or 'List'
    :param geohazard_tids:      [int or list of ints] Default None gives all. Note, pickle stores all, but this option returns a select
    :param lang_key             [int] 1 is norwegian, 2 is english
    :param max_file_age:        [int] hrs how old the file is before new is retrieved

    :return:
    """

    from_date, to_date = gm.get_dates_from_season(year=year)
    file_name_list = '{0}all_observations_list_{1}_lk{2}.pickle'.format(
        env.local_storage, year, lang_key)
    file_name_nest = '{0}all_observations_nest_{1}_lk{2}.pickle'.format(
        env.local_storage, year, lang_key)
    get_new = True
    date_limit = dt.datetime.now() - dt.timedelta(hours=max_file_age)

    # if we are well out of the current season (30 days), there is little chance the data set has changed.
    current_season = gm.get_season_from_date(dt.date.today() -
                                             dt.timedelta(30))
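    # (for the first ~30 days of a new season the previous season is still treated as current,
    #  so its pickle can still be refreshed)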

    if geohazard_tids:
        if not isinstance(geohazard_tids, list):
            geohazard_tids = [geohazard_tids]

    if os.path.exists(file_name_list):
        # if the file contains a season long gone, don't make a new one.
        if year == current_season:
            file_age = dt.datetime.fromtimestamp(
                os.path.getmtime(file_name_list))
            # If the file is newer than the given time limit, don't make a new one.
            if file_age > date_limit:
                # If the file is larger than a nearly empty file, don't make a new one.
                if os.path.getsize(file_name_list) > 100:  # 100 bytes limit
                    get_new = False
        else:
            get_new = False

    if get_new:
        # When getting new data, all geohazards are requested (the pickle stores everything)
        nested_observations = go.get_data_as_class(from_date=from_date,
                                                   to_date=to_date,
                                                   output='Nest',
                                                   geohazard_tids=None,
                                                   lang_key=lang_key)

        mp.pickle_anything(nested_observations, file_name_nest)

        listed_observations = []
        for d in nested_observations:
            for o in d.Observations:
                if _observation_is_not_empty(o):
                    listed_observations.append(o)
            for p in d.Pictures:
                # p['RegistrationName'] = 'Bilde'
                listed_observations.append(p)

        mp.pickle_anything(listed_observations, file_name_list)

    if output == 'Nest':
        all_nested_observations = mp.unpickle_anything(file_name_nest)
        nested_observations = []

        if geohazard_tids:
            for o in all_nested_observations:
                if o.GeoHazardTID in geohazard_tids:
                    nested_observations.append(o)

        else:
            nested_observations = all_nested_observations

        return nested_observations

    elif output == 'List':
        all_listed_observations = mp.unpickle_anything(file_name_list)
        listed_observations = []

        if geohazard_tids:
            for o in all_listed_observations:
                if o.GeoHazardTID in geohazard_tids:
                    listed_observations.append(o)

        else:
            listed_observations = all_listed_observations

        return listed_observations

    else:
        ml.log_and_print(
            '[warning] getvarsompickles.py -> get_all_observations: Unknown output option'
        )
        return []
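
# Illustrative use of get_all_observations (arguments are examples only):
#   snow_obs = get_all_observations('2018-19', output='List', geohazard_tids=10)
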
def make_forecasts_for_Heidi():
    """July 2018: Make list of avalanche forecasts for regions Voss, Svartisen og Fauske (and those before them)
    for Heidi Bjordal SVV"""

    pickle_file_name = '{0}201807_avalanche_forecasts_heidi.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Voss. ForecastRegionTID 124 from 2012-2016 and 3031 since.
        # Get Svartisen. ForecastRegionTID 131 from 2012-2016 and 3017 since.
        # Get Salten. ForecastRegionTID 133 from 2012-2016 and 3016 since.

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [124, 131, 133]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        years = ['2016-17', '2017-18']
        region_ids = [3031, 3017, 3016]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201807 Snøskredvarsel for Heidi.txt'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')

    pass
def make_avalanche_problemes_for_techel():
    """Gets forecastes and observed avalanche problems and dangers for Frank Techel.

    Takes 20-30 min to run a year.

    :return:
    """

    pickle_file_name = '{0}runavalancheproblems_techel.pickle'.format(
        env.local_storage)

    years = ['2014-15', '2015-16', '2016-17', '2017-18']
    get_new = False

    if get_new:
        forecast_problems = []
        forecast_dangers = []
        observed_dangers = []
        observed_problems = []

        for y in years:
            # Get forecast data. Different region ids from year to year.
            region_ids = gm.get_forecast_regions(year=y)
            from_date, to_date = gm.get_forecast_dates(y)
            forecast_problems += gp.get_forecasted_problems(region_ids,
                                                            from_date,
                                                            to_date,
                                                            lang_key=2)
            forecast_dangers += gd.get_forecasted_dangers(region_ids,
                                                          from_date,
                                                          to_date,
                                                          lang_key=2)

            # Get observed data. All older data in regObs have been mapped to new regions.
            region_ids = gm.get_forecast_regions(year='2016-17')
            from_date, to_date = gm.get_forecast_dates(
                y, padding=dt.timedelta(days=20))
            this_years_observed_dangers = gd.get_observed_dangers(region_ids,
                                                                  from_date,
                                                                  to_date,
                                                                  lang_key=2)
            this_years_observed_problems = gp.get_observed_problems(region_ids,
                                                                    from_date,
                                                                    to_date,
                                                                    lang_key=2)

            # Update observations with the forecast region ids and names used in the respective years
            for od in this_years_observed_dangers:
                utm33x = od.metadata['Original data'].UTMEast
                utm33y = od.metadata['Original data'].UTMNorth
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                od.region_regobs_id = region_id
                od.region_name = region_name

            for op in this_years_observed_problems:
                utm33x = op.metadata['Original data']['UtmEast']
                utm33y = op.metadata['Original data']['UtmNorth']
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                op.region_regobs_id = region_id
                op.region_name = region_name

            observed_dangers += this_years_observed_dangers
            observed_problems += this_years_observed_problems

        mp.pickle_anything([
            forecast_problems, forecast_dangers, observed_dangers,
            observed_problems
        ], pickle_file_name)

    else:
        [
            forecast_problems, forecast_dangers, observed_dangers,
            observed_problems
        ] = mp.unpickle_anything(pickle_file_name)

    # Run EAWS mapping on all problems
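    # (map_to_eaws_problems is assumed to map the Norwegian problem definitions onto the standard EAWS
    #  problem set, so that p.eaws_problem can be written to the csv files below)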
    for p in forecast_problems:
        p.map_to_eaws_problems()

    for p in observed_problems:
        p.map_to_eaws_problems()

    output_forecast_problems = '{0}Techel forecast problems.csv'.format(
        env.output_folder)
    output_forecast_dangers = '{0}Techel forecast dangers.csv'.format(
        env.output_folder)
    output_observed_problems = '{0}Techel observed problems.csv'.format(
        env.output_folder)
    output_observed_dangers = '{0}Techel observed dangers.csv'.format(
        env.output_folder)

    import collections as coll

    # Write observed dangers to file
    with open(output_observed_dangers, 'w', encoding='utf-8') as f:
        make_header = True
        for d in observed_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Reg time',
                 dt.datetime.strftime(d.registration_time, '%Y-%m-%d %H:%M')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('Municipal', d.municipal_name),
                ('Nick', d.nick),
                ('Competence', d.competence_level),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
                ('Forecast correct', d.forecast_correct),
                # ('Table', d.data_table),
                # ('URL', d.url),
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write forecasted dangers to file
    with open(output_forecast_dangers, 'w', encoding='utf-8') as f:
        make_header = True
        for d in forecast_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('Nick', d.nick),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
                # ('Table', d.data_table),
                # ('URL', d.url),
                ('Main message', ' '.join(d.main_message_en.split()))
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write forecasted problems to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for p in forecast_problems:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                ('Region id', p.region_regobs_id),
                ('Region', p.region_name),
                ('Nick', p.nick_name),
                ('Problem order', p.order),
                ('Problem', p.problem),
                ('EAWS problem', p.eaws_problem),
                ('Cause/ weaklayer', p.cause_name),
                # ('TypeTID', p.aval_type_tid),
                ('Type', p.aval_type),
                ('Size', p.aval_size),
                ('Trigger', p.aval_trigger),
                ('Probability', p.aval_probability),
                ('Distribution', p.aval_distribution),
                ('DL', p.danger_level),
                ('Danger level', p.danger_level_name),
                # ('Table', p.regobs_table),
                # ('URL', p.url)
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write observed problems to file
    with open(output_observed_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for p in observed_problems:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                ('Reg time',
                 dt.datetime.strftime(p.registration_time, '%Y-%m-%d %H:%M')),
                ('Region id', p.region_regobs_id),
                ('Region', p.region_name),
                ('Municipal', p.municipal_name),
                ('Nick', p.nick_name),
                ('Competence', p.competence_level),
                # ('Problem order', p.order),
                ('EAWS problem', p.eaws_problem),
                ('Cause/ weaklayer', p.cause_name),
                # ('TypeTID', p.aval_type_tid),
                ('Type', p.aval_type),
                ('Catch 1', p.cause_attribute_crystal),
                ('Catch 2', p.cause_attribute_light),
                ('Catch 3', p.cause_attribute_soft),
                ('Catch 4', p.cause_attribute_thin),
                ('Size', p.aval_size),
                ('Trigger', p.aval_trigger),
                # ('Probability', p.aval_probability),
                # ('Distribution', p.aval_distribution),
                # ('RegID', p.regid),
                # ('Table', p.regobs_table),
                # ('URL', p.url)
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')
Example #22
def make_forecasts_at_incidents_for_mala(get_new=False):
    """Lager csv med alle varsomhendelser sammen med faregrad og de aktuelle skredproblemene
    (svakt lag, skredtype og skredproblemnavnert). Der det er gjort en regObs observasjon
    med «hendelse/ulykke» skjema fylt ut har jeg også lagt på skadeomfangsvurderingen.
    """

    pickle_file_name = '{0}dl_inci_mala.pickle'.format(env.local_storage)
    output_incident_and_dl = '{0}incidents_mala.csv'.format(env.output_folder)

    if get_new:
        varsom_incidents = gm.get_varsom_incidents(add_forecast_regions=True,
                                                   add_forecasts=True,
                                                   add_observations=False)
        mp.pickle_anything(varsom_incidents, pickle_file_name)
    else:
        varsom_incidents = mp.unpickle_anything(pickle_file_name)

    incident_and_dl = []

    for i in varsom_incidents:

        incident_date = i.date
        danger_level = None

        problem_1 = None
        problem_2 = None
        problem_3 = None

        avalanche_type_1 = None
        avalanche_type_2 = None
        avalanche_type_3 = None

        weak_layer_1 = None
        weak_layer_2 = None
        weak_layer_3 = None

        dato_regobs = None
        damage_extent = None

        if i.forecast:
            danger_level = i.forecast.danger_level
            for p in i.forecast.avalanche_problems:
                if p.order == 1:
                    problem_1 = p.problem
                    weak_layer_1 = p.cause_name
                    avalanche_type_1 = p.aval_type
                if p.order == 2:
                    problem_2 = p.problem
                    weak_layer_2 = p.cause_name
                    avalanche_type_2 = p.aval_type
                if p.order == 3:
                    problem_3 = p.problem
                    weak_layer_3 = p.cause_name
                    avalanche_type_3 = p.aval_type

            if i.observations:
                dato_regobs = i.observations[0].DtObsTime.date()
                for obs in i.observations:
                    for o in obs.Observations:
                        if isinstance(o, go.Incident):
                            damage_extent = o.DamageExtentName

        incident_and_dl.append({
            'Date': incident_date,
            # 'Dato (regObs)': dato_regobs,
            'Region_id': i.region_id,
            'Region': i.region_name,
            'Fatalities': i.fatalities,
            'Damage_extent': damage_extent,
            'People_involved': i.people_involved,
            'Activity': i.activity,
            'Danger_level': danger_level,
            'Avalanche_problem_1': problem_1,
            'Avalanche_type_1': avalanche_type_1,
            'Weak_layer_1': weak_layer_1,
            'Avalanche_problem_2': problem_2,
            'Avalanche_type_2': avalanche_type_2,
            'Weak_layer_2': weak_layer_2,
            'Avalanche_problem_3': problem_3,
            'Avalanche_type_3': avalanche_type_3,
            'Weak_layer_3': weak_layer_3,
            'Comment': i.comment,
            'regObs_id': '{}'.format(i.regid)
        })

    # Write incidents and danger levels to file
    with open(output_incident_and_dl, 'w', encoding='utf-8') as f:
        make_header = True
        for i in incident_and_dl:
            if make_header:
                f.write(';'.join([fe.make_str(d) for d in i.keys()]) + '\n')
                make_header = False
            f.write(';'.join([fe.make_str(d) for d in i.values()]).replace(
                '[', '').replace(']', '') + '\n')
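
# A minimal usage sketch, assuming env.output_folder is configured as in the rest of this module:
# refresh the data once, then read the ';'-separated csv back for further analysis.
import csv

make_forecasts_at_incidents_for_mala(get_new=True)
with open('{0}incidents_mala.csv'.format(env.output_folder), encoding='utf-8') as f:
    for row in csv.DictReader(f, delimiter=';'):
        print(row['Date'], row['Danger_level'])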
Example #23
def get_all_season_ice(year, get_new=True):
    """Returns observed ice columns from regObs-webapi over a requested season. Ice covers representing
    first ice or ice cover lost are represented by an ice column of zero height.

    The workings of this routine:
    1.  Get one season of data from regobs-api, spreads them out to a long list.
    2.  Pick out only covers and columns and group them on location_ids. We keep only locations with
        a date for first ice that season. All observations are mapped to the cover and column classes in ice.py.
    3.  Map all covers where first_ice or ice_cover_lost is True to zero-height columns. Remove the rest.

    If get_new=True new data is retrieved. If get_new=False data is loaded from the pickle.

    :param year:
    :param get_new:
    :return:
    """

    file_name_and_path = '{0}get_all_season_ice_{1}.pickle'.format(se.local_storage, year)
    from_date, to_date = gm.get_dates_from_year(year)

    if get_new:

        all_observations = get_data(from_date=from_date, to_date=to_date, geohazard_tids=70)

        all_locations = {}

        for o in all_observations:
            if o['RegistrationTid'] == 51 or o['RegistrationTid'] == 50:
                if o['LocationId'] in all_locations.keys():
                    all_locations[o['LocationId']].append(o)
                else:
                    all_locations[o['LocationId']] = [o]

        # sort oldest first on each location
        for l, obs in all_locations.items():
            sorted_list = sorted(obs, key=lambda d: d['DtObsTime'])
            all_locations[l] = sorted_list

        # Use only locations with verified "first ice cover" date.
        all_locations_with_first_ice = {}

        for l, obs in all_locations.items():
            for o in obs:
                if o['RegistrationTid'] == 51:
                    # if the ice cover is partly or fully formed on the observation location or the lake
                    # 2) partly frozen over at the observation location
                    # 3) fully frozen over at the observation location
                    # 21) the whole lake is frozen over
                    if (o['FullObject']['IceCoverTID'] == 2) or (o['FullObject']['IceCoverTID'] == 3) or \
                            (o['FullObject']['IceCoverTID'] == 21):
                        # and if the ice cover before was
                        # 1) ice free at the observation location
                        # 2) partly frozen over at the observation location
                        # 11) ice forming along the shore
                        # 20) the whole lake is ice free - this is first ice
                        if (o['FullObject']['IceCoverBeforeTID'] == 1) or (o['FullObject']['IceCoverBeforeTID'] == 2) or \
                                (o['FullObject']['IceCoverBeforeTID'] == 11) or (o['FullObject']['IceCoverBeforeTID'] == 20):
                            all_locations_with_first_ice[l] = obs

        # Map all observations from regObs-webapi result structure to the classes in ice.py
        all_locations_with_classes = {}

        for l, obs in all_locations_with_first_ice.items():
            all_locations_with_classes[l] = []
            location_name = obs[0]['LocationName']

            previous_cover = ice.IceCover(dt.datetime.strptime(from_date, "%Y-%m-%d").date(), "Ikke gitt", 'Ikke gitt', location_name)

            for o in obs:
                if o['RegistrationTid'] == 51:

                    cover_date = dt.datetime.strptime(o['DtObsTime'][0:16], "%Y-%m-%dT%H:%M")
                    cover = o['FullObject']['IceCoverTName']
                    cover_before = o['FullObject']['IceCoverBeforeTName']
                    cover_after = o['FullObject']['IceCoverAfterTName']
                    cover_tid = o['FullObject']['IceCoverTID']
                    cover_before_tid = o['FullObject']['IceCoverBeforeTID']
                    cover_after_tid = o['FullObject']['IceCoverAfterTID']

                    this_cover = ice.IceCover(cover_date, cover, cover_before, location_name)
                    this_cover.set_regid(o['RegId'])
                    this_cover.set_locationid(o['LocationId'])
                    this_cover.set_utm(o['UtmNorth'], o['UtmEast'], o['UtmZone'])
                    this_cover.set_cover_after(cover_after, cover_after_tid)
                    this_cover.add_original_object(o)

                    # if the ice cover is partly or fully formed on the observation location or the lake
                    # 2) partly frozen over at the observation location
                    # 3) fully frozen over at the observation location
                    # 21) the whole lake is frozen over
                    if cover_tid == 2 or cover_tid == 3 or cover_tid == 21:
                        # and if the ice cover before was
                        # 1) ice free, now first ice at the observation location
                        # 2) ice free, now first ice along the shore
                        # 4) gradual freeze-up
                        if cover_before_tid == 1 or cover_before_tid == 2 or cover_before_tid == 4:
                            this_cover.mark_as_first_ice()

                    # if the ice cover is partly or fully gone on the location and there was ice yesterday
                    # 1) ice free at the observation location
                    # 2) partly frozen over at the observation location
                    # 20) the whole lake is ice free
                    if cover_tid == 1 or cover_tid == 2 or cover_tid == 20:
                        # 10) ice free for the rest of the winter
                        # Also accepts ice free observations after 15 March
                        to_year = this_cover.date.year
                        first_accepted_date = dt.datetime(to_year, 3, 15)
                        last_accepted_date = dt.datetime(to_year, 9, 1)
                        if cover_after_tid == 10 or (cover_date > first_accepted_date and cover_date < last_accepted_date):
                            this_cover.mark_as_ice_cover_lost()

                    # copy of this cover so that in next iteration I may look up previous cover.
                    previous_cover = cp.deepcopy(this_cover)

                    all_locations_with_classes[l].append(this_cover)

                if o['RegistrationTid'] == 50:
                    ice_column = _webapi_ice_col_to_ice_class(o)

                    if ice_column is not None:
                        all_locations_with_classes[l].append(ice_column)

        # Map all covers where first_ice or ice_cover_lost is True to zero-height columns. Remove all the rest.
        all_locations_with_columns = {}
        for k, v in all_locations_with_classes.items():
            new_v = []
            for o in v:
                if isinstance(o, ice.IceCover):
                    if o.first_ice or o.ice_cover_lost:
                        new_o = ice.IceColumn(o.date, [])
                        new_o.add_metadata('OriginalObject', o.metadata['OriginalObject'])
                        new_o.add_metadata('UTMEast', o.metadata['UTMEast'])
                        new_o.add_metadata('UTMNorth', o.metadata['UTMNorth'])
                        new_o.add_metadata('UTMZone', o.metadata['UTMZone'])
                        new_o.add_metadata('LocationName', o.locationName)
                        new_o.add_metadata('LocationID', o.LocationID)
                        new_v.append(new_o)
                else:
                    new_v.append(o)
            all_locations_with_columns[k] = new_v

        mp.pickle_anything(all_locations_with_columns, file_name_and_path)

    else:
        # if pickle file with all data for the season does not exist, get data anyway
        if not os.path.exists(file_name_and_path):
            all_locations_with_columns = get_all_season_ice(year, get_new=True)
        else:
            all_locations_with_columns = mp.unpickle_anything(file_name_and_path, print_message=False)

    return all_locations_with_columns
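
# A minimal usage sketch: get one season of zero-height columns (first ice / ice cover lost)
# and observed ice columns, and list how many observations each location has.
locations_with_columns = get_all_season_ice('2018-19', get_new=False)
for location_id, columns in locations_with_columns.items():
    print(location_id, len(columns))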
def get_node_list(pickle_file_name_3, make_new_node_list,
                  desired_damage_extent_kdv, incident_list):
    """Makes a list of NodesAndValues objects. All nodes get an object and relations between the nodes are
    calculated. Lots of looping.

    :param pickle_file_name_3:
    :param make_new_node_list:
    :param desired_damage_extent_kdv:
    :param incident_list:

    :return:
    """

    if make_new_node_list:
        problem_kdv = {
            0: 'Ikke gitt',
            3: 'Toerre loessnoeskred',
            5: 'Vaate loessnoeskred',
            7: 'Nysnoeflak',
            10: 'Fokksnoe',
            20: 'Nysnoe',
            30: 'Vedvarende svakt lag',
            37: 'Dypt vedvarende svakt lag',
            40: 'Vaat snoe',
            45: 'Vaate flakskred',
            50: 'Glideskred'
        }

        cause_kdv = gkdv.get_kdv('AvalCauseKDV')
        danger_kdv = gkdv.get_kdv('AvalancheDangerKDV')
        activity_influenced_kdv = gkdv.get_kdv('ActivityInfluencedKDV')

        nodes_dict = {}
        id_counter = -1

        for cause_tid, cause_kdve in cause_kdv.items():
            cause_name = cause_kdve.Name
            if 'kke gitt' in cause_name:
                cause_name = 'Svakt lag {0}'.format(cause_name)
            if cause_kdve.IsActive:
                id_counter += 1
                nodes_dict[cause_name] = id_counter

        for problem_tid, problem_name in problem_kdv.items():
            if 'kke gitt' in problem_name:
                problem_name = 'Skredproblem {0}'.format(problem_name)
            id_counter += 1
            nodes_dict[problem_name] = id_counter

        for desired_damage_extent_tid, desired_damage_extent_name in desired_damage_extent_kdv.items(
        ):
            if 'kke gitt' in desired_damage_extent_name:
                desired_damage_extent_name = 'Skadeomfang {0}'.format(
                    desired_damage_extent_name)
            id_counter += 1
            nodes_dict[desired_damage_extent_name] = id_counter

        for activity_influenced_tid, activity_influenced_kdve in activity_influenced_kdv.items(
        ):
            if activity_influenced_tid < 200:  # only snow
                activity_influenced_name = activity_influenced_kdve.Name
                if 'kke gitt' in activity_influenced_name:
                    activity_influenced_name = 'Aktivitet {0}'.format(
                        activity_influenced_name)
                if activity_influenced_kdve.IsActive:
                    id_counter += 1
                    nodes_dict[activity_influenced_name] = id_counter

        for danger_tid, danger_kdve in danger_kdv.items():
            danger_name = danger_kdve.Name
            if 'kke gitt' in danger_name:
                danger_name = 'Faregrad {0}'.format(danger_name)
            if danger_kdve.IsActive:
                id_counter += 1
                nodes_dict[danger_name] = id_counter

        make_nodes = True
        nodes_and_values = []
        print_counter = 0

        for i in incident_list:

            print('Index {0} of {1} in incident_list'.format(print_counter, len(incident_list)))
            print_counter += 1

            if i.forecast:
                cause = i.forecast.avalanche_problems[0].cause_name
                if 'kke gitt' in cause: cause = 'Svakt lag {0}'.format(cause)
                problem = i.forecast.avalanche_problems[0].main_cause
                if 'kke gitt' in problem:
                    problem = 'Skredproblem {0}'.format(problem)

                # Loop through the cause and problem list.
                # If it is the first run make the nodes.
                # If the causes in the lists match what is in the list of actual incidents, add one to the node.
                for cause_tid, cause_kdve in cause_kdv.items():
                    if cause_kdve.IsActive:
                        cause_name = cause_kdve.Name
                        if 'kke gitt' in cause_name:
                            cause_name = 'Svakt lag {0}'.format(cause_name)
                        for problem_tid, problem_name in problem_kdv.items():
                            if 'kke gitt' in problem_name:
                                problem_name = 'Skredproblem {0}'.format(
                                    problem_name)
                            if make_nodes:  # the run of the first item of incident_list covers all nodes
                                nodes_and_values.append(
                                    NodesAndValues(cause_name,
                                                   nodes_dict[cause_name],
                                                   problem_name,
                                                   nodes_dict[problem_name]))
                            if cause in cause_name and problem in problem_name:
                                for nv in nodes_and_values:
                                    if cause in nv.node_name and problem in nv.target_name:
                                        nv.add_one()

                damage_extent = i.incident.DamageExtentName
                if 'kke gitt' in damage_extent:
                    damage_extent = 'Skadeomfang {0}'.format(damage_extent)

                for problem_tid, problem_name in problem_kdv.items():
                    if 'kke gitt' in problem_name:
                        problem_name = 'Skredproblem {0}'.format(problem_name)
                    for desired_damage_extent_tid, desired_damage_extent_name in desired_damage_extent_kdv.items(
                    ):
                        if 'kke gitt' in desired_damage_extent_name:
                            desired_damage_extent_name = 'Skadeomfang {0}'.format(
                                desired_damage_extent_name)
                        if make_nodes:
                            nodes_and_values.append(
                                NodesAndValues(
                                    problem_name, nodes_dict[problem_name],
                                    desired_damage_extent_name,
                                    nodes_dict[desired_damage_extent_name]))
                        if problem in problem_name and damage_extent in desired_damage_extent_name:
                            for nv in nodes_and_values:
                                if problem in nv.node_name and damage_extent in nv.target_name:
                                    nv.add_one()

                activity_influenced = i.incident.ActivityInfluencedName
                if 'kke gitt' in activity_influenced:
                    activity_influenced = 'Aktivitet {0}'.format(
                        activity_influenced)

                for desired_damage_extent_tid, desired_damage_extent_name in desired_damage_extent_kdv.items(
                ):
                    if 'kke gitt' in desired_damage_extent_name:
                        desired_damage_extent_name = 'Skadeomfang {0}'.format(
                            desired_damage_extent_name)
                    for activity_influenced_tid, activity_influenced_kdve in activity_influenced_kdv.items(
                    ):
                        if activity_influenced_tid < 200:  # only snow
                            activity_influenced_name = activity_influenced_kdve.Name
                            if 'kke gitt' in activity_influenced_name:
                                activity_influenced_name = 'Aktivitet {0}'.format(
                                    activity_influenced_name)
                            if activity_influenced_kdve.IsActive:
                                if make_nodes:
                                    nodes_and_values.append(
                                        NodesAndValues(
                                            desired_damage_extent_name,
                                            nodes_dict[
                                                desired_damage_extent_name],
                                            activity_influenced_name,
                                            nodes_dict[
                                                activity_influenced_name]))
                                if desired_damage_extent_name in damage_extent and activity_influenced_name in activity_influenced:
                                    for nv in nodes_and_values:
                                        if desired_damage_extent_name in nv.node_name and activity_influenced_name in nv.target_name:
                                            nv.add_one()

                danger = i.forecast.danger_level_name
                if 'kke gitt' in danger: danger = 'Faregrad {0}'.format(danger)

                for activity_influenced_tid, activity_influenced_kdve in activity_influenced_kdv.items(
                ):
                    if activity_influenced_tid < 200:
                        activity_influenced_name = activity_influenced_kdve.Name
                        if 'kke gitt' in activity_influenced_name:
                            activity_influenced_name = 'Aktivitet {0}'.format(
                                activity_influenced_name)
                        if activity_influenced_kdve.IsActive:
                            for danger_tid, danger_kdve in danger_kdv.items():
                                danger_name = danger_kdve.Name
                                if 'kke gitt' in danger_name:
                                    danger_name = 'Faregrad {0}'.format(danger_name)
                                if danger_kdve.IsActive:
                                    if make_nodes:
                                        nodes_and_values.append(
                                            NodesAndValues(
                                                activity_influenced_name,
                                                nodes_dict[
                                                    activity_influenced_name],
                                                danger_name,
                                                nodes_dict[danger_name]))
                                    if activity_influenced_name in activity_influenced and danger_name in danger:
                                        for nv in nodes_and_values:
                                            if activity_influenced_name in nv.node_name and danger_name in nv.target_name:
                                                nv.add_one()

            make_nodes = False

        mp.pickle_anything(nodes_and_values, pickle_file_name_3)
    else:
        nodes_and_values = mp.unpickle_anything(pickle_file_name_3)

    return nodes_and_values
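
# get_node_list assumes a NodesAndValues class exposing node_name, target_name and add_one().
# The class is defined elsewhere in the project; the sketch below is an assumption that is
# merely consistent with how the class is used above, not the original implementation.
class NodesAndValues:
    """One source node, one target node and the number of incidents linking them."""

    def __init__(self, node_name, node_id, target_name, target_id):
        self.node_name = node_name
        self.node_id = node_id
        self.target_name = target_name
        self.target_id = target_id
        self.value = 0      # number of incidents linking node to target

    def add_one(self):
        self.value += 1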
Example #25
    ### Get all regions
    region_ids = gm.get_forecast_regions(season)
    from_date, to_date = gm.get_forecast_dates(season)
    # from_date, to_date = '2017-12-01', '2018-02-01'
    # region_ids = [3014, 3015]

    ### Get and make the data set
    date_region, forecasted_dangers = step_1_make_data_set(
        region_ids, from_date, to_date)
    mp.pickle_anything([date_region, forecasted_dangers],
                       '{0}runforavalancheactivity_step_1.pickle'.format(
                           env.local_storage))

    ### Find the observation of highest value per region per date
    date_region, forecasted_dangers = mp.unpickle_anything(
        '{0}runforavalancheactivity_step_1.pickle'.format(env.local_storage))
    date_region = step_2_find_most_valued(date_region)
    mp.pickle_anything([date_region, forecasted_dangers],
                       '{0}runforavalancheactivity_step_2.pickle'.format(
                           env.local_storage))

    ### Ready to count occurrences on the configured elements
    date_region, forecasted_dangers = mp.unpickle_anything(
        '{0}runforavalancheactivity_step_2.pickle'.format(env.local_storage))
    elements = rf.read_configuration_file(
        '{0}aval_dl_configuration.csv'.format(env.matrix_configurations),
        ActivityAndDanger)
    elements = step_3_count_occurances(date_region, elements)
    mp.pickle_anything([date_region, forecasted_dangers, elements],
                       '{0}runforavalancheactivity_step_3.pickle'.format(
                           env.local_storage))
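
# The pickles above make it possible to resume the workflow later. A minimal sketch for
# picking up the step 3 result again (same pickle path as used above):
date_region, forecasted_dangers, elements = mp.unpickle_anything(
    '{0}runforavalancheactivity_step_3.pickle'.format(env.local_storage))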
Example #26
def get_all_forecasts(year, lang_key=1, max_file_age=23):
    """Specialized method for getting all forecasts for one season.
    For the current season (at the time of writing, 2018-19), if a request
    has been made the last 23hrs, data is retrieved from a locally stored pickle,
    if not, new request is made to the regObs api. Previous seasons are not
    requested if a pickle is found in local storage.

    :param year:                [string] Eg. season '2017-18'
    :param lang_key             [int] 1 is norwegian, 2 is english
    :param max_file_age:        [int] hrs how old the file is before new is retrieved

    :return valid_forecasts:    [list of AvalancheWarning]
    """

    from_date, to_date = gm.get_forecast_dates(year=year)
    file_name = '{0}all_forecasts_{1}_lk{2}.pickle'.format(
        env.local_storage, year, lang_key)
    file_date_limit = dt.datetime.now() - dt.timedelta(hours=max_file_age)

    # If we are well out of the current season (30 days past), there is little chance the data set has changed.
    current_season = gm.get_season_from_date(dt.date.today() -
                                             dt.timedelta(30))

    # Get forecast regions used in the current year
    region_ids = gm.get_forecast_regions(year, get_b_regions=True)

    get_new = True

    if os.path.exists(file_name):
        # if the file contains a season long gone, don't make new.
        if year == current_season:
            file_age = dt.datetime.fromtimestamp(os.path.getmtime(file_name))
            # If the file is newer than the given time limit, don't make new.
            if file_age > file_date_limit:
                # If the file size is larger than that of a nearly empty file, don't make new.
                if os.path.getsize(file_name) > 100:  # 100 bytes limit
                    get_new = False
        else:
            get_new = False

    if get_new:
        lg.info(
            "getvarsompickles.py -> get_all_forecasts: Get new {0} forecasts and pickle."
            .format(year))

        all_forecasts = gfa.get_avalanche_warnings(region_ids,
                                                   from_date,
                                                   to_date,
                                                   lang_key=lang_key)

        # Valid forecasts have a danger level. The others are empty.
        valid_forecasts = []
        for f in all_forecasts:
            if f.danger_level > 0:
                valid_forecasts.append(f)

        mp.pickle_anything(valid_forecasts, file_name)

    else:
        valid_forecasts = mp.unpickle_anything(file_name)

    return valid_forecasts
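
# A minimal usage sketch: fetch (or load from pickle) one season of valid forecasts in English
# and report how many there are.
valid_forecasts_2018_19 = get_all_forecasts('2018-19', lang_key=2)
print('{0} valid forecasts found for 2018-19'.format(len(valid_forecasts_2018_19)))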
Example #27
def make_forecasts_for_Espen_at_sweco():
    """Hei. I forbindelse med et prosjekt i Sørreisa i Troms ønsker vi å gi råd til vår kunde om evakuering av bygg
    i skredutsatt terreng. Som en del av vår vurdering hadde det vært veldig nyttig med statistikk for varslingen,
    altså statistikk om hvor ofte de ulike faregradene er varslet. Er det mulig å få tak i slik statistikk?
    Gjerne så langt tilbake i tid som mulig. Vennlig hilsen Espen Eidsvåg"""

    pickle_file_name = '{0}forecasts_sorreisa_espen.pickle'.format(
        env.local_storage)

    get_new = True
    all_dangers = []

    if get_new:

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [110, 112]  # Senja, Bardu
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            for region_id in region_ids:
                all_dangers += gd.get_forecasted_dangers(
                    region_id, from_date, to_date)

        years = ['2016-17', '2017-18', '2018-19']
        region_ids = [3012, 3013]  # Sør Troms, Indre Troms
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            for region_id in region_ids:
                all_dangers += gd.get_forecasted_dangers(
                    region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}Varsel for Sørreisa.Espen Eidsvåg Sweco.csv'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')
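
# Espen asked how often the different danger levels are forecast. A minimal sketch of such a
# summary over a list of forecasted dangers like all_dangers built above; the helper name
# _summarize_danger_levels is hypothetical and only illustrates the idea.
import collections

def _summarize_danger_levels(all_dangers):
    """Count how often each danger level occurs in a list of forecasted dangers (sketch)."""
    danger_level_counts = collections.Counter(d.danger_level_name for d in all_dangers)
    for level_name, count in sorted(danger_level_counts.items()):
        print('{0}: {1}'.format(level_name, count))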
Example #28
def plot_season_for_all_regobs_locations(year='2018-19', calculate_new=False, get_new_obs=False, make_plots=False, delete_old_plots=False):
    """Method specialized for scheduled plotting for iskart.no.
    Method makes a season plot for all ObsLocations in regObs where we have a first ice date.

    It may take some time to plot: roughly 250 lakes for a season, and for each plot weather parameters are requested from the GTS.

    The workings of the method:
    1.  Get all location ids and the associated observations where we have first ice.
    2.1 If calculating new, empty the sesong folder and pickle in local storage and calculate (and make plots if requested).
    2.2 Make a metadata json for showing the files on iskart.no.
    3.  All calculations are compared to observed data in a scatter plot.

    :param year:                [String] Season for plotting. E.g. '2016-17'
    :param calculate_new:       [bool] Calculate new ice thicknesses. If False, only make the seasonal scatter plot.
    :param get_new_obs:         [bool]
    :param make_plots:          [bool]  If False all calculations are made, but only the scatter comparison against observations is plotted.
    :param delete_old_plots:    [bool]  If True all former plots and pickles are removed.
    """

    pickle_file_name_and_path = '{0}all_calculated_ice_{1}.pickle'.format(se.local_storage, year)
    location_id_metadata_json = '{}location_id_metadata.json'.format(se.sesong_plots_folder)

    if calculate_new:
        if delete_old_plots:
            # Empty the sesong plot folder
            for file in os.listdir(se.sesong_plots_folder):
                file_path = os.path.join(se.sesong_plots_folder, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except OSError:
                    pass

            # remove old pickled data - because we are getting new
            try:
                os.remove(pickle_file_name_and_path)
            except OSError:
                pass

        all_observations = gro.get_all_season_ice(year, get_new=get_new_obs)
        from_date, to_date = gm.get_dates_from_year(year)
        all_calculated = {}
        all_observed = {}
        location_id_metadata = {}

        for location_id, observed_ice in all_observations.items():
            try:
                calculated, observed, plot_filename = _plot_season(
                    location_id, from_date, to_date, observed_ice, make_plots=make_plots, plot_folder=se.sesong_plots_folder)
                all_calculated[location_id] = calculated
                all_observed[location_id] = observed
            except:
                error_msg = sys.exc_info()[0]
                ml.log_and_print("[error] calculateandplot.py -> plot_season_for_all_regobs_locations: Error making plot for {0}: {1}".format(location_id, error_msg))
                continue

            # Make the json with metadata needed for iskart.no. Add only if the plot was made and thus file exists.
            if os.path.isfile(se.sesong_plots_folder + plot_filename):

                region_name = observed_ice[0].metadata['OriginalObject']['ForecastRegionName']
                if not region_name:
                    region_name = 'Ukjent region'
                lake_id = observed_ice[0].metadata['LocationID']
                x, y = observed_ice[0].metadata['UTMEast'], observed_ice[0].metadata['UTMNorth']
                lake_name = observed_ice[0].metadata['LocationName']
                if not lake_name:
                    lake_name = 'E{} N{}'.format(x, y)

                location_id_metadata[location_id] = {'RegionName': region_name,
                                                     'LakeID': lake_id,
                                                     'LakeName': lake_name,
                                                     'PlotFileName': plot_filename}

        mp.pickle_anything([all_calculated, all_observed], pickle_file_name_and_path)

        try:
            json_string = json.dumps(location_id_metadata, ensure_ascii=False).encode('utf-8')
            with open(location_id_metadata_json, 'wb') as f:
                f.write(json_string)
        except:
            error_msg = sys.exc_info()[0]
            ml.log_and_print("[error]calculateandplot.py -> plot_season_for_all_regobs_locations: Cant write json. {}".format(error_msg))

    else:
        [all_calculated, all_observed] = mp.unpickle_anything(pickle_file_name_and_path)

    try:
        pts.scatter_calculated_vs_observed(all_calculated, all_observed, year)
    except:
        error_msg = sys.exc_info()[0]
        ml.log_and_print("[error] calculateandplot.py -> plot_season_for_all_regobs_locations: {}. Could not plot scatter {}.".format(error_msg, year))
Example #29
def calculate_and_plot9d_season(period='2018-19'):
    """Calculate ice columns for 9 days and make plots of all ice thickness for a given season or optionally 'Today'.

    The inner workings:
    1.1 Retrieves ice thickness observations from regObs. If period is given as a season, all observations for
        this season will be requested. All previous plots and local storage will be deleted.
    1.2 If period='Today' ice thickness observations from today will be requested and plotted. Older plots will be
        in the folder. Metadata dict will be merged.
    2.  Calculate the 9 day prognosis from the observation time and plots the result.
    3.  Make a metadata json for handling files on iskart.no. Only confirmed files in folder will be
        added to metadata json.

    :param period:    [String] Default is current season (2017-18).
    :return:
    """

    log_referance = 'calculateandplot.py -> calculate_and_plot9d_season'

    # File names
    regid_metadata_json = '{}regid_metadata.json'.format(se.ni_dogn_plots_folder)
    regid_metadata_pickle = '{}regid_metadata.pickle'.format(se.local_storage)

    if period == 'Today':
        ice_thicks = gro.get_ice_thickness_today()

    else:
        # Empty the 9dogn folder
        # for file in os.listdir(se.ni_dogn_plots_folder):
        #     file_path = os.path.join(se.ni_dogn_plots_folder, file)
        #     try:
        #         if os.path.isfile(file_path):
        #             os.unlink(file_path)
        #     except OSError:
        #         pass

        # remove pickle with metadata
        try:
            os.remove(regid_metadata_pickle)
        except OSError:
            pass

        # Get new observations
        ice_thicks = gro.get_ice_thickness_observations(period, reset_and_get_new=True)

    # Calculate and plot
    for k, v in ice_thicks.items():

        # If the plot file is missing, make it. If the observation is older than 11 days it is based on gridded data for sure and no plot file is needed.
        make_plot = False
        max_file_age = 11
        date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)
        file_names = os.listdir(se.ni_dogn_plots_folder)
        plot_filename = '{0}.png'.format(k)
        if plot_filename not in file_names:
            make_plot = True
        else:
            if v.date.date() > date_limit.date():
                make_plot = True

        if make_plot:
            try:
                calculate_and_plot9d_regid(k, plot_folder=se.ni_dogn_plots_folder, observed_ice=v)
            except:
                error_msg = sys.exc_info()[0]
                ml.log_and_print("[Error] {} Error making plot for {} {}".format(log_referance, k, error_msg))

    # Make a json with metadata for using the files on iskart.no. Load metadata from pickle if available,
    # and add new observations for which a plot is available.
    if not os.path.exists(regid_metadata_pickle):
        regid_metadata = {}
    else:
        regid_metadata = mp.unpickle_anything(regid_metadata_pickle)

    list_of_plots = os.listdir(se.ni_dogn_plots_folder)

    for k, v in ice_thicks.items():
        # only add metadata on files that are in the folder
        if '{0}.png'.format(k) in list_of_plots:
            date = v.date.date()

            region_name = v.metadata['OriginalObject']['ForecastRegionName']
            if not region_name:
                region_name = 'Ukjent region'
            x, y = v.metadata['UTMEast'], v.metadata['UTMNorth']
            lake_id = v.metadata['LocationID']
            lake_name = v.metadata['LocationName']
            if not lake_name:
                lake_name = 'E{} N{}'.format(x,y)

            regid_metadata[k] = {'RegionName':region_name,'LakeID':lake_id,'LakeName':lake_name,'Date':'{}'.format(date)}

    mp.pickle_anything(regid_metadata, regid_metadata_pickle)

    json_string = json.dumps(regid_metadata, ensure_ascii=False).encode('utf-8')
    with open(regid_metadata_json, 'wb') as f:
        f.write(json_string)
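
# A minimal usage sketch: a daily scheduled job would typically run the lightweight 'Today'
# variant, while a full season rebuild passes the season string instead.
calculate_and_plot9d_season(period='Today')
# calculate_and_plot9d_season(period='2018-19')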
def make_forecasts_for_Thea():
    """July 2018: Make list of avalanche forecasts danger levels for regions Voss, Romsdalen, Svartisen
    and Salten (and those before them) for Thea Møllerhaug Lunde (Jernbanedirektoratet).

    Voss-Bergen ligger i for det meste i Voss-regionen vår.
    Mo i Rana-Fauske ligger i Svartisen og Salten.
    Åndalsnes-Bjorli ligger i varslingsregionen Romsdalen."""

    pickle_file_name = '{0}201807_avalanche_forecasts_thea.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Voss. ForecastRegionTID 124 from 2012-2016 and 3031 since.
        # Get Romsdalen. ForecastRegionTID 118 from 2012-2016 and 3023 since.
        # Get Svartisen. ForecastRegionTID 131 from 2012-2016 and 3017 since.
        # Get Salten. ForecastRegionTID 133 from 2012-2016 and 3016 since.

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [124, 118, 131, 133]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        years = ['2016-17', '2017-18']
        region_ids = [3031, 3023, 3017, 3016]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201807 Snøskredvarsel for Thea.txt'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    pass
Example #31
def get_kdv(view):
    """Imports a view view from regObs and returns a dictionary with <key, value> = <ID, Name>
    An view is requested from the regObs api if the pickle file is older than 3 days.

    :param view:    [string]    kdv view
    :return dict:   {}          view as a dictionary

    Ex of use: aval_cause_kdv = get_kdv('AvalCauseKDV')
    Ex of url for returning values for IceCoverKDV in norwegian:
    http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/ForecastRegionKDV?$filter=Langkey%20eq%201%20&$format=json
    """

    kdv_file_name = '{0}{1}.pickle'.format(env.local_storage, view)
    dict = {}

    if os.path.exists(kdv_file_name):

        max_file_age = 3
        # file_date_seconds = os.path.getctime(kdv_file_name)
        file_date_seconds = os.path.getmtime(kdv_file_name)
        file_date_datetime = dt.datetime.fromtimestamp(file_date_seconds)
        file_date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)

        if file_date_datetime < file_date_limit:
            ml.log_and_print("[info] getkdvelements.py -> get_kdv: Old xKDV. Removing file from local storage: {0}".format(kdv_file_name))
            os.remove(kdv_file_name)
            ordered_dict = get_kdv(view)
            mp.pickle_anything(ordered_dict, kdv_file_name)
        else:
            # ml.log_and_print("[info] getkdvelements.py -> get_kdv: Getting KDV from local storage: {0}".format(kdv_file_name))
            ordered_dict = mp.unpickle_anything(kdv_file_name, print_message=False)

    else:

        filter = 'filter=Langkey%20eq%201'

        if 'TripTypeKDV' in view:
            filter = 'filter=LangKey%20eq%201'

        url = 'https://api.nve.no/hydrology/regobs/{0}/OData.svc/{1}?${2}&$format=json'.format(env.odata_version, view, filter)
        lang_key = 1

        print("getkdvelements.py -> get_kdv: Getting KDV from URL: {0}".format(url))
        kdv = requests.get(url).json()

        for a in kdv['d']['results']:
            try:
                sort_order = a['SortOrder']
                is_active = a['IsActive']

                if 'AvalCauseKDV' in url and 9 < int(a['ID']) < 26:      # this table gets special treatment. Short names are in description and long names are in Name.
                    id = int(a['ID'])
                    name = a['Description']
                    description = a['Name']
                elif 'TripTypeKDV' in view:
                    id = int(a['TripTypeTID'])
                    name = a['Name']
                    description = a['Descr']
                else:
                    id = int(a['ID'])
                    name = a['Name']
                    description = a['Description']

                dict[id] = vc.KDVelement(id, sort_order, is_active, name, description, lang_key)

            except (RuntimeError, TypeError, NameError):
                pass

        ordered_dict = collections.OrderedDict(sorted(dict.items()))
        mp.pickle_anything(ordered_dict, kdv_file_name)

    return ordered_dict
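
# Following the example of use in the docstring: each value is a vc.KDVelement, so the active
# elements of a view can be listed like this (a small sketch).
aval_cause_kdv = get_kdv('AvalCauseKDV')
for tid, element in aval_cause_kdv.items():
    if element.IsActive:
        print(tid, element.Name)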
Example #32
def get_ice_thickness_observations(year, reset_and_get_new=False):
    """Gets all the observed ice thickness (RegistrationTID = 50) from regObs for one year.

    The inner workings of the method:
    1.   We have an option of resetting local storage (delete pickle) and thus forcing the get_new.
    2.1  Try opening a pickle; if it doesn't exist, an exception is thrown and we get new data.
    2.2  If the requested data is from a previous season, no changes are expected, so load the pickle
         without adding the last observations registered in regObs. Either way, don't get new data.
    2.3  If the requested data is from this season, set the request from_date to 7 days before the
         pickle's last modified date. Add these last observations to the pickled data, so getting all
         new data is not necessary.
    3.   If get new, it gets all new data for the season.
    4.   Else, load pickle and if some last obs are to be added, do so.

    :param year:                [string] Eg '2017-18'
    :param reset_and_get_new:   [bool]
    :return:                    ice_thickeness_obs_dict
    """

    log_referance = 'getregobsdata.py -> get_ice_thickness_observations'
    pickle_file_name = '{0}get_ice_thickness_observations_{1}.pickle'.format(se.local_storage, year)

    # 1. Remove pickle if it exists, forcing the get_new
    if reset_and_get_new:
        try:
            os.remove(pickle_file_name)
        except OSError:
            pass

    from_date, to_date = gm.get_dates_from_year(year)
    add_last_obs = None
    get_new = None

    try:
        mtime = os.path.getmtime(pickle_file_name)
        last_modified_date = dt.datetime.fromtimestamp(mtime).date()

        # if the file was modified after the season ended (that is, this is historical data), load it without requesting new.
        dt_to_date = dt.datetime.strptime(to_date, '%Y-%m-%d').date()
        if last_modified_date > dt_to_date:
            add_last_obs = False
        else:
            add_last_obs = True
            to_date = dt.date.today()
            from_date = last_modified_date - dt.timedelta(days=7)

        get_new = False

    except OSError:
        # file does not exist, so get_new.
        ml.log_and_print("{0}: No matching pickle found, getting new data.".format(log_referance))
        get_new = True

    if get_new:
        ml.log_and_print('{0}: Getting new for year {1}.'.format(log_referance, year))
        ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
        ice_thickeness_obs_dict = {}

        for o in ice_thickeness_obs:
            if o['RegistrationTid'] == 50:
                ice_column = _webapi_ice_col_to_ice_class(o)
                if ice_column is not None:
                    ice_thickeness_obs_dict[o['RegId']] = ice_column

        mp.pickle_anything(ice_thickeness_obs_dict, pickle_file_name)

    else:
        ice_thickeness_obs_dict = mp.unpickle_anything(pickle_file_name)

        if add_last_obs:
            ml.log_and_print("{0}: Adding observations from {1} to {2}".format(log_referance, from_date, to_date))
            new_ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
            new_ice_thickeness_obs_dict = {}

            for o in new_ice_thickeness_obs:
                if o['RegistrationTid'] == 50:
                    ice_column = _webapi_ice_col_to_ice_class(o)
                    if ice_column is not None:
                        new_ice_thickeness_obs_dict[o['RegId']] = ice_column

            for k,v in new_ice_thickeness_obs_dict.items():
                ice_thickeness_obs_dict[k] = v

            mp.pickle_anything(ice_thickeness_obs_dict, pickle_file_name)

    return ice_thickeness_obs_dict
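
# A minimal usage sketch: the returned dict maps regObs RegId to an ice column, so one season
# of ice thickness observations can be listed by registration id and observation date.
ice_thickness_obs = get_ice_thickness_observations('2018-19')
for regid, ice_column in ice_thickness_obs.items():
    print(regid, ice_column.date)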
Example #33
    def add_Inflow_DOP(self, Inflow_DOP_inn):
        messages = we.test_for_missing_elements(Inflow_DOP_inn, self.from_date, self.to_date)
        self.metadata += messages
        self.Inflow_DOP = Inflow_DOP_inn


    def add_Inflow_Chla(self, Inflow_Chla_inn):
        messages = we.test_for_missing_elements(Inflow_Chla_inn, self.from_date, self.to_date)
        self.metadata += messages
        self.Inflow_Chla = Inflow_Chla_inn


    def add_Inflow_DOC(self, Inflow_DOC_inn):
        messages = we.test_for_missing_elements(Inflow_DOC_inn, self.from_date, self.to_date)
        self.metadata += messages
        self.Inflow_DOC = Inflow_DOC_inn


if __name__ == "__main__":

    yesturday = (dt.date.today()-dt.timedelta(days=1)).strftime("%Y-%m-%d")
    #harvest_and_save_blindern('2000-01-01', yesturday)
    #harvest_and_save_nordnesfjelet('2014-08-01', yesturday)

    data = harvest_for_mylake_hakkloa('2013-04-01', '2015-10-01')
    mp.pickle_anything(data, data.output_file_path +'.pickle')
    data2 = mp.unpickle_anything('{0}HAK_input'.format(env.data_path) +'.pickle')

    mfd.write_mylake_inputfile(data2)