Code example #1
File: getmisc.py Project: NVE/varsomdata
def get_observer_group_member(group_id=None, output='List'):
    """Gets data on observers in a group. If no group is requested, all is retrieved.

    :param group_id:    [int]
    :param output:      [string] 'List' or 'Dict'
    :return:            [list] of class ObserverGroupMember or dictionary with observer id and observer nicks.
    """

    if group_id is None:
        url = 'http://api.nve.no/hydrology/regobs/{0}/Odata.svc/ObserverGroupMemberV/?$format=json'.format(
            env.odata_version)
    else:
        url = 'http://api.nve.no/hydrology/regobs/{0}/Odata.svc/ObserverGroupMemberV/?$filter=ObserverGroupID%20eq%20{1}&$format=json'.format(
            env.odata_version, group_id)
    ml.log_and_print(
        "[info] getmisc.py -> get_observer_group_member: {0}".format(url))

    result = requests.get(url).json()
    data = result['d']['results']
    data_out = [ObserverGroupMember(d) for d in data]

    if output == 'List':
        return data_out
    elif output == 'Dict':
        observer_dict = {}
        for o in data_out:
            observer_dict[o.ObserverID] = o.NickName
        return observer_dict
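A minimal usage sketch follows; the import path is an assumption based on the project layout, and the group id is hypothetical:

from varsomdata import getmisc as gm

# all observer groups, as a list of ObserverGroupMember objects
members = gm.get_observer_group_member()

# members of one (hypothetical) group as a {ObserverID: NickName} dictionary
nicks = gm.get_observer_group_member(group_id=1, output='Dict')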
Code example #2
def get_landslide_warnings_as_json(municipality,
                                   from_date,
                                   to_date,
                                   lang_key=1,
                                   recursive_count=5):
    """Selects landslide warnings and returns the json structured as given on the api as dict objects.

    :param municipality:    [int or list of ints]       Municipality numbers
    :param from_date:       [date or string as yyyy-mm-dd]
    :param to_date:         [date or string as yyyy-mm-dd]
    :param lang_key:        [int]                       Language setting. 1 is norwegian and 2 is english.
    :param recursive_count: [int]                      By default, attempt the same request this many times before giving up.

    :return warnings:       [warnings]

    Eg. https://api01.nve.no/hydrology/forecast/landslide/v1.0.5/api/Warning/Municipality/1201/1/2018-06-03/2018-07-03
    """

    # If input isn't a list, make it so
    if not isinstance(municipality, list):
        municipality = [municipality]

    landslide_warnings = []
    recursive_count_default = recursive_count  # need the default for later

    for m in municipality:

        if len(municipality) > 1:
            # if we are looping the initial list make sure each item gets the recursive count default
            recursive_count = recursive_count_default

        landslide_api_base_url = 'https://api01.nve.no/hydrology/forecast/landslide/v1.0.5/api'
        headers = {'Content-Type': 'application/json'}
        url = landslide_api_base_url + '/Warning/Municipality/{0}/{1}/{2}/{3}'.format(
            m, lang_key, from_date, to_date)

        # If at first you don't succeed, try and try again.
        try:
            landslide_warnings_municipal = requests.get(
                url, headers=headers).json()
            ml.log_and_print(
                '[info] getforecastapi.py -> get_landslide_warnings_as_json: {0} warnings found for {1} in {2} to {3}'
                .format(len(landslide_warnings_municipal), m, from_date,
                        to_date))
            landslide_warnings += landslide_warnings_municipal

        except Exception:
            ml.log_and_print(
                '[error] getforecastapi.py -> get_landslide_warnings_as_json: EXCEPTION. RECURSIVE COUNT {0} for {1} in {2} to {3}'
                .format(recursive_count, m, from_date, to_date))
            if recursive_count > 1:
                recursive_count -= 1  # count down
                landslide_warnings += get_landslide_warnings_as_json(
                    m,
                    from_date,
                    to_date,
                    lang_key,
                    recursive_count=recursive_count)

    return landslide_warnings
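A usage sketch mirroring the example URL in the docstring; the import path is an assumption:

from varsomdata import getforecastapi as gfa

# municipality 1201, norwegian text (lang_key=1), same period as the docstring example
warnings = gfa.get_landslide_warnings_as_json(1201, '2018-06-03', '2018-07-03', lang_key=1)
print(len(warnings))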
Code example #3
File: getmisc.py Project: NVE/varsomdata
def get_obs_location(from_date, to_date):
    """Finds obs locations submitted during a given period.

    :param from_date:   [date]
    :param to_date:     [date]
    :return locations:  [list] of class ObsLocation
    """

    odata_filter = "DtRegTime gt datetime'{0}' and DtRegTime lt datetime'{1}' and langkey eq 1".format(
        from_date, to_date)

    url = 'http://api.nve.no/hydrology/regobs/{0}/Odata.svc/ObsLocationV/?$filter={1}&$format=json'.format(
        env.odata_version, odata_filter)
    result = requests.get(url).json()
    data = result['d']['results']
    ml.log_and_print('[info] getmisc.py -> get_obs_location: {0}'.format(url))

    # if more than 1000 elements are requested, odata truncates data to 1000. We do more requests
    locations = [ObsLocation(d) for d in data]

    if from_date != to_date and from_date != dt.date(2016, 11, 22):
        if len(locations) == 1000:
            time_delta = to_date - from_date
            date_in_middle = from_date + time_delta / 2
            locations = get_obs_location(from_date,
                                         date_in_middle) + get_obs_location(
                                             date_in_middle, to_date)
    else:
        ml.log_and_print(
            '[warning] getmisc.py -> get_obs_location: More than 1000 locations on 2016.11.22'
        )

    return locations
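The recursion above works around OData truncating responses at 1000 rows by halving the date interval. A standalone sketch of the same pattern, with a hypothetical fetch(from_date, to_date) callable standing in for the actual request:

def fetch_all(fetch, from_date, to_date, cap=1000):
    # fetch is any callable returning a list of rows for [from_date, to_date]
    data = fetch(from_date, to_date)
    if len(data) < cap or (to_date - from_date).days <= 1:
        # below the cap, or the interval cannot be split further
        return data
    middle = from_date + (to_date - from_date) / 2
    # note: rows stamped exactly at middle follow the gt/lt semantics of the underlying query
    return fetch_all(fetch, from_date, middle, cap) + fetch_all(fetch, middle, to_date, cap)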
Code example #4
def adjust_temperature_to_new_altitude(weather_element_list, new_altitude):
    """If the weather parameter represents a different altitude, adjust the time series to a new altitude
    given a laps rate given in constants.

    :param weather_element_list:
    :param new_altitude:
    :return:
    """

    if new_altitude is None:
        ml.log_and_print(
            "[warning] weatherelement.py -> adjust_temperature_to_new_altitude: new_element=None and no adjustments made."
        )
        return weather_element_list

    else:
        original_altitude = weather_element_list[0].Metadata[
            'WeatherDataAltitude']
        temp_delta = (new_altitude - original_altitude) * const.laps_rate

        for we in weather_element_list:
            original_value = we.Value
            we.Value -= temp_delta
            we.Metadata['WeatherDataAltitude'] = new_altitude
            we.Metadata['OriginalAltitude'] = original_altitude
            we.Metadata[
                'AltitudeAdjustment'] = 'Adjusting elevation by {0} m, thus also temp from {1}C to {2}C.'.format(
                    new_altitude - original_altitude, original_value, we.Value)

        # ml.log_and_print("[info] weatherelement.py -> adjust_temperature_to_new_altitude: old:{0}masl and new:{1}masl and tempdelta:{2}C".format(original_altitude, new_altitude, temp_delta))
        return weather_element_list
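A worked example of the adjustment above. The actual value of const.laps_rate is not shown in the source; 0.0065 C per metre (the standard atmospheric lapse rate) is assumed here:

laps_rate = 0.0065            # assumed value, degrees C per metre of elevation
original_altitude = 500       # masl, as read from the first element's metadata
new_altitude = 800            # masl

temp_delta = (new_altitude - original_altitude) * laps_rate   # 300 * 0.0065 = 1.95 C
value_at_500 = -2.0
value_at_800 = value_at_500 - temp_delta                      # -3.95 C, colder at the higher altitude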
Code example #5
File: getmisc.py Project: NVE/varsomdata
def get_forecast_region_for_regid(reg_id):
    """Returns the forecast region used at a given place in a given season.

    :param reg_id: [int]            regid in regObs
    :return:       [int]            ForecastRegionTID from regObs
                   [string]         ForecastRegionName from regObs
                   [observation]    The full observation on this regID
    """

    region_id, region_name, observation = None, None, None

    try:
        observation = go.get_all_observations(reg_ids=reg_id)
        utm33x = observation[0].UTMEast
        utm33y = observation[0].UTMNorth
        date = observation[0].DtObsTime
        season = get_season_from_date(date.date())

        region_id, region_name = get_forecast_region_for_coordinate(
            utm33x, utm33y, season)

    except Exception:
        error_msg = sys.exc_info()[0]
        ml.log_and_print(
            '[error] getmisc.py -> get_forecast_region_for_regid: Exception on RegID={0}: {1}.'
            .format(reg_id, error_msg))

    return region_id, region_name, observation
Code example #6
    def set_aval_cause_attributes(self, problem_object):

        if isinstance(problem_object, go.AvalancheEvalProblem2):
            self.cause_attribute_crystal_tid = problem_object.AvalCauseAttributeCrystalTID
            self.cause_attribute_light_tid = problem_object.AvalCauseAttributeLightTID
            self.cause_attribute_soft_tid = problem_object.AvalCauseAttributeSoftTID
            self.cause_attribute_thin_tid = problem_object.AvalCauseAttributeThinTID

            self.cause_attribute_crystal = problem_object.AvalCauseAttributeCrystalName
            self.cause_attribute_light = problem_object.AvalCauseAttributeLightName
            self.cause_attribute_soft = problem_object.AvalCauseAttributeSoftName
            self.cause_attribute_thin = problem_object.AvalCauseAttributeThinName

            # This was fixed on api in nov 2017.
            # if self.lang_key == 2 and self.cause_attribute_crystal_tid > 0:
            #     self.cause_attribute_crystal = 'A big and identifiable crystal in the weak layer.'
            # if self.lang_key == 2 and self.cause_attribute_light_tid > 0:
            #     self.cause_attribute_light = 'The weak layer collapses easily and clean (easy propagation).'
            # if self.lang_key == 2 and self.cause_attribute_soft_tid > 0:
            #     self.cause_attribute_soft = 'The overlying slab is soft.'
            # if self.lang_key == 2 and self.cause_attribute_thin_tid > 0:
            #     self.cause_attribute_thin = 'The collapsing weak layer is thin < 3 cm.'

        else:
            ml.log_and_print(
                '[warning] getproblems.py -> AvalancheProblem.set_aval_cause_attributes: Avalanche problem class wrong for cause attributes.'
            )
Code example #7
def make_region_plots(all_observations_list,
                      region_ids,
                      months,
                      plot_folder=env.plot_folder,
                      html_folder=env.output_folder + 'views/'):
    """Method prepares data for plotting and making the corresponding table for the observations for one
    region.

    :param all_observations_list:
    :param region_ids:
    :param months:
    :param plot_folder:         Folder for saving the plots.
    :param html_folder:         Output folder for the html files generated.
    :return:
    """

    for frid in region_ids:

        region_observations_list = [
            all_obs for all_obs in all_observations_list
            if all_obs.ForecastRegionTID == frid
        ]
        region_name = gkdv.get_name('ForecastRegionKDV', frid)
        ml.log_and_print(
            "[info] plotcalendardata.py -> make_region_plots: {} {}".format(
                frid, region_name))

        # plot one month at a time
        for m in months:
            dates = _make_day_data_list(region_observations_list, m, frid=frid)

            _make_plot(dates, region_name=region_name, plot_folder=plot_folder)
            _make_html(dates, region_name=region_name, html_folder=html_folder)
Code example #8
File: ice.py Project: ragnarekker/Ice-modelling
def add_layer_conductance_to_total(u_total, k, h, layer_enum):
    """Adds a layers conductance to a total conductance.

    Conductance is conductivity pr unit length. I.e. U = k/h where k is conductivity and h is height of ice layer
    Sum of conductance follows the rule 1/U = 1/U1 + 1/U2 + ... + 1/Un

    Method incorporates Ashtons (1989) method for thin ice growth, declaring that the top part of ice has a lower
    conductivity.
    """

    if layer_enum == 10:  # Black ice
        surface_k_reduction = const.surface_k_reduction_black_ice
        h_min_for_conductivity = const.h_min_for_conductivity_black_ice
    elif layer_enum == 11:  # slush ice
        surface_k_reduction = const.surface_k_reduction_slush_ice
        h_min_for_conductivity = const.h_min_for_conductivity_slush_ice
    elif layer_enum >= 20:  # Snow
        surface_k_reduction = const.surface_k_reduction_snow
        h_min_for_conductivity = const.h_min_for_conductivity_snow
    else:  # If enum is unknown (most likely slush or water layer), use black_ice conditions
        ml.log_and_print(
            "[warning] ice.py -> add_layer_conductance_to_total: Unknown layer enum {}"
            .format(layer_enum))
        surface_k_reduction = const.surface_k_reduction_black_ice
        h_min_for_conductivity = const.h_min_for_conductivity_black_ice

    # Max conductance always defined by black ice material constants.
    u_max = const.k_black_ice * const.surface_k_reduction_black_ice / const.h_min_for_conductivity_black_ice

    # No Ice? Set initial conductance to the max possible
    if h == 0 and u_total is None:
        u_total = u_max

    # Else we have ice height
    else:

        # if u_total is None, this is the surface layer.
        if u_total is None:

            # If height is less than minimum height for conductivity, all ice conductivity is reduced
            if h <= h_min_for_conductivity:
                u_total = k * surface_k_reduction / h

            # Else if height is more than minimum height, split layer in two, adding reduced conductivity to the rest
            else:
                u_total = u_max
                u_total = 1 / (1 / u_total + (h - h_min_for_conductivity) / k)

        # Else we are in deeper layers and we don't need to adjust for surface effects
        else:
            u_total = 1 / (1 / u_total + h / k)

    # Too large conductance. Decrease to max value
    if u_total > u_max:
        u_total = u_max

    return u_total
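A worked example of the series sum above, 1/U = 1/U1 + 1/U2; the conductivities are illustrative values, not the project's constants:

k_black_ice = 2.24        # W/(m*K), illustrative
k_snow = 0.25             # W/(m*K), illustrative

u1 = k_black_ice / 0.30   # 0.30 m black ice -> ~7.47 W/(m2*K)
u2 = k_snow / 0.10        # 0.10 m snow      ->  2.50 W/(m2*K)

u_total = 1 / (1 / u1 + 1 / u2)   # ~1.87 W/(m2*K): the insulating snow layer dominates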
Code example #9
File: getmisc.py Project: NVE/varsomdata
def get_registration(from_date,
                     to_date,
                     output='List',
                     geohazard_tid=None,
                     application_id='',
                     include_deleted=False):
    """Gets data from the Registration table. Adds observer nickname if list is requested and if not otherwise
    specified deleted registrations are taken out.

    :param from_date:
    :param to_date:
    :param output:
    :param geohazard_tid:
    :param application_id:
    :param include_deleted:
    :return:                raw data from the request or list of class Registration objects
    """

    odata_filter = ""
    if geohazard_tid is not None:
        odata_filter += "GeoHazardTID eq {0} and ".format(geohazard_tid)
    odata_filter += "DtRegTime gt datetime'{0}' and DtRegTime lt datetime'{1}'".format(
        from_date, to_date)
    if "Web and app" in application_id:  # does not work..
        odata_filter += " and (ApplicationId eq guid'{0}' or ApplicationId eq guid'{1}')".format(
            '', '')

    url = 'http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{1}/?$filter={2}&$format=json'.format(
        env.odata_version, 'Registration', odata_filter)
    ml.log_and_print(
        "[info] getmisc.py -> get_registration: ..to {0}".format(url),
        print_it=True)

    result = requests.get(url).json()
    data = result['d']['results']

    # if more than 1000 elements are requested, odata truncates data to 1000. We do more requests
    if len(data) == 1000:
        time_delta = to_date - from_date
        date_in_middle = from_date + time_delta / 2
        data = get_registration(from_date, date_in_middle, output='Raw', geohazard_tid=geohazard_tid, application_id=application_id) \
                 + get_registration(date_in_middle, to_date, output='Raw', geohazard_tid=geohazard_tid, application_id=application_id)

    if output == 'Raw':
        return data
    elif output == 'List':
        data_out = [Registration(d) for d in data]
        observer_nicks = get_observer_v()
        # NickName is not originally in the Registration table
        for d in data_out:
            d.NickName = observer_nicks[d.ObserverID]
            # why list??? d.NickName = [observer_nicks[d.ObserverID]]
        if not include_deleted:
            data_out = [d for d in data_out if d.DeletedDate is None]
        return data_out
Code example #10
def get_varsom_incidents(add_forecast_regions=False, add_observations=False, add_forecasts=False):
    """Returns the incidents shown on varsom.no in a list of VarsomIncident objects.
    Data input is a utf-8 formatted csv file in input folder. Original file might have newlines and
    semicolons (;) in the cells. These need to be removed before saving as csv.

    :param add_forecast_regions:    [bool] If true the regid is used to get coordinates and the forecast region at the
                                    observation date is added. Note: if true, expect retrieving the data to take some time.
    :param add_observations:        [bool] If true the observation is added when looking up the region name. This
                                    option is only taken into account if add_forecast_regions is true.
    :param add_forecasts:           [bool] If true the forecast at that time and place is added to the incident. This
                                    option is only taken into account if add_forecast_regions is true.
    """

    # incidents_file = '{}varsomsineskredulykker.csv'.format(env.varsom_incidents)
    incidents_file = '{}varsomincidents3.csv'.format(env.varsom_incidents)
    varsom_incidents = rf.read_csv_file(incidents_file, VarsomIncident)

    # map incident to forecast region
    if add_forecast_regions:
        for i in varsom_incidents:
            if not i.regid:
                ml.log_and_print("[warning] getmisc.py -> get_varsom_incidents: No regid on incident on {}. No forecast region found.".format(i.date))
            else:
                region_id, region_name, observation = get_forecast_region_for_regid(i.regid[0])
                i.add_forecast_region(region_id, region_name)
                print("regid {}: {}".format(i.regid[0], i.date))

                if add_observations:
                    i.add_observation(observation[0])
                    if len(i.regid) > 1:
                        observations = go.get_all_observations(reg_ids=i.regid[1:])
                        for o in observations:
                            i.add_observation(o)

        if add_forecasts:
            years = ['2014-15', '2015-16', '2016-17', '2017-18', '2018-19']        # the years with data

            all_forecasts = []
            for y in years:
                region_ids = get_forecast_regions(year=y)
                from_date, to_date = get_forecast_dates(y)
                all_forecasts += gd.get_forecasted_dangers(region_ids, from_date, to_date)

            for i in varsom_incidents:
                incident_date = i.date
                incident_region_id = i.region_id
                print("{}: {}".format(i.location, incident_date))
                for f in all_forecasts:
                    forecast_date = f.date
                    forecast_region_id = f.region_regobs_id
                    if incident_date == forecast_date:
                        if incident_region_id == forecast_region_id:
                            i.add_forecast(f)

    return varsom_incidents
Code example #11
def get_kdv(x_kdv, get_new=False):
    """Imports a x_kdv view from regObs and returns a dictionary with <key, value> = <ID, Name>
    An x_kdv is requested from the regObs api if a pickle file newer than a week exists.

    :param x_kdv:   [string]    x_kdv view
    :return dict:   {}          x_kdv as a dictionary

    Ex of use: aval_cause_kdv = get_kdv('AvalCauseKDV')
    Ex of url for returning values for IceCoverKDV in norwegian:
    http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/ForecastRegionKDV?$filter=Langkey%20eq%201%20&$format=json
    """

    kdv_file = '{0}{1}.pickle'.format(se.kdv_elements_folder, x_kdv)
    kdv_dict = {}

    if get_new:
        url = 'http://api.nve.no/hydrology/regobs/{0}/OData.svc/{1}?$filter=Langkey%20eq%201%20&$format=json'\
            .format(se.odata_version, x_kdv)

        ml.log_and_print("getregobsdata -> get_kdv: Getting KDV from URL:{0}".format(url))

        kdv = requests.get(url).json()

        for a in kdv['d']['results']:
            try:
                if 'AvalCauseKDV' in url and a['ID'] > 9 and a['ID'] < 26:      # this table gets special treatment
                    kdv_dict[a["ID"]] = a["Description"]
                else:
                    kdv_dict[a["ID"]] = a["Name"]
            except (RuntimeError, TypeError, NameError):
                pass

        # pickle once after the loop, not once per element
        mp.pickle_anything(kdv_dict, kdv_file)

    else:
        if os.path.exists(kdv_file):

            # Useful to test if the file is old and if so make a new one
            max_file_age = 7
            mtime = os.path.getmtime(kdv_file)
            last_modified_date = dt.datetime.fromtimestamp(mtime).date()
            date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)

            # If file older than date limit, request a new.
            if last_modified_date < date_limit.date():
                kdv_dict = get_kdv(x_kdv, get_new=True)
            else:
                # ml.log_and_print("getregobsdata -> get_kdv: Getting KDV from pickle:{0}".format(kdv_file))
                kdv_dict = mp.unpickle_anything(kdv_file, print_message=False)

        else:
            kdv_dict = get_kdv(x_kdv, get_new=True)

    return kdv_dict
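A usage sketch; the view name comes from the docstring examples:

forecast_regions = get_kdv('ForecastRegionKDV')                 # {ID: Name}, served from a pickle up to a week old
forecast_regions = get_kdv('ForecastRegionKDV', get_new=True)   # force a fresh request to the api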
Code example #12
def calculate_and_plot9d_regid(regid,
                               plot_folder=se.plot_folder,
                               observed_ice=None):
    """For an ice thickness on a given regObs RegID, a plot of will be made of the following 9 days development.
    If observed_ice is not given, it is looked up on regObs api by using its RegID. If observation is older
    than 11 days, no plot is made. Weather data is from GTS.

    1.1 If observed ice is none, get some on this regid
    1.2 Else use what is provided, but it has to be a list.
    2.  Get weather data.
    3.  Plot file if it is missing or it is newer than 11 days.

    :param regid:           [Int]           RegID as defined in regObs.
    :param plot_folder:     [string]        Path of folder for plots.
    :param observed_ice:    [ice.IceColumn] Optional. If not given, one will be looked up.
    """

    # if no observed ice is given, get it. Also, observed ice in the plotting routine is a list, so make it so.
    if not observed_ice:
        observed_ice = [gro.get_ice_thickness_on_regid(regid)]
    else:
        observed_ice = [observed_ice]

    x, y = observed_ice[0].metadata['UTMEast'], observed_ice[0].metadata[
        'UTMNorth']

    from_date = observed_ice[0].date.date()
    to_date = from_date + dt.timedelta(days=9)

    # Get weather and snow data
    gridTemp = gts.getgts(x, y, 'tm', from_date, to_date)
    gridSno = gts.getgts(x, y, 'sdfsw', from_date, to_date)
    gridSnoTot = gts.getgts(x, y, 'sd', from_date, to_date)

    temp, date_times = we.strip_metadata(gridTemp, get_date_times=True)
    dates = [d.date() for d in date_times]
    sno = we.strip_metadata(gridSno)
    snotot = we.strip_metadata(gridSnoTot)
    cc = dp.clouds_from_precipitation(sno)

    # Define file name and tests for modelling and plotting
    plot_filename = '{0}{1}.png'.format(plot_folder, regid)

    try:
        icecover = it.calculate_ice_cover_air_temp(observed_ice[0], date_times,
                                                   temp, sno, cc)
        pts.plot_ice_cover_9dogn(icecover, observed_ice[0], dates, temp, sno,
                                 snotot, plot_filename)
    except Exception:
        # raise
        error_msg = sys.exc_info()[0]
        ml.log_and_print(
            "[Error] calculateandplot.py -> calculate_and_plot9d_regid: {}. Could not plot {}."
            .format(error_msg, regid))
Code example #13
    def set_avalanche_problems(self, problems):
        try:
            for p in problems:
                _ap = AvalancheWarningProblem()
                _ap.from_dict(p)
                self.avalanche_problems.append(_ap)
            # make sure lowest index (main problem) is first
            self.avalanche_problems.sort(
                key=lambda _p: _p.avalanche_problem_id)
        except TypeError:
            ml.log_and_print(
                'getforecastapi.py -> AvalancheWarning.set_avalanche_problems(): TypeError'
            )
Code example #14
def pickle_anything(something_to_pickle, file_name_and_path, print_message=True):
    """Pickles anything.

    :param something_to_pickle:
    :param file_name_and_path:
    :param print_message:
    :return:
    """

    pickle.dump(something_to_pickle, open(file_name_and_path, 'wb'))

    if print_message is True:
        ml.log_and_print("[info] makepickle.py -> pickle_anything: {0} pickled.".format(file_name_and_path))
Code example #15
def unpickle_anything(file_name_and_path, print_message=True):
    """Unpickles anything.

    :param file_name_and_path:
    :param print_message:
    :return something_to_unpickle:
    """

    something_to_unpickle = pickle.load( open(file_name_and_path, 'rb') )

    if print_message is True:
        ml.log_and_print("[info] makepickle.py -> unpickle_anything: {0} unpickled.".format(file_name_and_path))

    return something_to_unpickle
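A roundtrip sketch with the two helpers above; the path and payload are hypothetical:

payload = {'region_id': 3003, 'danger_levels': [2, 3, 3]}   # hypothetical data

pickle_anything(payload, 'local_storage/example.pickle')
restored = unpickle_anything('local_storage/example.pickle')

assert restored == payload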
Code example #16
def patch_novalue_in_weather_element_list(weather_element_list):
    """If the data provider sends a gapless data sett where no value is given as None in WeatherElement lists
    with gaps, this will patch it up. Simple patching using nearest neighbour values/avarages.

    :param weather_element_list:
    :return: weather_element_list
    """

    log_reference = 'weatherelement.py -> patch_novalue_in_weather_element_list: '
    location_id = weather_element_list[0].LocationID
    element_id = weather_element_list[0].ElementID

    # make sure we have first value
    if weather_element_list[0].Value is None:
        looker = 1
        replacement = weather_element_list[0 + looker].Value

        while replacement is None:
            looker += 1

            if looker > len(weather_element_list)-1:    # case where all values are None
                ml.log_and_print('{}No values in WeatherElement list. Patching not possible. {} {}'.format(log_reference, location_id, element_id))
                return weather_element_list

            replacement = weather_element_list[0 + looker].Value

        weather_element_list[0].Value = replacement
        weather_element_list[0].Metadata['Value patched'] = 'First element missing. Use the next element with value {}'.format(replacement)
        ml.log_and_print('{}First date value missing on {} {} {}. Adding value {}.'.format(log_reference, location_id, weather_element_list[0].Date, element_id, replacement))

    # and the last value
    if weather_element_list[-1].Value is None:
        looker = 1
        replacement = weather_element_list[-1 - looker].Value

        while replacement is None:
            looker += 1
            replacement = weather_element_list[-1 - looker].Value

        weather_element_list[-1].Value = replacement
        weather_element_list[-1].Metadata['Value patched'] = 'Last element missing. Use the previous element with value {}'.format(replacement)
        ml.log_and_print('{}Last date value missing on {} {} {}. Adding value {}.'.format(log_reference, location_id, weather_element_list[-1].Date, element_id, replacement))

    # then check the ones in the middle
    for i, we in enumerate(weather_element_list):

        if we.Value is None:
            previous_value = weather_element_list[i-1].Value
            looker = 1
            next_value = weather_element_list[i + looker].Value
            while next_value is None:
                looker += 1
                next_value = weather_element_list[i + looker].Value
            average_value = previous_value + 1/(2*looker)*(next_value-previous_value)   # weight next value less if looker has gone looking.
            weather_element_list[i].Value = average_value
            weather_element_list[i].Metadata['Value patched'] = 'Use average value {}'.format(average_value)
            ml.log_and_print('{}Value missing on {} {} {}. Adding avg value {}.'.format(log_reference, location_id, weather_element_list[i].Date, element_id, average_value))

    return weather_element_list
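The middle-of-list patching above weights the next value less the further it had to look for one. The same interpolation on plain numbers, as a self-contained sketch:

values = [1.0, None, None, 4.0]

for i, v in enumerate(values):
    if v is None:
        previous_value = values[i - 1]          # already patched, so never None
        looker = 1
        while values[i + looker] is None:
            looker += 1
        next_value = values[i + looker]
        # weight the next value less if looker had to go looking
        values[i] = previous_value + 1 / (2 * looker) * (next_value - previous_value)

print(values)   # [1.0, 1.75, 2.875, 4.0]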
Code example #17
def get_ice_thickness_layers(RegID):
    """
    This method returns the ice layers of a given registration (RegID) in regObs. It reads only what is below the first
    solid ice layer. Thus snow and slush on the ice are not covered here and are added separately in the public method
    for retrieving the full ice column.

    This method is an internal method for getregobsdata.py

    :param RegID:
    :return:

    Example of an ice layer object in regObs:
    http://api.nve.no/hydrology/regobs/v0.9.5/Odata.svc/IceThicknessLayerV?$filter=RegID%20eq%2034801%20and%20LangKey%20eq%201&$format=json

    """

    view = 'IceThicknessLayerV'

    url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/{1}?" \
          "$filter=RegID eq {2} and LangKey eq 1&$format=json"\
        .format(se.odata_version, view, RegID)
    data = requests.get(url).json()
    datalist = data['d']['results']

    layers = []

    for l in datalist:

        thickness = l['IceLayerThickness']
        if thickness is None or float(thickness) == 0:
            ml.log_and_print('getregobsdata.py -> get_ice_thickness_layers: RegID {0} has ice layers of None thickness.'.format(RegID))
            # return empty list if some layers are zero or None.
            reversed_layers = []
            return reversed_layers

        else:
            regobs_layer_name = l['IceLayerName']
            layer_type = get_tid_from_name('IceLayerKDV', regobs_layer_name)
            layer_name = get_ice_type_from_tid(layer_type)

            layer = ice.IceLayer(float(thickness), layer_name)
            layers.append(layer)

    return layers
Code example #18
def write_weather_element_list(data, file_name='', extension='csv'):
    """A quick way to print a list of weatherements to file.

    :param data:
    :param file_name:
    :param extension:
    :return:
    """

    if len(data) == 0:
        ml.log_and_print(
            "makefiledata -> write_weather_element: No data makes no file.")

    elif not isinstance(data[0], we.WeatherElement):
        ml.log_and_print(
            "makefiledata -> write_weather_element: This method only for weather elements."
        )

    else:
        if file_name == '':
            #file_name = '{0}test_write_weather_element.{1}'.format(se.data_path, extension)
            file_name = '{0}{1} {2} {3}-{4}.{5}'.format(
                se.data_path, data[0].LocationID, data[0].ElementID,
                data[0].Date.strftime('%Y%m%d'),
                data[-1].Date.strftime('%Y%m%d'), extension)

        f = open(file_name, 'a', encoding='utf-8')

        # write header
        f.write('{0} {1} from {2} to {3}\n'.format(
            data[0].LocationID, data[0].ElementID,
            data[0].Date.strftime('%Y%m%d %H:%M'),
            data[-1].Date.strftime('%Y%m%d %H:%M')))

        # write values
        for d in data:

            text_line = '{0};{1}'.format(d.Date.strftime('%Y%m%d/%H%M'),
                                         d.Value)
            text_line += '\n'
            f.write(text_line)

        f.close()
Code example #19
def get_data(region_id, start_date, end_date, get_new=True):
    """Gets all the data needed in the plots and pickles it so that I don't need to do requests to make plots.

    :param region_id:       [int] Region ID is an int as given in ForecastRegionKDV
    :param start_date:      [date] Start date.
    :param end_date:        [date] End date.
    :param get_new:         [bool] If true, new data is requested. If false, a local pickle is used for data.
    :return problems, dangers, aval_indexes:
    """

    file_name = "{3}plotdangerandproblem_region{0}_{1}{2}.pickle".format(
        region_id, start_date.strftime('%Y'), end_date.strftime('%y'),
        env.local_storage)

    if not get_new and not os.path.exists(file_name):
        get_new = True
        ml.log_and_print(
            "[info] {0}get_data: pickle missing, getting new data.".format(
                log_reference),
            print_it=True)

    if get_new:
        dangers = gd.get_all_dangers(region_id, start_date, end_date)

        # Early years don't have the avalanche problem we will be analyzing
        if start_date > dt.date(2014, 11, 1):
            problems = gp.get_all_problems(region_id,
                                           start_date,
                                           end_date,
                                           add_danger_level=False)
        else:
            problems = []

        aval_indexes = gm.get_avalanche_index(start_date,
                                              end_date,
                                              region_ids=region_id)
        mp.pickle_anything([problems, dangers, aval_indexes], file_name)

    else:
        problems, dangers, aval_indexes = mp.unpickle_anything(file_name)

    return problems, dangers, aval_indexes
Code example #20
File: readfile.py Project: widforss/varsomdata
def read_configuration_file(file_name, element_class):
    """

    :param file_name:
    :param element_class:
    :return:
    """

    ml.log_and_print(
        "[info] readfile.py -> read_configuration_file: Reading {0}".format(
            file_name))

    with open(file_name, 'rb') as f:
        inn_data = f.read()

    inn_data = inn_data.decode('utf-8')

    inn_data = inn_data.replace('\r', '\n')
    inn_data = inn_data.replace('\n\n', '\n')

    # separate the rows
    inn_data = inn_data.split('\n')

    separator = ';'
    elements = []
    for i in range(1, len(inn_data), 1):

        inn_data[i] = inn_data[i].strip()  # get rid of ' ' and '\n' and such
        if inn_data[i] == '':  # blank line at end of file
            break

        row = inn_data[i].split(
            separator)  # splits line into list of elements in the line

        element = element_class()
        element.add_configuration_row(row)

        elements.append(element)

    return elements
Code example #21
def make_observer_plots(all_observations_list,
                        observer_list,
                        months,
                        plot_folder=env.plot_folder,
                        html_folder=env.output_folder + 'views/'):
    """Method prepares data for plotting and making the corresponding table for the observations for a list of
    observers.

    :param all_observations_list:
    :param observer_list:           [list of ObserverData]
    :param months:
    :param plot_folder:         Folder for saving the plots.
    :param html_folder:         Output folder for the html files generated.
    :return:
    """

    # if not a list, make it so
    if not isinstance(observer_list, list):
        observer_list = [observer_list]

    for o in observer_list:

        ml.log_and_print(
            "[info] plotcalendardata.py -> make_observer_plots: {} {}".format(
                o.observer_id, o.observer_nick))
        observers_observations_list = [
            all_obs for all_obs in all_observations_list
            if all_obs.ObserverId == o.observer_id
        ]

        # plot one month at a time
        for m in months:
            dates = _make_day_data_list(observers_observations_list, m, o=o)

            _make_plot(dates,
                       observer_name=o.observer_nick,
                       plot_folder=plot_folder)
            _make_html(dates,
                       observer_id=o.observer_id,
                       html_folder=html_folder)
Code example #22
def get_observations_on_location_id(location_id, year, get_new=False):
    """Uses new or stored data from get_all_season_ice and picks out one requested location.
    First ice cover is mapped to Ice.IceColumn of zero height. Ice cover lost (mid season or last) the same.

    :param location_id:     [int] location id as used in regObs
    :param year:            [string] Eg '2018-19'
    :param get_new:         [bool] if get_new, new data is requested from regObs
    :return:                [list of IceThickness]
    """

    all_locations = get_all_season_ice(year, get_new=get_new)

    # get_all_season_ice returns a dictionary with observations grouped by location_id.
    observations_on_location_for_modeling = []

    try:
        observations_on_location_for_modeling = all_locations[location_id]

    except Exception as e:
        ml.log_and_print("getregobsdata.py -> get_observations_on_location_id: location_id {0} not found. {1}".format(location_id, e), print_it=True)

    return observations_on_location_for_modeling
Code example #23
def get_dates_from_year(year, date_format='yyyy-mm-dd'):
    """Returns start and end dates for given season. Hydrological year from 1. sept.
     Format may be specified for datetime or date or string (default).

    :param year:            [String]    E.g. '2018-19'
    :param date_format:     [String]    'yyyy-mm-dd', 'date' or 'datetime'
    :return:
    """

    log_ref = 'getregobsdata.py -> get_dates_from_year:'

    from_year = int(year[0:4])
    century = int(year[0:2]) * 100
    year_in_century = int(year[5:7])

    # We build for the future. Method will work across century boundaries.
    if year_in_century != 0:
        # Same century
        last_year = century + year_in_century
    else:
        # New century
        last_year = century + 100 + year_in_century

    from_date = str(from_year) + '-09-01'
    to_date = str(last_year) + '-09-01'

    if 'yyyy-mm-dd' in date_format:
        return from_date, to_date
    # test 'datetime' before 'date' since 'date' is a substring of 'datetime'
    elif 'datetime' in date_format:
        from_date = dt.datetime.strptime(from_date, '%Y-%m-%d')
        to_date = dt.datetime.strptime(to_date, '%Y-%m-%d')
        return from_date, to_date
    elif 'date' in date_format:
        from_date = dt.datetime.strptime(from_date, '%Y-%m-%d').date()
        to_date = dt.datetime.strptime(to_date, '%Y-%m-%d').date()
        return from_date, to_date
    else:
        ml.log_and_print("[Error] {0} Date format not supported.".format(log_ref))
        return 'Date format not supported.'
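A usage sketch for the season helper above:

from_date, to_date = get_dates_from_year('2018-19')                      # ('2018-09-01', '2019-09-01')
from_date, to_date = get_dates_from_year('2018-19', date_format='date')  # datetime.date objects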
Code example #24
def get_masl_from_utm33(x, y):
    """Returns the altitude of a given UTM33N coordinate.

    Method uses an NVE map service which covers only Norway. If NoData, the method returns None.

    :param x:       [int] east
    :param y:       [int] north
    :return masl:   [int] meters above sea level
    """

    url = 'https://gis3.nve.no/arcgis/rest/services/ImageService/SK_DTM20_NSF/ImageServer/identify' \
          '?geometry={0},{1}&geometryType=esriGeometryPoint&inSR=32633&spatialRel=esriSpatialRelIntersects' \
          '&relationParam=&objectIds=&where=&time=&returnCountOnly=false&returnIdsOnly=false&returnGeometry=false' \
          '&maxAllowableOffset=&outSR=&outFields=*&f=pjson'.format(x, y)

    data = requests.get(url).json()
    masl = data['value']

    if 'NoData' in masl:
        ml.log_and_print("[warning] getmisc.py -> get_masl_from_utm33: No data elevation data for x:{} y:{}".format(x,y))
        return None
    else:
        return int(masl)
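A usage sketch; the coordinates are hypothetical UTM33N values inside Norway:

masl = get_masl_from_utm33(261000, 6650000)
if masl is None:
    print('No elevation data at this point.')
else:
    print('Elevation: {} masl'.format(masl))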
Code example #25
def _make_one_request(from_date=None, to_date=None, reg_id=None, registration_types=None,
        region_ids=None, location_id=None, observer_id=None, observer_nick=None, observer_competence=None,
        group_id=None, output='List', geohazard_tids=None, lang_key=1, recursive_count=5):
    """Part of get_data method. Parameters the same except observer_id and reg_id can not be lists.
    """

    # Dates in the web-api request are strings
    if isinstance(from_date, dt.date):
        from_date = dt.date.strftime(from_date, '%Y-%m-%d')
    elif isinstance(from_date, dt.datetime):
        from_date = dt.datetime.strftime(from_date, '%Y-%m-%d')

    if isinstance(to_date, dt.date):
        to_date = dt.date.strftime(to_date, '%Y-%m-%d')
    elif isinstance(to_date, dt.datetime):
        to_date = dt.datetime.strftime(to_date, '%Y-%m-%d')

    data = []  # data from one query

    # query object posted in the request
    rssquery = {'LangKey': lang_key,
                'RegId': reg_id,
                'ObserverGuid': None,  # eg. '4d11f3cc-07c5-4f43-837a-6597d318143c',
                'SelectedRegistrationTypes': _reg_types_dict(registration_types),
                'SelectedRegions': region_ids,
                'SelectedGeoHazards': geohazard_tids,
                'ObserverId': observer_id,
                'ObserverNickName': observer_nick,
                'ObserverCompetence': observer_competence,
                'GroupId': group_id,
                'LocationId': location_id,
                'FromDate': from_date,
                'ToDate': to_date,
                'NumberOfRecords': None,  # int
                'Offset': 0}

    url = 'https://api.nve.no/hydrology/regobs/webapi_{0}/Search/Rss?geoHazard=0'.format(se.web_api_version)
    more_available = True

    # get data from regObs api. It returns 100 items at a time. If more, continue requesting with an offset. Paging.
    while more_available:

        # try or if there is an exception, try again.
        try:
            r = requests.post(url, json=rssquery)
            responds = r.json()
            data += responds['Results']

            if output == 'Count nest':
                ml.log_and_print('getobservations.py -> _make_one_request: total matches {0}'.format(responds['TotalMatches']))
                return [responds['TotalMatches']]

        except Exception:
            ml.log_and_print("getobservations.py -> _make_one_request: EXCEPTION. RECURSIVE COUNT {0}".format(recursive_count))
            if recursive_count > 1:
                recursive_count -= 1  # count down
                data += _make_one_request(from_date=from_date,
                                          to_date=to_date,
                                          reg_id=reg_id,
                                          registration_types=registration_types,
                                          region_ids=region_ids,
                                          location_id=location_id,
                                          observer_id=observer_id,
                                          observer_nick=observer_nick,
                                          observer_competence=observer_competence,
                                          group_id=group_id,
                                          output=output,
                                          geohazard_tids=geohazard_tids,
                                          lang_key=lang_key,
                                          recursive_count=recursive_count)
            # the response object is unbound after an exception, so return what we have;
            # the recursive call above has handled any remaining paging.
            return data

        # log request status
        if responds['TotalMatches'] == 0:
            ml.log_and_print("getobservations.py -> _make_one_request: no data")
        else:
            ml.log_and_print('getobservations.py -> _make_one_request: {0:.2f}%'.format(len(data) / responds['TotalMatches'] * 100))

        # if more get more by adding to the offset
        if len(data) < responds['TotalMatches']:
            rssquery["Offset"] += 100
        else:
            more_available = False

    return data
Code example #26
def calculate_and_plot9d_season(period='2018-19'):
    """Calculate ice columns for 9 days and make plots of all ice thickness for a given season or optionally 'Today'.

    The inner workings:
    1.1 Retrieves ice thickness observations from regObs. If period is given as a season, all observations for
        this season will be requested. All previous plots and local storage will be deleted.
    1.2 If period='Today' ice thickness observations from today will be requested and plotted. Older plots will be
        in the folder. Metadata dict will be merged.
    2.  Calculate the 9 day prognosis from the observation time and plots the result.
    3.  Make a metadata json for handling files on iskart.no. Only confirmed files in folder will be
        added to metadata json.

    :param period:    [String] Default is current season (2018-19), or 'Today'.
    :return:
    """

    log_reference = 'calculateandplot.py -> calculate_and_plot9d_season'

    # File names
    regid_metadata_json = '{}regid_metadata.json'.format(
        se.ni_dogn_plots_folder)
    regid_metadata_pickle = '{}regid_metadata.pickle'.format(se.local_storage)

    if period == 'Today':
        ice_thicks = gro.get_ice_thickness_today()

    else:
        # Empty the 9dogn folder
        # for file in os.listdir(se.ni_dogn_plots_folder):
        #     file_path = os.path.join(se.ni_dogn_plots_folder, file)
        #     try:
        #         if os.path.isfile(file_path):
        #             os.unlink(file_path)
        #     except OSError:
        #         pass

        # remove pickle with metadata
        try:
            os.remove(regid_metadata_pickle)
        except OSError:
            pass

        # Get new observations
        ice_thicks = gro.get_ice_thickness_observations(period,
                                                        reset_and_get_new=True)

    # Calculate and plot
    for k, v in ice_thicks.items():

        # If file is missing, make it. If observation is older than 11 days it is based on gridded data for sure and no plot file needed.
        make_plot = False
        max_file_age = 11
        date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)
        file_names = os.listdir(se.ni_dogn_plots_folder)
        plot_filename = '{0}.png'.format(k)
        if plot_filename not in file_names:
            make_plot = True
        else:
            if v.date.date() > date_limit.date():
                make_plot = True

        if make_plot:
            try:
                calculate_and_plot9d_regid(k,
                                           plot_folder=se.ni_dogn_plots_folder,
                                           observed_ice=v)
            except Exception:
                error_msg = sys.exc_info()[0]
                ml.log_and_print(
                    "[Error] {} Error making plot for {} {}".format(
                        log_reference, k, error_msg))

    # Make json with metadata for using files on iskart.no. Load metadata from pickle if available;
    # entries for new observations with an available plot will be added.
    if not os.path.exists(regid_metadata_pickle):
        regid_metadata = {}
    else:
        regid_metadata = mp.unpickle_anything(regid_metadata_pickle)

    list_of_plots = os.listdir(se.ni_dogn_plots_folder)

    for k, v in ice_thicks.items():
        # only add metadata on files that are in the folder
        if '{0}.png'.format(k) in list_of_plots:
            date = v.date.date()

            region_name = v.metadata['OriginalObject']['ForecastRegionName']
            if not region_name:
                region_name = 'Ukjent region'
            x, y = v.metadata['UTMEast'], v.metadata['UTMNorth']
            lake_id = v.metadata['LocationID']
            lake_name = v.metadata['LocationName']
            if not lake_name:
                lake_name = 'E{} N{}'.format(x, y)

            regid_metadata[k] = {
                'RegionName': region_name,
                'LakeID': lake_id,
                'LakeName': lake_name,
                'Date': '{}'.format(date)
            }

    mp.pickle_anything(regid_metadata, regid_metadata_pickle)

    json_string = json.dumps(regid_metadata,
                             ensure_ascii=False).encode('utf-8')
    with open(regid_metadata_json, 'wb') as f:
        f.write(json_string)
Code example #27
def _get_general(registration_class_type, registration_types, from_date, to_date, region_ids=None, location_id=None,
        observer_ids=None, observer_nick=None, observer_competence=None, group_id=None,
        output='List', geohazard_tids=None, lang_key=1):
    """Gets observations of a requested type and mapps them to a class.

    :param registration_class_type: [class for the requested observations]
    :param registration_types:  [int] RegistrationTID for the requested observation type
    :param from_date:           [date] A query returns [from_date, to_date]
    :param to_date:             [date] A query returns [from_date, to_date]
    :param region_ids:          [int or list of ints] If region_ids = None, all regions are selected
    :param observer_ids:        [int or list of ints] If observer_ids = None, all observers are selected
    :param observer_nick:       [string] Part of an observer nickname
    :param observer_competence: [int or list of ints] Default None gives all.
    :param group_id:            [int]
    :param output:              [string] Options: 'List', 'DataFrame' and 'Count'. Default 'List'.
    :param geohazard_tids       [int or list of ints] 10 is snow, 20,30,40 are dirt, 60 is water and 70 is ice
    :param lang_key             [int] 1 is norwegian, 2 is english

    :return:
    """

    data_out = None
    if output not in ['List', 'DataFrame', 'Count']:
        ml.log_and_print('getobservations.py -> _get_general: Illegal output option.')
        return data_out

    # In these methods "Count" simply counts the list of observations, whereas in the more general get_data,
    # counting a flat list and counting a nested list of full registrations are two different things.
    output_for_get_data = output
    if output == 'Count':
        output_for_get_data = 'Count list'
    # DataFrames are based on the lists
    if output == 'DataFrame':
        output_for_get_data = 'List'

    # Example: AvalancheEvaluation3 = 31 is the table for observed avalanche evaluations.
    data_with_more = get_data(from_date=from_date, to_date=to_date, region_ids=region_ids, observer_ids=observer_ids,
                              observer_nick=observer_nick, observer_competence=observer_competence,
                              group_id=group_id, location_id=location_id, lang_key=lang_key,
                              output=output_for_get_data, registration_types=registration_types, geohazard_tids=geohazard_tids)

    # filter out all other observation types
    data = []
    if registration_types:
        for d in data_with_more:
            if d['RegistrationTid'] == registration_types:
                data.append(d)
    else:   # registration_types is None means all registrations, and no single type is picked out.
        data = data_with_more

    if output == 'List' or output == 'DataFrame':
        data_out = [registration_class_type(d) for d in data]
        data_out = sorted(data_out, key=lambda obs: obs.DtObsTime)

    if output == 'List':
        return data_out

    if output == 'DataFrame':
        return _make_data_frame(data_out)

    if output == 'Count':
        return data
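
The wash-out step above is a plain filter on RegistrationTid followed by a sort on observation time. A standalone sketch of that logic with fabricated records:

records = [
    {'RegId': 3, 'RegistrationTid': 50, 'DtObsTime': '2018-01-02T10:00:00'},
    {'RegId': 1, 'RegistrationTid': 50, 'DtObsTime': '2018-01-03T12:00:00'},
    {'RegId': 2, 'RegistrationTid': 13, 'DtObsTime': '2018-01-01T09:00:00'},
]

registration_types = 50  # keep only ice thickness registrations
data = [d for d in records if d['RegistrationTid'] == registration_types]
data = sorted(data, key=lambda d: d['DtObsTime'])
# -> RegId 3 then RegId 1; the danger sign observation (13) is washed out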
コード例 #34
def plot_season_for_all_regobs_locations(year='2018-19',
                                         calculate_new=False,
                                         get_new_obs=False,
                                         make_plots=False,
                                         delete_old_plots=False):
    """Method specialized for scheduled plotting for iskart.no.
    Method makes a season plot for all ObsLocations in regObs where we have a first ice date.

    Plotting may take some time: about 250 lakes in a season, and for each plot weather parameters are requested from the GTS.

    The workings of the method:
    1.  Get all location ids and the observations belonging to them where we have first ice.
    2.1 If calculate_new, empty the sesong folder and the pickle in local storage and calculate (and make plots if requested).
    2.2 Make the metadata json for showing files on iskart.no.
    3.  All calculations are compared to observed data in a scatter plot.

    :param year:                [String] Season for plotting. eg: '2016-17'
    :param calculate_new:       [bool] Calculate new ice thicknesses. If False only make the seasonal scatter.
    :param get_new_obs:         [bool]
    :param make_plots:          [bool]  If False all calculations are made, but only the scatter comparison against observations is plotted.
    :param delete_old_plots:    [bool]  If True all former plots and pickles are removed.
    """

    pickle_file_name_and_path = '{0}all_calculated_ice_{1}.pickle'.format(
        se.local_storage, year)
    location_id_metadata_json = '{}location_id_metadata.json'.format(
        se.sesong_plots_folder)

    if calculate_new:
        if delete_old_plots:
            # Empty the sesong plot folder
            for file in os.listdir(se.sesong_plots_folder):
                file_path = os.path.join(se.sesong_plots_folder, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except OSError:
                    pass

            # remove old pickled data - because we are getting new
            try:
                os.remove(pickle_file_name_and_path)
            except OSError:
                pass

        all_observations = gro.get_all_season_ice(year, get_new=get_new_obs)
        from_date, to_date = gm.get_dates_from_year(year)
        all_calculated = {}
        all_observed = {}
        location_id_metadata = {}

        for location_id, observed_ice in all_observations.items():
            plot_filename = None
            try:
                calculated, observed, plot_filename = _plot_season(
                    location_id,
                    from_date,
                    to_date,
                    observed_ice,
                    make_plots=make_plots,
                    plot_folder=se.sesong_plots_folder)
                all_calculated[location_id] = calculated
                all_observed[location_id] = observed
            except:
                error_msg = sys.exc_info()[0]
                ml.log_and_print(
                    "[error] calculateandplot.py -> plot_season_for_all_regobs_locations: Error making plot for {}: {}"
                    .format(location_id, error_msg))

            # Make the json with metadata needed for iskart.no. Add only if the plot was made and thus the file exists.
            if plot_filename and os.path.isfile(se.sesong_plots_folder + plot_filename):

                region_name = observed_ice[0].metadata['OriginalObject'][
                    'ForecastRegionName']
                if not region_name:
                    region_name = 'Ukjent region'
                lake_id = observed_ice[0].metadata['LocationID']
                x, y = observed_ice[0].metadata['UTMEast'], observed_ice[
                    0].metadata['UTMNorth']
                lake_name = observed_ice[0].metadata['LocationName']
                if not lake_name:
                    lake_name = 'E{} N{}'.format(x, y)

                location_id_metadata[location_id] = {
                    'RegionName': region_name,
                    'LakeID': lake_id,
                    'LakeName': lake_name,
                    'PlotFileName': plot_filename
                }

        mp.pickle_anything([all_calculated, all_observed],
                           pickle_file_name_and_path)

        try:
            json_string = json.dumps(location_id_metadata,
                                     ensure_ascii=False).encode('utf-8')
            with open(location_id_metadata_json, 'wb') as f:
                f.write(json_string)
        except:
            error_msg = sys.exc_info()[0]
            ml.log_and_print(
                "[error] calculateandplot.py -> plot_season_for_all_regobs_locations: Can't write json. {}"
                .format(error_msg))

    else:
        [all_calculated,
         all_observed] = mp.unpickle_anything(pickle_file_name_and_path)

    try:
        pts.scatter_calculated_vs_observed(all_calculated, all_observed, year)
    except:
        error_msg = sys.exc_info()[0]
        ml.log_and_print(
            "[error] calculateandplot.py -> plot_season_for_all_regobs_locations: {}. Could not plot scatter {}."
            .format(error_msg, year))
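
As scheduled for iskart.no, a full recalculation for a season might be triggered like this (a sketch; it assumes the varsomdata settings and data folders are configured):

plot_season_for_all_regobs_locations(year='2018-19',
                                     calculate_new=True,
                                     get_new_obs=True,
                                     make_plots=True,
                                     delete_old_plots=True)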
コード例 #35
def getgts(utm33x,
           utm33y,
           element_id,
           from_date,
           to_date,
           timeseries_type=0,
           patch_missing_values=True):
    """Retrieves data from the grid time series application (GTS) and maps it to a list of WeatherElements.

    Values in WeatherElements are given in meters, i.e. in some cases they are converted from cm to m.
    Optionally the data is patched up if data is missing, and daily averages from 00-24 are calculated.

    GTS data is given as 24hour averages from 0600-0600. If timeseries_type=0 is requested, data
    is converted to daily averages from 00-24hrs, time stamped at the end of the period (23:59:59).

    For wind data, the first data in the data set is from 1 March 2018.

    :param utm33x:              [int] X coordinate in utm33N
    :param utm33y:              [int] Y coordinate in utm33N
    :param element_id:          [string] Element ID in seNorge. Ex: elementID = 'fsw' is 24hr new snow depth in [cm]
    :param from_date:           [datetime or string YYYY-mm-dd] method returns data [fromDate, toDate]
    :param to_date:             [datetime or string YYYY-mm-dd] method returns data [fromDate, toDate]
    :param timeseries_type:     [int] daily = 0 (default), no change = -1
    :param patch_missing_values:[bool] Go through the list and check if some values are missing. If so, patch up.
    :return:                    [list of WeatherElements]

    http://h-web02.nve.no:8080/api/GridTimeSeries/gridtimeserie?theme=tm&startdate=2017-11-20&enddate=2017-11-22&x=109190&y=6817490

    timeseries_type's:
        -1                  Data returned as received from service
        0                   Data made to daily average from 00-24hrs
        See also:           http://eklima.no/wsKlima/complete/cTimeserie_en.html

    element_id's used:
        fsw:                new snow last 24hrs in mm water equivalents
        sd:                 snow depth in cm
        tm:                 temperature average 24hrs
        sdfsw:              new snow last 24hrs in cm
        windSpeed10m24h06:  10m wind speed over 24hrs ranging from 06-06

    Wind is not tested yet:

    Wind direction, daily:      http://h-web02:8080/api/GridTimeSeries/953709/7938592/2018-03-26/2018-04-17/windDirection10m24h06.json
    Wind direction, 3 hrs:      http://h-web02:8080/api/GridTimeSeries/953709/7938592/2018-03-26/2018-04-17/windDirection10m3h.json

    Wind speed, daily:          http://h-web02:8080/api/GridTimeSeries/953709/7938592/2018-03-26/2018-04-17/windSpeed10m24h06.json
    Wind speed, 3 hrs:          http://h-web02:8080/api/GridTimeSeries/953709/7938592/2018-03-26/2018-04-17/windSpeed10m3h.json

    """

    url = 'http://h-web02.nve.no:8080/api/GridTimeSeries/gridtimeserie?theme={0}&startdate={1}&enddate={2}&x={3}&y={4}'.format(
        element_id, from_date, to_date, utm33x, utm33y)

    response = rq.get(url)

    full_data = response.json()
    if 'Error' in full_data:
        ml.log_and_print("[error] getgts.py -> getgts: {0}".format(full_data))
        return []

    else:
        data = full_data['Data']

        weather_element_list = []
        date = dt.datetime.strptime(full_data['StartDate'],
                                    '%d.%m.%Y %H:%M:%S')

        # the assigned NoDataValue if one data element is missing.
        no_data_value = int(full_data['NoDataValue'])

        for d in data:
            value = float(d)
            if value == no_data_value:
                value = None
            weather_element = we.WeatherElement(
                'UTM33 X{0} Y{1}'.format(utm33x, utm33y), date, element_id,
                value)
            weather_element.Metadata['DataSource'] = 'GTS'
            weather_element.Metadata['TimeResolution'] = full_data[
                'TimeResolution']
            weather_element.Metadata['FullName'] = full_data['FullName']
            weather_element.Metadata['WeatherDataAltitude'] = full_data[
                'Altitude']
            weather_element_list.append(weather_element)
            date += dt.timedelta(minutes=full_data['TimeResolution'])

        if patch_missing_values:
            weather_element_list = we.patch_novalue_in_weather_element_list(
                weather_element_list)

        if element_id == 'fsw' or element_id == 'sd' or element_id == 'sdfsw':
            weather_element_list = we.meter_from_centimeter(
                weather_element_list)  # convert from [cm] to [m]

        if timeseries_type == 0:
            weather_element_list = we.make_daily_average(weather_element_list)

            # the first element after doing the daily average represents the day before the requested time period
            del weather_element_list[0]

        return weather_element_list
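
The service marks missing points with a sentinel NoDataValue, which the loop above replaces with None before optional patching. A self-contained sketch of that parsing with a fabricated response:

import datetime as dt

full_data = {  # fabricated GTS-like response, not real data
    'StartDate': '20.11.2017 06:00:00',
    'NoDataValue': -999,
    'TimeResolution': 1440,   # minutes between values, i.e. daily
    'Data': ['-1.2', '-999', '-3.4'],
}

date = dt.datetime.strptime(full_data['StartDate'], '%d.%m.%Y %H:%M:%S')
no_data_value = int(full_data['NoDataValue'])

values = []
for d in full_data['Data']:
    value = float(d)
    if value == no_data_value:
        value = None   # to be patched by a nearest-neighbour routine
    values.append((date, value))
    date += dt.timedelta(minutes=full_data['TimeResolution'])
# -> [(2017-11-20 06:00, -1.2), (2017-11-21 06:00, None), (2017-11-22 06:00, -3.4)]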
コード例 #36
def get_ice_thickness_from_surface_temp(ic, time_step, dh_snow, temp, melt_energy=None):
    """Given surface temperature and new snow on an ice-column, ice evolution is estimated. In the simplest case
    the surface temp is estimated from air temperature. More advances approaches calculates surface temperature
    by solving er energy balance equation.

    :param ic:          Ice column at the beginning of the time step. Object containing the ice column with metadata
    :param dh_snow:     New snow in period of time step. Given as float in SI units [m]
    :param temp:        Average temperature in period of time step. Given i C as float.
    :param time_step:   In seconds. 60*60*24 = 86400 is 24hrs
    :return:            Ice column at end of time step
    """

    dh_snow = float(dh_snow)

    # step the date forward one time step. We do it initially because the variable is also used and subtracted in the following calculations.
    ic.time_step_forward(time_step)

    # Add new snow on top of the column if we have ice and snow
    # and update the slush level/buoyancy given new snow
    if len(ic.column) != 0:
        if dh_snow != 0.:
            ic.add_layer_at_index(0, ice.IceLayer(dh_snow, 'new_snow'))
        ic.update_slush_level()

    # if surface or air temperature is FREEZING
    if temp < const.temp_f:

        # If no ice, freeze water to ice
        if len(ic.column) == 0:
            # The heat flux equation gives how much water will freeze. U_total for the equation is estimated.
            U_total = ice.add_layer_conductance_to_total(None, const.k_black_ice, 0, 10)
            dh = - temp * U_total * time_step / const.rho_water / const.L_fusion
            ic.add_layer_at_index(0, ice.IceLayer(dh, 'black_ice'))
            pass

        else:
            # Declaration of total conductance of layers above freezing layer
            U_total = None
            i = 0
            while time_step > 0 and i <= len(ic.column)-1:

                # If the layer is a solid, it only adds to the total insulation. Unless it is the last and water is frozen to ice.
                if (ic.column[i].get_enum()) > 9:
                    U_total = ice.add_layer_conductance_to_total(U_total, ic.column[i].conductivity, ic.column[i].height, ic.column[i].get_enum())

                    # If the layer is the last layer of solids and thus at the bottom, we get freezing at the bottom
                    if i == len(ic.column)-1:

                        # The heat flux equation gives how much water will freeze.
                        dh = - temp * U_total * time_step / const.rho_water / const.L_fusion
                        ic.add_layer_at_index(i+1, ice.IceLayer(dh, 'black_ice'))
                        time_step = 0

                # Else the layer is a slush layer above or in the ice column and it will freeze fully or partially.
                # Note, we do not freeze slush in the same time step it occurs.
                elif not ic.in_slush_event:

                    # If the total conductance is None, we are dealing with the top layer and a surface/thin ice conductance must be defined.
                    if U_total is None:
                        U_total = ice.add_layer_conductance_to_total(None, const.k_slush_ice, 0, 11)

                    # Only the water part in the slush freezes
                    dh = - temp * U_total * time_step / const.rho_water / const.L_fusion / (1 - const.part_ice_in_slush)

                    # If a layer totally freezes during the time period, the rest of the time will be used to freeze a layer further down.
                    if ic.column[i].height < dh:

                        ic.column[i].set_type('slush_ice')

                        # The heat flux equation solved for time
                        time_step_used = ic.column[i].height * const.rho_water * const.L_fusion * (1 - const.part_ice_in_slush) / -temp / U_total
                        time_step = time_step - time_step_used

                        # Layer height increases when water in the layer freezes
                        ic.column[i].height += ic.column[i].height * (1 - const.part_ice_in_slush) * ((const.rho_water - const.rho_slush_ice) / const.rho_slush_ice)

                        # Update conductance
                        U_total = ice.add_layer_conductance_to_total(U_total, ic.column[i].conductivity, ic.column[i].height, ic.column[i].get_enum())

                    # Else all energy is used to freeze the layer only partially
                    else:
                        # The thickness that remains slush
                        ic.column[i].height -= dh

                        # dh has frozen to slush ice. Layer height increases when water in the layer freezes.
                        dh += dh * (1 - const.part_ice_in_slush) * ((const.rho_water - const.rho_slush_ice) / const.rho_slush_ice)
                        ic.add_layer_at_index(i, ice.IceLayer(dh, 'slush_ice'))

                        # Nothing more to freeze
                        time_step = 0

                # Slush event has happened and this is the first time step after the slush event. Do not create ice in the first time step.
                else:
                    # ml.log_and_print("[info] icethickness.py -> get_ice_thickness_from_surface_temp: No freezing event in the current time step due to slush event.", log_it=False, print_it=True)
                    ic.in_slush_event = False
                    # If we don't set time step to 0, layers further down will freeze.
                    time_step = 0

                # Go to next ice layer
                i += 1

    # if surface or air temperature is MELTING
    else:
        # In case surface temperatures are above 0C (when air temp is used to calculate ice evolution),
        # no energy term from the energy balance calculations should be submitted (melt_energy = None).
        if temp > 0.:
            # all melting is done by a simple degree-day model using different calibration constants for snow,
            # slush ice and black ice. Melting only affects the top layer (index = 0).
            while time_step > 0 and len(ic.column) > 0:
                if ic.column[0].type == 'water':
                    ic.remove_layer_at_index(0)
                else:
                    if ic.column[0].get_enum() >= 20: # snow
                        meltingcoeff = const.meltingcoeff_snow
                    elif ic.column[0].type == 'slush_ice':
                        meltingcoeff = const.meltingcoeff_slush_ice
                    elif ic.column[0].type == 'slush':
                        meltingcoeff = const.meltingcoeff_slush
                    elif ic.column[0].type == 'black_ice':
                        meltingcoeff = const.meltingcoeff_black_ice
                    else:
                        ml.log_and_print("[info] icethickness.py -> get_ice_thickness_from_surface_temp: Melting on unknown layer type: {0}. Using slush_ice coeff.".format(ic.column[0].type))
                        meltingcoeff = const.meltingcoeff_slush_ice

                    # degree-day melting. I have separated the time factor from the melting coefficient.
                    dh = meltingcoeff * time_step * (temp - const.temp_f)

                    # if the layer is thinner than the total melting, the layer is removed and the rest of the melting occurs
                    # in the layer below for the remainder of the time. Melting (dh) and time are proportional in the degree-day equation.
                    if ic.column[0].height < -dh:
                        time_step_used = ic.column[0].height / -dh * time_step
                        ic.remove_layer_at_index(0)
                        time_step = time_step - time_step_used

                    # the layer is only partly melted during this time_step
                    else:
                        ic.column[0].height = ic.column[0].height + dh
                        time_step = 0

        # In case surface temp is calculated from an energy balance, surface temp is never above 0C, but we
        # may still have melting, in which case melt_energy is not None and temp == 0.
        elif melt_energy is not None:
            while time_step > 0 and len(ic.column) > 0:
                if ic.column[0].type == 'water':
                    ic.remove_layer_at_index(0)
                else:
                    # energy available to melt used with latent heat of fusion (delta_h = Q/L/rho)
                    L_ice = const.L_fusion/1000.    # Joule to Kilo Joule
                    dh = melt_energy / L_ice / ic.column[0].density * time_step/24/60/60

                    # if the layer is thinner than the total melting, the layer is removed and the rest of the melting occurs
                    # in the layer below for the remainder of the time. Melting (dh) and time are proportional in the degree-day equation.
                    if ic.column[0].height < -dh:
                        time_step_used = ic.column[0].height / -dh * time_step
                        ic.remove_layer_at_index(0)
                        time_step = time_step - time_step_used

                    # the layer is only partly melted during this time_step
                    else:
                        ic.column[0].height = ic.column[0].height + dh
                        time_step = 0

        else:
            ml.log_and_print("[info] icethickness.py -> get_ice_thickness_from_surface_temp: Need either energy or positive temperatures in model to melt snow and ice.")

    ic.merge_and_remove_excess_layers()
    ic.merge_snow_layers_and_compress(temp)
    ic.update_draft_thickness()
    ic.update_water_line()
    ic.update_column_temperatures(temp)
    ic.update_total_column_height()
    ic.set_surface_temperature(temp)

    return ic
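
The freezing branch rests on the heat flux relation dh = -temp * U_total * time_step / (rho_water * L_fusion). A back-of-the-envelope check with assumed constants (illustrative values, not the project's const module):

rho_water = 1000.          # kg/m3, assumed
L_fusion = 333500.         # J/kg, latent heat of fusion, assumed
U_total = 10.              # W/(m2 K), assumed total conductance of the column
temp = -10.                # C, average surface temperature over the time step
time_step = 60 * 60 * 24   # 24 hrs in seconds

dh = -temp * U_total * time_step / rho_water / L_fusion
# dh is about 0.026 m of new black ice after one cold day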
コード例 #37
def get_data(from_date=None, to_date=None, registration_types=None, reg_ids=None, region_ids=None, location_id=None,
        observer_ids=None, observer_nick=None, observer_competence=None, group_id=None,
        output='List', geohazard_tids=None, lang_key=1):
    """Gets data from regObs webapi. Each observation returned as a dictionary in a list.

    :param from_date:           [string] 'yyyy-mm-dd'. Result includes from date.
    :param to_date:             [string] 'yyyy-mm-dd'. Result includes to date.
    :param lang_key:            [int] Default 1 gives Norwegian.
    :param reg_id:              [int or list of ints] Default None gives all.
    :param registration_types:  [string or list of strings] Default None gives all.
    :param region_ids:          [int or list of ints]
    :param geo_hazards:         [int or list of ints] Default None gives all.
    :param observer_id:         [int or list of ints] Default None gives all.
    :param observer_nick        [string] Part of a observer nick name
    :param observer_competence  [int or list of int] as given in ComtetanceLevelKDV
    :param group_id:            [int]
    :param location_id:         [int]
    :param output:              [string] 'Nested' collects all observations in one regid in one entry (defult for webapi).
                                         'List' is a flatt structure with one entry pr observation type.
                                         'Count nest' makes one request and picks out info on total matches
                                         'Count list' counts every from in every observation

    :return:                    [list or int] Depending on output requested.

    """

    # If input isn't a list, make it so
    if not isinstance(registration_types, list):
        registration_types = [registration_types]

    if not isinstance(region_ids, list):
        region_ids = [region_ids]

    if not isinstance(geohazard_tids, list):
        geohazard_tids = [geohazard_tids]

    # The regObs webapi does not support multiple ObserverIDs or RegIDs, so we loop over them one by one.
    if not isinstance(observer_ids, list):
        observer_ids = [observer_ids]

    if not isinstance(reg_ids, list):
        reg_ids = [reg_ids]

    # if output requested is 'Count' a number is expected, else a list of observations
    all_data = []

    for reg_id in reg_ids:
        for observer_id in observer_ids:

            data = _make_one_request(
                from_date=from_date, to_date=to_date, lang_key=lang_key, reg_id=reg_id,
                registration_types=registration_types, region_ids=region_ids, geohazard_tids=geohazard_tids,
                observer_id=observer_id, observer_nick=observer_nick, observer_competence=observer_competence, group_id=group_id, location_id=location_id, output=output)

            all_data += data

    # Output 'Nested' is the structure returned from the webapi. All observations with the same reg_id are grouped into one list item.
    # Output 'List' makes each observation element a separate item on the list.
    # Counts of each are available as 'Count list' and 'Count nest'.
    if output == 'Count nest':
        return sum(all_data)

    # data sorted with ascending observation time
    all_data = sorted(all_data, key=lambda d: d['DtObsTime'])
    if output == 'Nested':
        return all_data

    elif output == 'List' or output == 'Count list':
        listed_data = []

        for d in all_data:
            for o in d['Registrations']:
                listed_data.append({**d, **o})
            for p in d['Pictures']:
                p['RegistrationName'] = 'Bilde'
                listed_data.append({**d, **p})

        if output == 'List':
            return listed_data
        if output == 'Count list':
            return len(listed_data)

    else:
        ml.log_and_print('getobservations.py -> get_data: Unsupported output type.')
        return None
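
The 'List' output flattens each nested regObs entry into one row per registration and one row per picture. A standalone sketch of that flattening with fabricated data:

nested = [{
    'RegId': 123, 'DtObsTime': '2018-02-01T10:00:00',
    'Registrations': [{'RegistrationTid': 50, 'RegistrationName': 'Istykkelse'}],
    'Pictures': [{'PictureID': 9}],
}]

listed_data = []
for d in nested:
    for o in d['Registrations']:
        listed_data.append({**d, **o})
    for p in d['Pictures']:
        p['RegistrationName'] = 'Bilde'
        listed_data.append({**d, **p})
# -> two flat rows: one ice thickness registration and one picture row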
コード例 #38
ファイル: getmisc.py プロジェクト: NVE/varsomdata
def get_forecast_region_for_coordinate(utm33x, utm33y, year):
    """Maps an observation to the forecast regions used at the time the observation was made

    :param utm33x:
    :param utm33y:
    :param year:
    :return region_id, region_name:

    ## osx requires
    pip install pyshp
    pip install shapely

    ## windows requires
    pip install pyshp
    conda config --add channels conda-forge
    conda install shapely

    Helpful pages
    https://pypi.python.org/pypi/pyshp
    https://chrishavlin.wordpress.com/2016/11/16/shapefiles-tutorial/
    https://streamhacker.com/2010/03/23/python-point-in-polygon-shapely/
    """

    from shapely import geometry as gty
    import shapefile as sf

    if year == '2012-13':
        # forecasting started in January 2013
        file_name = 'VarslingsOmrF_fra_2013_jan'
        id_offset = 100
    elif year == '2013-14' or year == '2014-15' or year == '2015-16':
        # Svartisen (131) was started in April 2014.
        # Nordenskioldland (130) and Hallingdal (132) were established in April 2014, but not used before the season after.
        # Salten (133) was started in March 2015.
        # We tested Nordenskioldland (130) in May 2015.
        file_name = 'VarslingsOmrF_fra_2014_mars'
        id_offset = 100
    elif year == '2016-17' or year == '2017-18':
        # total makeover for season 2016-17. Introducing A and B regions. Ids at 3000.
        file_name = 'VarslingsOmrF_fra_2016_des'
        id_offset = 0
    else:
        ml.log_and_print(
            '[warning] getmisc.py -> get_forecast_region_for_coordinate: No valid year given.'
        )
        file_name = 'VarslingsOmrF_fra_2016_des'
        id_offset = 0

    shape_file = sf.Reader('{0}{1}'.format(env.forecast_region_shapes,
                                           file_name))
    point = gty.MultiPoint([(utm33x, utm33y)]).convex_hull
    region = None

    for shape, record in zip(shape_file.iterShapes(), shape_file.iterRecords()):
        poly = gty.Polygon(shape.points)
        if point.within(poly):
            region = record

    if region is None:
        region_name = 'Ikke gitt'
        region_id = 0
    else:
        region_name = region[1]
        region_id = region[0] + id_offset

    return region_id, region_name
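
The region lookup is a plain point-in-polygon test. A minimal, self-contained shapely example with illustrative coordinates:

from shapely import geometry as gty

poly = gty.Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
point = gty.Point(5, 5)

print(point.within(poly))               # True
print(gty.Point(20, 20).within(poly))   # False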
コード例 #39
def _reg_types_dict(registration_tids=None):
    """Method maps single RegistrationTID values to the query dictionary used in regObs webapi

    :param registration_tids:       [int or list of int] Definition given below
    :return:


    Registration IDs and names
    10	Fritekst
    11	Ulykke/hendelse
    12	Bilde
    13	Faretegn
    -14	Skader
    21	Vær
    22	Snødekke
    23	Snøprofil
    -24	Skredfaretegn
    25	Stabilitetstest
    26	Skredhendelse
    27	Observert skredaktivitet(2011)
    28	Skredfarevurdering (2012)
    -29	Svakt lag
    30	Skredfarevurdering (2013)
    31	Skredfarevurdering
    32	Skredproblem
    33	Skredaktivitet
    40	Snøskredvarsel
    50	Istykkelse
    51	Isdekningsgrad
    61	Vannstand (2017)
    62	Vannstand
    71	Skredhendelse
    80	Hendelser   Grupperings type - Hendelser
    81	Skred og faretegn   Grupperings type - Skred og faretegn
    82	Snødekke og vær Grupperings type - Snødekke og vær
    83	Vurderinger og problemer    Grupperings type - Vurderinger og problemer

    """

    # If input isn't a list, make it so
    if not isinstance(registration_tids, list):
        registration_tids = [registration_tids]

    registration_dicts = []
    for registration_tid in registration_tids:
        if registration_tid is None:
            return None
        elif registration_tid == 10:  # Fritekst
            registration_dicts.append({'Id': 10, 'SubTypes': []})
        elif registration_tid == 11:  # Ulykke/hendelse
            registration_dicts.append({'Id': 80, 'SubTypes': [11]})
        elif registration_tid == 13:  # Faretegn
            registration_dicts.append({'Id': 81, 'SubTypes': [13]})
        elif registration_tid == 21:  # Vær
            registration_dicts.append({'Id': 82, 'SubTypes': [21]})
        elif registration_tid == 22:  # Snødekke
            registration_dicts.append({'Id': 82, 'SubTypes': [22]})
        elif registration_tid == 23:  # Snøprofil
            registration_dicts.append({'Id': 82, 'SubTypes': [23]})
        elif registration_tid == 25:  # Stabilitetstest
            registration_dicts.append({'Id': 82, 'SubTypes': [25]})
        elif registration_tid == 26:  # Skredhendelse
            registration_dicts.append({'Id': 81, 'SubTypes': [26]})
        elif registration_tid == 27:  # Skredaktivitet(2011)
            registration_dicts.append({'Id': 81, 'SubTypes': [27]})
        elif registration_tid == 28:  # Skredfarevurdering (2012)
            registration_dicts.append({'Id': 83, 'SubTypes': [28]})
        elif registration_tid == 30:  # Skredfarevurdering (2013)
            registration_dicts.append({'Id': 83, 'SubTypes': [30]})
        elif registration_tid == 31:  # Skredfarevurdering
            registration_dicts.append({'Id': 83, 'SubTypes': [31]})
        elif registration_tid == 32:  # Skredproblem
            registration_dicts.append({'Id': 83, 'SubTypes': [32]})
        elif registration_tid == 33:  # Skredaktivitet
            registration_dicts.append({'Id': 81, 'SubTypes': [33]})
        elif registration_tid == 50:  # Istykkelse
            registration_dicts.append({'Id': 50, 'SubTypes': []})
        elif registration_tid == 51:  # Isdekningsgrad
            registration_dicts.append({'Id': 51, 'SubTypes': []})
        else:
            ml.log_and_print('getobservations.py -> _reg_types_dict: RegistrationTID {0} not supported (yet).'.format(registration_tid))

    return registration_dicts
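
For example, snow cover (22) and snow profile (23) observations both map to grouping type 82 (a sketch of the expected return value, given the mapping above):

_reg_types_dict([22, 23])
# -> [{'Id': 82, 'SubTypes': [22]}, {'Id': 82, 'SubTypes': [23]}]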
コード例 #40
ファイル: getmisc.py プロジェクト: NVE/varsomdata
def get_trip(from_date, to_date, geohazard_tid=None, output='List'):
    """Gets trip information and returns list of class Trip objects. Optionally .csv file is written to project
    output folder.

    :param from_date:       [date] A query returns [from_date, to_date>
    :param to_date:         [date] A query returns [from_date, to_date>
    :param geohazard_tid:   [int] 10 is snow, 20,30,40 are dirt, 60 is water and 70 is ice
    :param output:          [string]
    :return:                [list] of class Trip objects

    <entry>
        <id>http://api.nve.no/hydrology/RegObs/v0.9.9/OData.svc/Trip(1)</id>
        <category term="RegObsModel.Trip" scheme="http://schemas.microsoft.com/ado/2007/08/dataservices/scheme" />
        <link rel="edit" title="Trip" href="Trip(1)" /><link rel="http://schemas.microsoft.com/ado/2007/08/dataservices/related/Observer" type="application/atom+xml;type=entry" title="Observer" href="Trip(1)/Observer" />
        <link rel="http://schemas.microsoft.com/ado/2007/08/dataservices/related/ObsLocation" type="application/atom+xml;type=entry" title="ObsLocation" href="Trip(1)/ObsLocation" />
        <title />
        <updated>2015-12-30T20:09:16Z</updated>
        <author>
            <name />
        </author>
        <content type="application/xml">
            <m:properties>
                <d:TripID m:type="Edm.Int32">1</d:TripID>
                <d:ObserverID m:type="Edm.Int32">1077</d:ObserverID>
                <d:ObsLocationID m:type="Edm.Int32">19063</d:ObsLocationID>
                <d:GeoHazardTID m:type="Edm.Int16">10</d:GeoHazardTID>
                <d:TripTypeTID m:type="Edm.Int32">20</d:TripTypeTID>
                <d:ObservationExpectedTime m:type="Edm.DateTime">2015-01-09T11:00:00</d:ObservationExpectedTime>
                <d:Comment></d:Comment>
                <d:IsFinished m:type="Edm.Boolean">true</d:IsFinished>
                <d:TripRegistrationTime m:type="Edm.DateTime">2015-01-09T09:11:59.263</d:TripRegistrationTime>
                <d:TripFinishedTime m:type="Edm.DateTime">2015-01-09T09:18:36.653</d:TripFinishedTime>
                <d:DeviceID m:type="Edm.Guid">835f5e39-a73a-48d3-2c7f-3c81c0492b87</d:DeviceID>
            </m:properties>
        </content>
    </entry>
    """

    odata_filter = ""

    if geohazard_tid is not None:
        odata_filter += "GeoHazardTID eq {0} and ".format(geohazard_tid)

    odata_filter += "TripRegistrationTime gt datetime'{0}' and TripRegistrationTime lt datetime'{1}'".format(
        from_date, to_date)

    url = "http://api.nve.no/hydrology/regobs/{0}/Odata.svc/Trip/?$filter={1}&$format=json".format(
        env.odata_version, odata_filter)

    ml.log_and_print('[info] getmisc.py -> get_trip: ..to {0}'.format(url),
                     print_it=True)

    result = requests.get(url).json()
    data = result['d']['results']

    # if more than 1000 elements are requested, odata truncates data to 1000. We do more requests
    if len(data) == 1000:
        time_delta = to_date - from_date
        date_in_middle = from_date + time_delta / 2
        data_out = get_trip(from_date,
                            date_in_middle, geohazard_tid) + get_trip(
                                date_in_middle, to_date, geohazard_tid)
    else:
        data_out = [Trip(d) for d in data]

    if output == 'List':
        return data_out
    elif output == 'csv':
        # text mode with explicit encoding; binary mode breaks csv writing in Python 3
        with open(
                '{0}trips {1}-{2}.csv'.format(env.output_folder,
                                              from_date.strftime('%Y%m%d'),
                                              to_date.strftime('%Y%m%d')),
                'w', newline='', encoding='utf-8') as f:
            w = csv.DictWriter(f, data_out[0].__dict__.keys(), delimiter=";")
            w.writeheader()
            for t in data_out:
                w.writerow(t.__dict__)
        return data_out
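
The OData endpoint truncates responses at 1000 rows, so the method above splits the date range in two and recurses. The same pattern in isolation (fetch_page is a hypothetical stand-in for the actual request):

def fetch_all(from_date, to_date, fetch_page, limit=1000):
    """Recursively halve the date range until each request fits under the row limit."""
    data = fetch_page(from_date, to_date)
    if len(data) < limit:
        return data
    date_in_middle = from_date + (to_date - from_date) / 2
    return (fetch_all(from_date, date_in_middle, fetch_page, limit)
            + fetch_all(date_in_middle, to_date, fetch_page, limit))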
コード例 #41
def get_ice_thickness_observations(year, reset_and_get_new=False):
    """Gets all the observed ice thickness (RegistrationTID = 50) from regObs for one year.

    The inner workings of the method:
    1.   We have an option of resetting local storage (deleting the pickle) and thus forcing the get_new.
    2.1  Try opening a pickle; if it doesn't exist, an exception is thrown and we get new data.
    2.2  If the requested data is from a previous season, no changes are expected, so load the pickle
         without adding the last observations registered in regObs. Either way, don't get new data.
    2.3  If the requested data is from this season, set the request from_date to 7 days before the last
         modified date of the pickle. Add these last obs to the pickle data, so getting all new data is not
         necessary.
    3.   If get new, get all new data for the season.
    4.   Else, load the pickle and, if some last obs are to be added, do so.

    :param year:                [string] Eg '2017-18'
    :param reset_and_get_new:   [bool]
    :return:                    ice_thickeness_obs_dict
    """

    log_referance = 'getregobsdata.py -> get_ice_thickness_observations'
    pickle_file_name = '{0}get_ice_thickness_observations_{1}.pickle'.format(se.local_storage, year)

    # 1. Remove pickle if it exists, forcing the get_new
    if reset_and_get_new:
        try:
            os.remove(pickle_file_name)
        except OSError:
            pass

    from_date, to_date = gm.get_dates_from_year(year)
    add_last_obs = None
    get_new = None

    try:
        mtime = os.path.getmtime(pickle_file_name)
        last_modified_date = dt.datetime.fromtimestamp(mtime).date()

        # if the file is newer than the end of the season (that is, the data is historical), load it without requesting new.
        dt_to_date = dt.datetime.strptime(to_date, '%Y-%m-%d').date()
        if last_modified_date > dt_to_date:
            add_last_obs = False
        else:
            add_last_obs = True
            to_date = dt.date.today()
            from_date = last_modified_date - dt.timedelta(days=7)

        get_new = False

    except OSError:
        # file does not exist, so get_new.
        ml.log_and_print("{0}: No matching pickle found, getting new data.".format(log_referance))
        get_new = True

    if get_new:
        ml.log_and_print('{0}: Getting new for year {1}.'.format(log_referance, year))
        ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
        ice_thickeness_obs_dict = {}

        for o in ice_thickeness_obs:
            if o['RegistrationTid'] == 50:
                ice_column = _webapi_ice_col_to_ice_class(o)
                if ice_column is not None:
                    ice_thickeness_obs_dict[o['RegId']] = ice_column

        mp.pickle_anything(ice_thickeness_obs_dict, pickle_file_name)

    else:
        ice_thickeness_obs_dict = mp.unpickle_anything(pickle_file_name)

        if add_last_obs:
            ml.log_and_print("{0}: Adding observations from {1} to {2}".format(log_referance, from_date, to_date))
            new_ice_thickeness_obs = get_data(from_date=from_date, to_date=to_date, registration_types=50, geohazard_tids=70)
            new_ice_thickeness_obs_dict = {}

            for o in new_ice_thickeness_obs:
                if o['RegistrationTid'] == 50:
                    ice_column = _webapi_ice_col_to_ice_class(o)
                    if ice_column is not None:
                        new_ice_thickeness_obs_dict[o['RegId']] = ice_column

            for k, v in new_ice_thickeness_obs_dict.items():
                ice_thickeness_obs_dict[k] = v

            mp.pickle_anything(ice_thickeness_obs_dict, pickle_file_name)

    return ice_thickeness_obs_dict
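
Typical use (a sketch): the first call of a season builds the pickle, later calls only top up with the last week of observations, and a full refresh can be forced.

ice_thicks = get_ice_thickness_observations('2018-19')

# force a full re-download for the season
ice_thicks = get_ice_thickness_observations('2018-19', reset_and_get_new=True)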
コード例 #42
def _plot_season(location_id, from_date, to_date, observed_ice, make_plots=True, plot_folder=se.plot_folder):
    """Given a location id, a time period and some observations on this location id and this method
    calculates and optionally plots the ice evolution that season. Weather data from GTS.

    It is a sub method of plot_season_for_location_id and plot_season_for_all_regobs_locations.

    :param location_id:
    :param from_date:
    :param to_date:
    :param observed_ice:
    :param make_plots:
    :param plot_folder:     [string]        Path of folder for plots.

    :return calculated_ice, observed_ice:   [list of Ice.IceColumn] observed_ice is returned as given inn.

    TODO: should accept observerd_ice=None and then query for the observations. If still missing, set icecover on to start date.
    """

    year = '{0}-{1}'.format(from_date[0:4], to_date[2:4])

    # Change dates to datetime. Some of the get data modules require datetime
    from_date = dt.datetime.strptime(from_date, '%Y-%m-%d')
    to_date = dt.datetime.strptime(to_date, '%Y-%m-%d')

    # special rule for this season.
    if year == '2018-19':
        from_date = dt.datetime(2018, 9, 1)

    # if to_date is forward in time, make sure it doesn't go too far...
    if to_date > dt.datetime.now():
        to_date = dt.datetime.now() + dt.timedelta(days=7)

    x, y = observed_ice[0].metadata['UTMEast'], observed_ice[0].metadata['UTMNorth']

    # get weather data
    gridTemp = gts.getgts(x, y, 'tm', from_date, to_date)
    gridSno = gts.getgts(x, y, 'sdfsw', from_date, to_date)
    gridSnoTot = gts.getgts(x, y, 'sd', from_date, to_date)

    # adjust grid temperature (at grid elevation) to lake elevation.
    lake_altitude = gm.get_masl_from_utm33(x, y)
    gridTempNewElevation = we.adjust_temperature_to_new_altitude(gridTemp, lake_altitude)

    # strip metadata
    temp, date = we.strip_metadata(gridTempNewElevation, get_date_times=True)
    sno = we.strip_metadata(gridSno, False)
    snotot = we.strip_metadata(gridSnoTot, False)
    cc = dp.clouds_from_precipitation(sno)

    plot_filename = '{0}_{1}.png'.format(location_id, year)
    plot_path_and_filename = '{0}{1}'.format(plot_folder, plot_filename)

    try:
        if len(observed_ice) == 0:
            calculated_ice = it.calculate_ice_cover_air_temp(ice.IceColumn(date[0], []), date, temp, sno, cc)
        else:
            calculated_ice = it.calculate_ice_cover_air_temp(copy.deepcopy(observed_ice[0]), date, temp, sno, cc)

        if make_plots:
            pts.plot_ice_cover(calculated_ice, observed_ice, date, temp, sno, snotot, plot_path_and_filename)

    except:
        # raise
        error_msg = sys.exc_info()[0]
        ml.log_and_print("[Error] calculateandplot.py -> _plot_season: {}. Could not plot {}.".format(error_msg, location_id))
        calculated_ice = None

    return calculated_ice, observed_ice, plot_filename
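
Grid temperatures are adjusted from the grid elevation to the lake elevation before the ice calculation. A standalone sketch of such an adjustment with an assumed standard lapse rate of 0.65 C per 100 m (the project's own routine is we.adjust_temperature_to_new_altitude; this is only an illustration):

def adjust_temperature_to_altitude(temp, grid_masl, lake_masl, lapse_rate=0.0065):
    """Shift a temperature [C] from grid altitude to lake altitude [masl] with an assumed linear lapse rate [C/m]."""
    return temp + (grid_masl - lake_masl) * lapse_rate

print(adjust_temperature_to_altitude(-5.0, 900, 400))   # -1.75 C at the lake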
コード例 #43
def patch_weather_element_list(weather_element_list, from_date=None, to_date=None, time_step=24*60*60):
    """If the dataprovider sends data with gaps, this may patch it up. Simple patching using nearest
    neighbour values/avarages. WORKS ONLY ON 24hrs VALUES

    :param weather_element_list:
    :param from_date:
    :param to_date:
    :param time_step:
    :return: weather_element_list
    """

    log_reference = 'weatherelement.py -> patch_weather_element_list: '

    if from_date is None:
        from_date = weather_element_list[0].Date
    if to_date is None:
        to_date = weather_element_list[-1].Date

    # if dates are strings change to date times
    if isinstance(from_date, str):
        from_date = dt.datetime.strptime(from_date, '%Y-%m-%d').date()
    if isinstance(to_date, str):
        to_date = dt.datetime.strptime(to_date, '%Y-%m-%d').date()

    if isinstance(from_date, dt.datetime):
        from_date = from_date.date()
    if isinstance(to_date, dt.datetime):
        to_date = to_date.date()

    dates_range = to_date - from_date
    dates = []
    for i in range(dates_range.days + 1):
        dates.append(from_date + dt.timedelta(seconds=time_step * i))

    location_id = weather_element_list[0].LocationID
    element_id = weather_element_list[0].ElementID

    if len(weather_element_list) == len(dates):          # No patching needed
        return weather_element_list

    if len(weather_element_list)/len(dates) < 0.95:
        # on short time series the 5% missing rule is too strict.
        if len(dates) - len(weather_element_list) > 3:
            ml.log_and_print('{}More than 5% and more than 3 days missing on {} for {} during {}-{}'.format(log_reference, location_id, element_id, from_date, to_date))
            return weather_element_list

    i = 0
    j = 0

    # make sure we have a last value
    if dates[-1] > weather_element_list[-1].Date.date():
        element_value = weather_element_list[-1].Value
        dates_date_time = dt.datetime.combine(dates[-1], dt.datetime.min.time())
        weather_element_list.append(WeatherElement(location_id, dates_date_time, element_id, element_value))
        ml.log_and_print('{}Last date data missing on {} {} {}. Adding value {}.'.format(log_reference, location_id, dates_date_time, element_id, element_value))

    while i < len(dates):
        dates_date = dates[i]
        weather_date = weather_element_list[j].Date.date()

        if weather_date == dates_date:
            i += 1
            j += 1
        else:
            if j == 0:      # if the first element is missing
                element_value = weather_element_list[j].Value
                meta_data = 'First element missing. Copying the value of the second element to the first index.'
                ml.log_and_print('{}First date data missing on {} {} {}. Adding value {}.'.format(log_reference, location_id, dates_date, element_id, element_value))
                i += 1
            else:           # else add an average value
                element_value = (weather_element_list[j].Value + weather_element_list[j-1].Value)/2
                meta_data = 'Element missing. Adding average of values before and after.'
                ml.log_and_print('{}Date data missing on {} {} {}. Adding value {}.'.format(log_reference, location_id, dates_date, element_id, element_value))
                i += 1

            dates_date_time = dt.datetime.combine(dates_date, dt.datetime.min.time())
            patched_element = WeatherElement(location_id, dates_date_time, element_id, element_value)
            patched_element.Metadata['Value patched'] = meta_data
            weather_element_list.append(patched_element)

    weather_element_list = sorted(weather_element_list, key=lambda weatherElement: weatherElement.Date)

    return weather_element_list
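
The same nearest-neighbour idea on a plain (date, value) series, self-contained and illustrative only (the method above works on WeatherElement objects and logs each patch):

import datetime as dt

def patch_daily_series(series):
    """series: date-sorted list of (date, value). Fill day gaps with the average of the neighbours."""
    patched = [series[0]]
    for (d0, v0), (d1, v1) in zip(series, series[1:]):
        gap_days = (d1 - d0).days
        for i in range(1, gap_days):
            patched.append((d0 + dt.timedelta(days=i), (v0 + v1) / 2))
        patched.append((d1, v1))
    return patched

series = [(dt.date(2018, 1, 1), -2.0), (dt.date(2018, 1, 3), -4.0)]
print(patch_daily_series(series))
# -> the missing 2018-01-02 gets the neighbour average -3.0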
コード例 #44
def get_for_location(location_name):

    if location_name == 'Hakkloa nord 372 moh':
        location = Location(location_name)

        location.eklima_NNM = 18700
        location.nve_temp = '6.24.4'

        location.utm_north = 6671401
        location.utm_east = 259900
        location.utm_zone = 33

        location.file_name = 'Hakkloa nord'
        return location

    # elif location_name == 'Otrøvatnet v/Nystuen 971 moh':
    #     location = Location(location_name)
    #
    #     location.eklima_TAM = 54710
    #     location.eklima_SA = 54710
    #
    #     location.utm_north = 6801955
    #     location.utm_east = 132994
    #     location.utm_zone = 33
    #
    #     # Data in range 2011.10.01 to 2013.07.19
    #     location.input_file = '{0}Kyrkjestølane værdata.csv'.format(se.data_path)
    #
    #     location.file_name = 'Otrøvatnet'
    #     return location

    elif location_name == 'Skoddebergvatnet - nord 101 moh':
        location = Location(location_name)

        # location.eklima_NNM = 87640         # Harstad Stadion
        location.nve_temp = '189.3.0'

        location.utm_north = 7612469
        location.utm_east = 593273
        location.utm_zone = 33

        location.file_name = 'Skoddebergvatnet nord'
        return location

    elif location_name == 'Giljastølsvatnet 412 moh':
        location = Location(location_name)

        #location.eklima_NNM = 43010, # Gone? Eik - Hove. Lies further south and a bit inland.
        location.eklima_NNM = 44560  # Sola is an alternative

        location.utm_east = -1904
        location.utm_north = 6553573
        location.utm_zone = 33

        location.file_name = 'Giljastølsvatnet'
        return location

    elif location_name == 'Baklidammen 200 moh':
        location = Location(location_name)

        #location.eklima_NNM = 68860   # TRONDHEIM - VOLL

        location.utm_east = 266550
        location.utm_north = 7040812
        location.utm_zone = 33

        location.file_name = 'Baklidammen'
        return location

    elif location_name == 'Storvannet, 7 moh':
        location = Location(location_name)

        #location.eklima_NNM = 95350    # BANAK - to the east, innermost in the fjord

        location.utm_east = 821340
        location.utm_north = 7862497
        location.utm_zone = 33

        location.file_name = 'Storvannet'
        return location

    else:
        location = Location(location_name)
        try:
            odata_call = gro.get_obs_location(location_name)

            location.utm_east = odata_call['UTMEast']
            location.utm_north = odata_call['UTMNorth']
            location.utm_zone = odata_call['UTMZone']

            location.file_name = '{0}'.format(location_name.replace(",","").replace("/","").replace("\"", ""))

            return location

        except:
            ml.log_and_print('setlocationparameters.py -> get_for_location: No such location.')
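
Any other location name falls back to a regObs lookup. Typical use (a sketch):

location = get_for_location('Hakkloa nord 372 moh')
# location.utm_east, location.utm_north and location.file_name are set as in the table above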
コード例 #45
0
def get_ice_thickness_from_surface_temp(ic,
                                        time_step,
                                        dh_snow,
                                        temp,
                                        melt_energy=None):
    """Given surface temperature and new snow on an ice-column, ice evolution is estimated. In the simplest case
    the surface temp is estimated from air temperature. More advances approaches calculates surface temperature
    by solving er energy balance equation.

    :param ic:          Ice column at the beginning of the time step. Object containing the ice column with metadata
    :param dh_snow:     New snow in period of time step. Given as float in SI units [m]
    :param temp:        Average temperature in period of time step. Given i C as float.
    :param time_step:   In seconds. 60*60*24 = 86400 is 24hrs
    :return:            Ice column at end of time step
    """

    dh_snow = float(dh_snow)

    # step the date forward one time step. We do it initially because the variable is also used and subtracted in the following calculations.
    ic.time_step_forward(time_step)

    # Add new snow on top of the column if we have ice and snow
    # and update the slush level/buoyancy given new snow
    if len(ic.column) != 0:
        if dh_snow != 0.:
            ic.add_layer_at_index(0, ice.IceLayer(dh_snow, 'new_snow'))
        ic.update_slush_level()

    # if surface or air temperature is FREEZING
    if temp < const.temp_f:

        # If no ice, freeze water to ice
        if len(ic.column) == 0:
            # The heat flux equation gives how much water will freeze. U_total for the equation is estimated.
            U_total = ice.add_layer_conductance_to_total(
                None, const.k_black_ice, 0, 10)
            dh = -temp * U_total * time_step / const.rho_water / const.L_fusion
            ic.add_layer_at_index(0, ice.IceLayer(dh, 'black_ice'))
            pass

        else:
            # Declaration of total conductance of layers above freezing layer
            U_total = None
            i = 0
            while time_step > 0 and i <= len(ic.column) - 1:

                # If the layer is a solid, it only adds to the total isolation. Unless it is the last and water is frozen to ice.
                if (ic.column[i].get_enum()) > 9:
                    U_total = ice.add_layer_conductance_to_total(
                        U_total, ic.column[i].conductivity,
                        ic.column[i].height, ic.column[i].get_enum())

                    # If the layer is the last layer of solids and thus at the bottom, we get freezing at the bottom
                    if i == len(ic.column) - 1:

                        # The heat flux equation gives how much water will freeze.
                        dh = -temp * U_total * time_step / const.rho_water / const.L_fusion
                        ic.add_layer_at_index(i + 1,
                                              ice.IceLayer(dh, 'black_ice'))
                        time_step = 0

                # Else the layer is a slush layer above or in the ice column and it will freeze fully or partially.
                # Note, we do not freeze slush in the same time step it occurs.
                elif not ic.in_slush_event:

                    # If the total conductance is None, we are dealing with the top layer and a surface/thin ice conductance mut be defined.
                    if U_total is None:
                        U_total = ice.add_layer_conductance_to_total(
                            None, const.k_slush_ice, 0, 11)

                    # Only the water part in the slush freezes
                    dh = -temp * U_total * time_step / const.rho_water / const.L_fusion / (
                        1 - const.part_ice_in_slush)

                    # If a layer totaly freezes during the tieme period, the rest of the time will be used to freeze a layer further down.
                    if ic.column[i].height < dh:

                        ic.column[i].set_type('slush_ice')

                        # The heat flux equation sorted for time
                        time_step_used = ic.column[
                            i].height * const.rho_water * const.L_fusion * (
                                1 - const.part_ice_in_slush) / -temp / U_total
                        time_step = time_step - time_step_used

                        # Layer height increases when water in the layer freezes
                        ic.column[i].height += ic.column[i].height * (
                            1 - const.part_ice_in_slush) * (
                                (const.rho_water - const.rho_slush_ice) /
                                const.rho_slush_ice)

                        # Update conductance
                        U_total = ice.add_layer_conductance_to_total(
                            U_total, ic.column[i].conductivity,
                            ic.column[i].height, ic.column[i].get_enum())

                    # Else all energy is used to freeze the layer only partially
                    else:
                        # The thickness that remains slush
                        ic.column[i].height -= dh

                        # dh has frozen to slush ice. Layer height increases when water in the layer freezes.
                        dh += dh * (1 - const.part_ice_in_slush) * (
                            (const.rho_water - const.rho_slush_ice) /
                            const.rho_slush_ice)
                        ic.add_layer_at_index(i, ice.IceLayer(dh, 'slush_ice'))

                        # Nothing more to freeze
                        time_step = 0

                # Slush event has happened and this is the first time step after the slush event. Do not create ice in the first time step.
                else:
                    # ml.log_and_print("[info] icethickness.py -> get_ice_thickness_from_surface_temp: No freezing event in the current time step due to slush event.", log_it=False, print_it=True)
                    ic.in_slush_event = False
                    # If we don't set time step to 0, layers further down will freeze.
                    time_step = 0

                # Go to next ice layer
                i += 1

    # if surface or air temperature is MELTING
    else:
        # In case surface temperatures are above 0C (when air temp is used to calculate ice evolution) there
        # should not be submitted a energy term from the energy balance calculations (melt_energy = None).
        if temp > 0.:
            # all melting is made by simple degree day model using different calibration constants for snow,
            # slush ice and black ice melting only effects the top layer (index = 0)
            while time_step > 0 and len(ic.column) > 0:
                if ic.column[0].type == 'water':
                    ic.remove_layer_at_index(0)
                else:
                    if ic.column[0].get_enum() >= 20:  # snow
                        meltingcoeff = const.meltingcoeff_snow
                    elif ic.column[0].type == 'slush_ice':
                        meltingcoeff = const.meltingcoeff_slush_ice
                    elif ic.column[0].type == 'slush':
                        meltingcoeff = const.meltingcoeff_slush
                    elif ic.column[0].type == 'black_ice':
                        meltingcoeff = const.meltingcoeff_black_ice
                    else:
                        ml.log_and_print(
                            "[info] icethickness.py -> get_ice_thickness_from_surface_temp: Melting on unknown layer type: {0}. Using slush_ice coeff."
                            .format(ic.column[0].type))
                        meltingcoeff = const.meltingcoeff_slush_ice

                    # degree day melting. I have separated the time factor from the melting coefficiant.
                    dh = meltingcoeff * time_step * (temp - const.temp_f)

                    # If the layer is thinner than the total melting, the layer is removed and the rest of the
                    # melting occurs in the layer below for the remainder of the time. Melting (dh) and time
                    # are proportional in the degree-day equation.
                    if ic.column[0].height < -dh:
                        time_step_used = ic.column[0].height / -dh * time_step
                        ic.remove_layer_at_index(0)
                        time_step = time_step - time_step_used

                    # the layer is only partly melted during this time_step
                    else:
                        ic.column[0].height = ic.column[0].height + dh
                        time_step = 0

        # In case surface temp is calculated from energy balance, surface temp is never above 0C, but if we have
        # melting and thus melt_energy is not None and temp == 0.
        elif melt_energy is not None:
            while time_step > 0 and len(ic.column) > 0:
                if ic.column[0].type == 'water':
                    ic.remove_layer_at_index(0)
                else:
                    # Energy available to melt, used with the latent heat of fusion (delta_h = Q/L/rho)
                    L_ice = const.L_fusion / 1000.  # joule to kilojoule
                    dh = melt_energy / L_ice / ic.column[
                        0].density * time_step / 24 / 60 / 60

                    # If the layer is thinner than the total melting, the layer is removed and the rest of the
                    # melting occurs in the layer below for the remainder of the time. Melting (dh) and time
                    # are proportional here as well.
                    if ic.column[0].height < -dh:
                        time_step_used = ic.column[0].height / -dh * time_step
                        ic.remove_layer_at_index(0)
                        time_step = time_step - time_step_used

                    # the layer is only partly melted during this time_step
                    else:
                        ic.column[0].height = ic.column[0].height + dh
                        time_step = 0

        else:
            ml.log_and_print(
                "[info] icethickness.py -> get_ice_thickness_from_surface_temp: Need either energy or positive temperatures in model to melt snow and ice."
            )

    ic.merge_and_remove_excess_layers()
    ic.merge_snow_layers_and_compress(temp)
    ic.update_draft_thickness()
    ic.update_water_line()
    ic.update_column_temperatures(temp)
    ic.update_total_column_height()
    ic.set_surface_temperature(temp)

    return ic
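
# A minimal, self-contained sketch of the degree-day melt loop above, for
# illustration only. The melting coefficient, layer heights and temperature
# below are made-up assumptions, not the values from the const module.
def melt_top_layers(layers, temp, time_step, meltingcoeff=-2.0e-8, temp_f=0.0):
    """layers: list of layer heights [m], top first. temp: temperature [C].
    time_step: seconds. Returns the remaining layer heights after melting."""
    layers = list(layers)
    while time_step > 0 and len(layers) > 0:
        # dh is negative: the height lost to melt in this time step.
        dh = meltingcoeff * time_step * (temp - temp_f)
        if layers[0] < -dh:
            # The layer melts away completely; only the proportional share of
            # the time step is spent, the rest melts the next layer down.
            time_step_used = layers[0] / -dh * time_step
            layers.pop(0)
            time_step = time_step - time_step_used
        else:
            # The layer is only partly melted; all time is used up.
            layers[0] = layers[0] + dh
            time_step = 0
    return layers


print(melt_top_layers([0.002, 0.30], temp=3.0, time_step=24 * 60 * 60))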
コード例 #46
0
def _plot_season(location_id,
                 from_date,
                 to_date,
                 observed_ice,
                 make_plots=True,
                 plot_folder=se.plot_folder):
    """Given a location id, a time period and some observations on this location id and this method
    calculates and optionally plots the ice evolution that season. Weather data from GTS.

    It is a sub method of plot_season_for_location_id and plot_season_for_all_regobs_locations.

    :param location_id:
    :param from_date:
    :param to_date:
    :param observed_ice:
    :param make_plots:
    :param plot_folder:     [string]        Path of folder for plots.

    :return calculated_ice, observed_ice, plot_filename:   calculated_ice and observed_ice are [list of Ice.IceColumn]; observed_ice is returned as given in input. plot_filename is [string].

    TODO: should accept observed_ice=None and then query for the observations. If still missing, set the ice cover onset to the start date.
    """

    year = '{0}-{1}'.format(from_date[0:4], to_date[2:4])

    # Change dates to datetime. Some of the get data modules require datetime
    from_date = dt.datetime.strptime(from_date, '%Y-%m-%d')
    to_date = dt.datetime.strptime(to_date, '%Y-%m-%d')

    # special rule for this season.
    if year == '2018-19':
        from_date = dt.datetime(2018, 9, 1)

    # If to_date is in the future, make sure it doesn't go too far.
    if to_date > dt.datetime.now():
        to_date = dt.datetime.now() + dt.timedelta(days=7)

    x, y = observed_ice[0].metadata['UTMEast'], observed_ice[0].metadata[
        'UTMNorth']

    # get weather data
    gridTemp = gts.getgts(x, y, 'tm', from_date, to_date)
    gridSno = gts.getgts(x, y, 'sdfsw', from_date, to_date)
    gridSnoTot = gts.getgts(x, y, 'sd', from_date, to_date)

    # adjust grid temperature (at grid elevation) to lake elevation.
    lake_altitude = gm.get_masl_from_utm33(x, y)
    gridTempNewElevation = we.adjust_temperature_to_new_altitude(
        gridTemp, lake_altitude)

    # strip metadata
    temp, date = we.strip_metadata(gridTempNewElevation, get_date_times=True)
    sno = we.strip_metadata(gridSno, False)
    snotot = we.strip_metadata(gridSnoTot, False)
    cc = dp.clouds_from_precipitation(sno)

    plot_filename = '{0}_{1}.png'.format(location_id, year)
    plot_path_and_filename = '{0}{1}'.format(plot_folder, plot_filename)

    try:
        if len(observed_ice) == 0:
            calculated_ice = it.calculate_ice_cover_air_temp(
                ice.IceColumn(date[0], []), date, temp, sno, cc)
        else:
            calculated_ice = it.calculate_ice_cover_air_temp(
                copy.deepcopy(observed_ice[0]), date, temp, sno, cc)

        if make_plots:
            pts.plot_ice_cover(calculated_ice, observed_ice, date, temp, sno,
                               snotot, plot_path_and_filename)

    except:
        # raise
        error_msg = sys.exc_info()[0]
        ml.log_and_print(
            "[Error] calculateandplot.py -> _plot_season: {}. Could not plot {}."
            .format(error_msg, location_id))
        calculated_ice = None

    return calculated_ice, observed_ice, plot_filename
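
# The date handling at the top of _plot_season can be sketched on its own.
# This helper is a hypothetical illustration of the same steps: build the
# season label from 'yyyy-mm-dd' strings, parse them to datetime, and cap
# to_date at one week past now.
import datetime as dt


def season_and_dates(from_date, to_date):
    year = '{0}-{1}'.format(from_date[0:4], to_date[2:4])
    from_date = dt.datetime.strptime(from_date, '%Y-%m-%d')
    to_date = dt.datetime.strptime(to_date, '%Y-%m-%d')
    # If to_date is in the future, cap it at one week from now.
    if to_date > dt.datetime.now():
        to_date = dt.datetime.now() + dt.timedelta(days=7)
    return year, from_date, to_date


print(season_and_dates('2018-10-01', '2019-06-01'))  # ('2018-19', ...)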
コード例 #47
0
    def map_to_eaws_problems(self):
        """The EAWS problems are:

        eaws_problems = ['New snow',
                 'Wind-drifted snow',
                 'Persistent weak layers',
                 'Wet snow',
                 'Gliding snow']

        Mapping forecasts to EAWS problems: For the forecasts, we have a classification of avalanche
        problem type. These are mapped to EAWS problems in the following way:

        Loose dry avalanches --> New snow
        Loose wet avalanches --> Wet snow
        Storm slab avalanches --> New snow
        Wind slab avalanches --> Wind-drifted snow
        New snow --> New snow
        Persistent slab avalanches --> Persistent weak layers
        Persistent weak layer --> Persistent weak layers
        Persistent deep slab avalanches --> Persistent weak layers
        Wet snow --> Wet snow
        Wet slab avalanches --> Wet snow
        Glide avalanches --> Gliding snow


        Mapping observations to EAWS problems: For the observations, we don’t have a classification of
        avalanche problems yet. The mapping is therefore somewhat more complex.

        Some avalanche types give the avalanche problem directly:

        Loose dry avalanche --> New snow
        Loose wet avalanche --> Wet snow
        Wet slab avalanche --> Wet snow
        Glide avalanche --> Gliding snow

        The expected avalanche type does not always give the problem. Neither is it mandatory for an observer
        to include avalanche type in the observation. In some cases, weak layers correspond directly to an
        avalanche problem:

        Poor bonding between layers in wind deposited snow --> Wind-drifted snow
        Wet snow / melting near the ground --> Gliding snow
        Wet snow on the surface --> Wet snow
        Water pooling in / above snow layers --> Wet snow

        The cases of dry slabs require some more inspection. First, if the slab over the weak layer is
        soft the avalanche problems are classified as “new snow” problems. Else, if it has a buried weak
        layer of surface hoar or of faceted snow the problem is classified as a persistent weak layer.


        :return:
        """

        # problem_tid is available in the forecasts.
        problem_tid_to_eaws_problems = {
            0: 'Not given',
            3: 'New snow',                  # Loose dry avalanches
            5: 'Wet snow',                  # Loose wet avalanches
            7: 'New snow',                  # Storm slab avalanches
            10: 'Wind-drifted snow',        # Wind slab avalanches
            20: 'New snow',                 # New snow
            30: 'Persistent weak layers',   # Persistent slab avalanches
            35: 'Persistent weak layers',   # Persistent weak layer
            37: 'Persistent weak layers',   # Persistent deep slab avalanches
            40: 'Wet snow',                 # Wet snow
            45: 'Wet snow',                 # Wet slab avalanches
            50: 'Gliding snow',             # Glide avalanches
        }

        # AvalancheExtKDV holds information on avalanche type (self.aval_type).
        # id40: Cornice and id30: Slush flow are not included.
        # id20: Dry slab is not uniquely mapped to an avalanche problem.
        avalanche_ext_tid_to_eaws_problems = {
            10: 'New snow',         # Loose dry avalanche
            15: 'Wet snow',         # Loose wet avalanche
            # 20: ,                 # Dry slab avalanche
            25: 'Wet snow',         # Wet slab avalanche
            27: 'Gliding snow',     # Glide avalanche
            # 30: ,                 # Slush avalanche
            # 40: ,                 # Cornice
        }

        aval_cause_to_eaws_problems = {
            # 10: ,                     # Buried weak layer of new snow
            # 11: ,                     # Buried weak layer of surface hoar
            # 12: ,                     # Buried weak layer of graupel
            # 13: ,                     # Buried weak layer of faceted snow near surface
            # 14: ,                     # Poor bonding between crust and overlying snow
            15: 'Wind-drifted snow',    # Poor bonding between layers in wind deposited snow
            # 16: ,                     # Buried weak layer of faceted snow near the ground
            # 17: ,                     # Buried weak layer of faceted snow near vegetation
            # 18: ,                     # Buried weak layer of faceted snow above a crust
            # 19: ,                     # Buried weak layer of faceted snow beneath a crust
            20: 'Gliding snow',         # Wet snow / melting near the ground
            21: 'Wet snow',             # Wet snow on the surface
            22: 'Wet snow',             # Water pooling in / above snow layers
            # 23: ,                     # Water - saturated snow
            # 24: ,                     # Loose snow
            # 25: ,                     # Rain / rise in temperature / solar heating
        }

        self.eaws_problem = None

        if self.source == 'Observation':
            # first try some avalanche types that are uniquely connected to eaws problem
            if self.aval_type_tid in avalanche_ext_tid_to_eaws_problems.keys():
                self.eaws_problem = avalanche_ext_tid_to_eaws_problems[
                    self.aval_type_tid]

            # then try some causes that are uniquely linked to eaws problems
            if self.eaws_problem is None:
                if self.cause_tid in aval_cause_to_eaws_problems.keys():
                    self.eaws_problem = aval_cause_to_eaws_problems[
                        self.cause_tid]

            # if eaws problem still none, try some cases of dry slabs
            if self.eaws_problem is None:
                if self.aval_type_tid == 20:
                    if self.cause_tid in [10, 14]:
                        # only the AvalancheEvalProblem2 table has case attributes
                        if self.regobs_table == 'AvalancheEvalProblem2':
                            if self.cause_attribute_soft_tid > 0:
                                self.eaws_problem = 'New snow'
                            else:
                                self.eaws_problem = 'Wind-drifted snow'
                                # self.eaws_problem = None
                    if self.cause_tid in [11, 13, 16, 17, 18, 19]:
                        self.eaws_problem = 'Persistent weak layers'

        elif self.source == 'Forecast':
            if self.problem_tid is not None:
                self.eaws_problem = problem_tid_to_eaws_problems[
                    self.problem_tid]

        else:
            ml.log_and_print(
                'getproblems.py -> AvalancheProblem.map_to_eaws_problems: Unknown source.'
            )
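
# For forecasts the mapping above is a plain dictionary lookup. A minimal
# sketch of that path, using a trimmed copy of problem_tid_to_eaws_problems
# and dict.get() so an unknown TID falls back to None instead of raising a
# KeyError (a design choice for the sketch, not how the class above does it).
PROBLEM_TID_TO_EAWS = {
    3: 'New snow',                 # Loose dry avalanches
    10: 'Wind-drifted snow',       # Wind slab avalanches
    30: 'Persistent weak layers',  # Persistent slab avalanches
    45: 'Wet snow',                # Wet slab avalanches
    50: 'Gliding snow',            # Glide avalanches
}


def eaws_problem_for_forecast(problem_tid):
    return PROBLEM_TID_TO_EAWS.get(problem_tid)


print(eaws_problem_for_forecast(30))  # Persistent weak layers
print(eaws_problem_for_forecast(99))  # None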
コード例 #48
0
def calculate_and_plot_location(location_name,
                                from_date,
                                to_date,
                                sub_plot_folder='',
                                make_plots=True,
                                return_values=False):
    """ due to get_all_season_ice returns data grouped be location_id
    For a given LocationName in regObs calculate the ice cover between two dates. Optional, make plots
    and/or return the calculations and observations for this location. Different sources for weather data
    may be given, chartserver grid is default.

    :param location_name:
    :param from_date:               [String] 'yyyy-mm-dd'
    :param to_date:                 [String] 'yyyy-mm-dd'
    :param sub_plot_folder:
    :param make_plots:
    :param return_values:           [bool]  If true the calculated and observed data is returned
    """

    loc = slp.get_for_location(location_name)
    year = '{0}-{1}'.format(from_date[0:4], to_date[2:4])
    lake_file_name = '{0} {1}'.format(
        fe.make_standard_file_name(loc.file_name), year)
    observed_ice = gro.get_observations_on_location_id(loc.regobs_location_id,
                                                       year)

    # Change dates to datetime. Some of the getdata modules require datetime
    from_date = dt.datetime.strptime(from_date, '%Y-%m-%d')
    to_date = dt.datetime.strptime(to_date, '%Y-%m-%d')

    # Special rule for the 2018-19 season.
    if year == '2018-19':
        from_date = dt.datetime(2018, 10, 15)

    # If to_date is in the future, make sure it doesn't go too far.
    if to_date > dt.datetime.now():
        to_date = dt.datetime.now() + dt.timedelta(days=7)

    if loc.weather_data_source == 'eKlima':
        wsTemp = gws.getMetData(loc.eklima_TAM, 'TAM', from_date, to_date, 0,
                                'list')
        temp, date = we.strip_metadata(wsTemp, True)

        wsSno = gws.getMetData(loc.eklima_SA, 'SA', from_date, to_date, 0,
                               'list')
        snotot = we.strip_metadata(wsSno)
        sno = dp.delta_snow_from_total_snow(snotot)

        # Clouds. If not from met.no it is parametrised from precipitation.
        if loc.eklima_NNM:
            wsCC = gws.getMetData(loc.eklima_NNM, 'NNM', from_date, to_date, 0,
                                  'list')
            cc = we.strip_metadata(wsCC)
        else:
            cc = dp.clouds_from_precipitation(sno)

        plot_filename = '{0}{1} eklima.png'.format(
            se.plot_folder + sub_plot_folder, lake_file_name)

    elif loc.weather_data_source == 'grid':
        x, y = loc.utm_east, loc.utm_north

        gridTemp = gts.getgts(x, y, 'tm', from_date, to_date)
        gridSno = gts.getgts(x, y, 'sdfsw', from_date, to_date)
        gridSnoTot = gts.getgts(x, y, 'sd', from_date, to_date)

        temp, date = we.strip_metadata(gridTemp, get_date_times=True)
        sno = we.strip_metadata(gridSno, False)
        snotot = we.strip_metadata(gridSnoTot, False)

        if loc.eklima_NNM:
            wsCC = gws.getMetData(loc.eklima_NNM, 'NNM', from_date, to_date, 0,
                                  'list')
            cc = we.strip_metadata(wsCC)
        else:
            cc = dp.clouds_from_precipitation(sno)

        plot_filename = '{0}{1} grid.png'.format(
            se.plot_folder + sub_plot_folder, lake_file_name)

    elif loc.weather_data_source == 'nve':
        x, y = loc.utm_east, loc.utm_north

        # Temp from NVE station, or from grid if not available.
        if loc.nve_temp:
            temp_obj = gcsd.getStationdata(loc.nve_temp,
                                           '17.1',
                                           from_date,
                                           to_date,
                                           timeseries_type=0)
        else:
            temp_obj = gcsd.getGriddata(x, y, 'tm', from_date, to_date)
        temp, date = we.strip_metadata(temp_obj, get_date_times=True)

        # Snow from NVE station, or from grid if not available.
        if loc.nve_snow:
            snotot_obj = gcsd.getStationdata(loc.nve_snow,
                                             '2002.1',
                                             from_date,
                                             to_date,
                                             timeseries_type=0)
            snotot = we.strip_metadata(snotot_obj)
            sno = dp.delta_snow_from_total_snow(snotot_obj)
        else:
            snotot_obj = gcsd.getGriddata(x,
                                          y,
                                          'sd',
                                          from_date,
                                          to_date,
                                          timeseries_type=0)
            sno_obj = gcsd.getGriddata(x,
                                       y,
                                       'fsw',
                                       from_date,
                                       to_date,
                                       timeseries_type=0)
            snotot = we.strip_metadata(snotot_obj)
            sno = we.strip_metadata(sno_obj)

        # Clouds. If not from met.no it is parametrised from precipitation.
        if loc.eklima_NNM:
            cc_obj = gws.getMetData(18700, 'NNM', from_date, to_date, 0,
                                    'list')
        else:
            cc_obj = dp.clouds_from_precipitation(sno)
        cc = we.strip_metadata(cc_obj)

        plot_filename = '{0}{1} nve.png'.format(
            se.plot_folder + sub_plot_folder, lake_file_name)

    elif loc.weather_data_source == 'file':
        date, temp, sno, snotot = gfd.read_weather(from_date, to_date,
                                                   loc.input_file)
        cc = dp.clouds_from_precipitation(sno)

        plot_filename = '{0}{1} file.png'.format(
            se.plot_folder + sub_plot_folder, lake_file_name)

    else:
        ml.log_and_print(
            "[Error] calculateandplot.py -> calculate_and_plot_location: Invalid source for weather data."
        )
        return

    try:
        if len(observed_ice) == 0:
            calculated_ice = it.calculate_ice_cover_air_temp(
                ice.IceColumn(date[0], []), date, temp, sno, cc)
        else:
            calculated_ice = it.calculate_ice_cover_air_temp(
                copy.deepcopy(observed_ice[0]), date, temp, sno, cc)

        if make_plots:
            pts.plot_ice_cover(calculated_ice, observed_ice, date, temp, sno,
                               snotot, plot_filename)

    except:
        error_msg = sys.exc_info()[0]
        ml.log_and_print(
            "[Error] calculateandplot.py -> calculate_and_plot_location: {}. Could not plot {}."
            .format(error_msg, location_name))
        calculated_ice = None

    if return_values:
        return calculated_ice, observed_ice
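
# A typical call, assuming the function's 'yyyy-mm-dd' date convention. The
# location name below is hypothetical; real names must exist in the location
# setup that slp.get_for_location() reads from.
calculated, observed = calculate_and_plot_location(
    'Some lake name', '2018-10-01', '2019-06-01',
    make_plots=True, return_values=True)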
コード例 #49
0
def plot_season_for_all_regobs_locations(year='2018-19', calculate_new=False, get_new_obs=False, make_plots=False, delete_old_plots=False):
    """Method specialized for scheduled plotting for iskart.no.
    Method makes a season plot for all ObsLocations in regObs where we have a first ice date.

    It may take some time to plot; there are about 250 lakes in a season, and for each plot weather parameters are requested from the GTS.

    The workings of the method:
    1.  Get all location ids and the belonging observations where we have first ice.
    2.1 If calculate_new, empty the season plot folder and the pickle in local storage, then calculate (and make plots if requested).
    2.2 Make the metadata json for showing files on iskart.no.
    3.  All calculations are compared to observed data in a scatter plot.

    :param year:                [String] Season for plotting. eg: '2016-17'
    :param calculate_new:       [bool] Calculate new ice thicknesses. If False, only make the seasonal scatter plot.
    :param get_new_obs:         [bool]
    :param make_plots:          [bool]  If False, all calculations are made but only the scatter comparison against observations is plotted.
    :param delete_old_plots:    [bool]  If True all former plots and pickles are removed.
    """

    pickle_file_name_and_path = '{0}all_calculated_ice_{1}.pickle'.format(se.local_storage, year)
    location_id_metadata_json = '{}location_id_metadata.json'.format(se.sesong_plots_folder)

    if calculate_new:
        if delete_old_plots:
            # Empty the sesong plot folder
            for file in os.listdir(se.sesong_plots_folder):
                file_path = os.path.join(se.sesong_plots_folder, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except OSError:
                    pass

            # Remove the old pickled data because we are getting new data.
            try:
                os.remove(pickle_file_name_and_path)
            except OSError:
                pass

        all_observations = gro.get_all_season_ice(year, get_new=get_new_obs)
        from_date, to_date = gm.get_dates_from_year(year)
        all_calculated = {}
        all_observed = {}
        location_id_metadata = {}

        for location_id, observed_ice in all_observations.items():
            try:
                calculated, observed, plot_filename = _plot_season(
                    location_id, from_date, to_date, observed_ice, make_plots=make_plots, plot_folder=se.sesong_plots_folder)
                all_calculated[location_id] = calculated
                all_observed[location_id] = observed
            except:
                error_msg = sys.exc_info()[0]
                ml.log_and_print("[error] calculateandplot.py -> plot_season_for_all_regobs_locations: Error making plot for {}: {}".format(location_id, error_msg))
                continue

            # Make the json with metadata needed for iskart.no. Add only if the plot was made and thus the file exists.
            if os.path.isfile(se.sesong_plots_folder + plot_filename):

                region_name = observed_ice[0].metadata['OriginalObject']['ForecastRegionName']
                if not region_name:
                    region_name = 'Ukjent region'
                lake_id = observed_ice[0].metadata['LocationID']
                x, y = observed_ice[0].metadata['UTMEast'], observed_ice[0].metadata['UTMNorth']
                lake_name = observed_ice[0].metadata['LocationName']
                if not lake_name:
                    lake_name = 'E{} N{}'.format(x, y)

                location_id_metadata[location_id] = {'RegionName': region_name,
                                                     'LakeID': lake_id,
                                                     'LakeName': lake_name,
                                                     'PlotFileName': plot_filename}

        mp.pickle_anything([all_calculated, all_observed], pickle_file_name_and_path)

        try:
            json_string = json.dumps(location_id_metadata, ensure_ascii=False).encode('utf-8')
            with open(location_id_metadata_json, 'wb') as f:
                f.write(json_string)
        except:
            error_msg = sys.exc_info()[0]
            ml.log_and_print("[error]calculateandplot.py -> plot_season_for_all_regobs_locations: Cant write json. {}".format(error_msg))

    else:
        [all_calculated, all_observed] = mp.unpickle_anything(pickle_file_name_and_path)

    try:
        pts.scatter_calculated_vs_observed(all_calculated, all_observed, year)
    except:
        error_msg = sys.exc_info()[0]
        ml.log_and_print("[error] calculateandplot.py -> plot_season_for_all_regobs_locations: {}. Could not plot scatter {}.".format(error_msg, year))
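
# The json writing above (dumps with ensure_ascii=False, encode to utf-8,
# write the bytes) can be expressed with a text-mode file and an explicit
# encoding instead. A small equivalent sketch; the file name and content
# below are made up for illustration.
import json

location_id_metadata = {57019: {'RegionName': 'Ukjent region',
                                'LakeName': 'E123456 N6789012',
                                'PlotFileName': '57019_2018-19.png'}}

with open('location_id_metadata.json', 'w', encoding='utf-8') as f:
    json.dump(location_id_metadata, f, ensure_ascii=False)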
コード例 #50
0
def calculate_and_plot9d_season(period='2018-19'):
    """Calculate ice columns for 9 days and make plots of all ice thickness for a given season or optionally 'Today'.

    The inner workings:
    1.1 Retrieves ice thickness observations from regObs. If period is given as a season, all observations for
        this season will be requested. All previous plots and local storage will be deleted.
    1.2 If period='Today', ice thickness observations from today will be requested and plotted. Older plots will
        remain in the folder. The metadata dict will be merged.
    2.  Calculate the 9 day prognosis from the observation time and plots the result.
    3.  Make a metadata json for handling files on iskart.no. Only confirmed files in folder will be
        added to metadata json.

    :param period:    [String] Default is the current season ('2018-19').
    :return:
    """

    log_reference = 'calculateandplot.py -> calculate_and_plot9d_season'

    # File names
    regid_metadata_json = '{}regid_metadata.json'.format(se.ni_dogn_plots_folder)
    regid_metadata_pickle = '{}regid_metadata.pickle'.format(se.local_storage)

    if period == 'Today':
        ice_thicks = gro.get_ice_thickness_today()

    else:
        # Empty the 9dogn folder
        # for file in os.listdir(se.ni_dogn_plots_folder):
        #     file_path = os.path.join(se.ni_dogn_plots_folder, file)
        #     try:
        #         if os.path.isfile(file_path):
        #             os.unlink(file_path)
        #     except OSError:
        #         pass

        # remove pickle with metadata
        try:
            os.remove(regid_metadata_pickle)
        except OSError:
            pass

        # Get new observations
        ice_thicks = gro.get_ice_thickness_observations(period, reset_and_get_new=True)

    # Calculate and plot
    for k, v in ice_thicks.items():

        # If the file is missing, make it. If the observation is older than 11 days it is certainly based on gridded data and no plot file is needed.
        make_plot = False
        max_file_age = 11
        date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)
        file_names = os.listdir(se.ni_dogn_plots_folder)
        plot_filename = '{0}.png'.format(k)
        if plot_filename not in file_names:
            make_plot = True
        else:
            if v.date.date() > date_limit.date():
                make_plot = True

        if make_plot:
            try:
                calculate_and_plot9d_regid(k, plot_folder=se.ni_dogn_plots_folder, observed_ice=v)
            except:
                error_msg = sys.exc_info()[0]
                ml.log_and_print("[Error] {} Error making plot for {} {}".format(log_referance, k, error_msg))

    # Make json with metadata for using files on iskart.no. Load metadata from pickle if available;
    # new observations where a plot is available will be added.
    if not os.path.exists(regid_metadata_pickle):
        regid_metadata = {}
    else:
        regid_metadata = mp.unpickle_anything(regid_metadata_pickle)

    list_of_plots = os.listdir(se.ni_dogn_plots_folder)

    for k, v in ice_thicks.items():
        # only add metadata on files that are in the folder
        if '{0}.png'.format(k) in list_of_plots:
            date = v.date.date()

            region_name = v.metadata['OriginalObject']['ForecastRegionName']
            if not region_name:
                region_name = 'Ukjent region'
            x, y = v.metadata['UTMEast'], v.metadata['UTMNorth']
            lake_id = v.metadata['LocationID']
            lake_name = v.metadata['LocationName']
            if not lake_name:
                lake_name = 'E{} N{}'.format(x,y)

            regid_metadata[k] = {'RegionName':region_name,'LakeID':lake_id,'LakeName':lake_name,'Date':'{}'.format(date)}

    mp.pickle_anything(regid_metadata, regid_metadata_pickle)

    json_string = json.dumps(regid_metadata, ensure_ascii=False).encode('utf-8')
    with open(regid_metadata_json, 'wb') as f:
        f.write(json_string)
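
# The decision above of whether a plot needs regeneration (file missing, or
# the observation is newer than the cutoff) is self-contained enough to
# sketch as a helper. The function name and defaults are assumptions for
# illustration, not part of the project.
import datetime as dt
import os


def needs_new_plot(regid, obs_date, plot_folder, max_file_age=11):
    """True if '<regid>.png' is missing from plot_folder, or if obs_date is
    newer than max_file_age days (an older observation is based on gridded
    data and needs no new plot file)."""
    date_limit = dt.datetime.now() - dt.timedelta(days=max_file_age)
    plot_filename = '{0}.png'.format(regid)
    if plot_filename not in os.listdir(plot_folder):
        return True
    return obs_date.date() > date_limit.date()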
コード例 #51
0
def _make_plot_dangerlevels_simple(warnings, all_avalanche_evaluations,
                                   file_path, from_date, to_date):
    """Works best for plotting multiple regions over one season.

    :param warnings:
    :param all_avalanche_evaluations:
    :param file_path:
    :param from_date:
    :param to_date:
    :return:
    """

    # Count the forecast-correctness evaluations
    correct_not_given = 0
    correct_to_low = 0
    correct_correct = 0
    correct_to_high = 0

    for e in all_avalanche_evaluations:
        if e.ForecastCorrectTID == 0:
            correct_not_given += 1
        elif e.ForecastCorrectTID == 1:
            correct_correct += 1
        elif e.ForecastCorrectTID == 2:
            correct_to_low += 1
        elif e.ForecastCorrectTID == 3:
            correct_to_high += 1
        else:
            ml.log_and_print(
                "allforecasteddangerlevels.py -> _make_plot_dangerlevels_simple: Illegal ForecastCorrectTID given.",
                log_it=True,
                print_it=False)

    correct_total = correct_correct + correct_to_high + correct_to_low

    # find danger levels per day
    all_danger_levels = []

    for w in warnings:
        all_danger_levels.append(w.danger_level)

    dl1 = all_danger_levels.count(1)
    dl2 = all_danger_levels.count(2)
    dl3 = all_danger_levels.count(3)
    dl4 = all_danger_levels.count(4)
    dl5 = all_danger_levels.count(5)

    dict_of_dates = {}
    list_of_dates = [
        from_date + dt.timedelta(days=x)
        for x in range(0, (to_date - from_date).days)
    ]

    for d in list_of_dates:
        date_with_data_obj = AllDangersAndCorrectsOnDate(d)

        for w in warnings:
            if w.date == d:
                # if w.region_regobs_id < 149:    # ordinary forecast
                date_with_data_obj.add_danger(w.danger_level)
                # else:                           # county forecast on dangerlevel 4 or 5
                #    date_with_data_obj.add_county_danger(w.danger_level)

        dict_of_dates[d] = date_with_data_obj

    # Figure dimensions
    fsize = (12, 8)
    fig = plt.figure(figsize=fsize)
    plt.clf()

    head_x = 0.23
    head_y = 0.9

    fig.text(head_x + 0.018,
             head_y,
             "Varsom snøskredvarsel for sesongen {0}-{1}".format(
                 from_date.strftime('%Y'), to_date.strftime('%y')),
             fontsize=19)
    fig.text(
        head_x + 0.05,
        head_y - 0.05,
        "Antall:  {0} fg1.  {1} fg2.  {2} fg3.  {3} fg4.  {4} fg5.".format(
            dl1, dl2, dl3, dl4, dl5),
        fontsize=15)
    fig.text(
        head_x - 0.06,
        head_y - 0.09,
        "Treffsikkerhet av {0} vurderinger: {1:.0f}% riktig ({2:.0f}% for høy og {3:.0f}% for lav)"
        .format(correct_total, 100 * correct_correct / correct_total,
                100 * correct_to_high / correct_total,
                100 * correct_to_low / correct_total),
        fontsize=15)

    dl_colors = ['0.5', '#ccff66', '#ffff00', '#ff9900', '#ff0000', 'k']

    for v in dict_of_dates.values():
        if len(v.dangers) > 0:
            line_start = 0  # -1*(v.dangers.count(1) + v.dangers.count(2))
            line_end = line_start
            for dl in range(0, 6, 1):
                line_end += v.dangers.count(dl)
                plt.vlines(v.date,
                           line_start,
                           line_end,
                           lw=3.9,
                           colors=dl_colors[dl])
                line_start = line_end

    plt.ylabel("Antall varsel")

    fig = plt.gcf()
    fig.subplots_adjust(top=0.75)
    fig.subplots_adjust(bottom=0.15)

    # full control of the axis
    ax = plt.gca()
    ax.spines['right'].set_visible(False)
    # ax.spines['left'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.grid(True)
    # ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    ax.set_ylim([0, 28])
    ax.set_xlim([from_date, to_date])

    # fig.text(0.1, 0.1, 'Title', fontsize=14, zorder=6, color='k', bbox={'facecolor': 'silver', 'alpha': 0.5, 'pad': 4})
    legend_x = 0.20
    legend_y = 0.05
    fig.text(0.01 + legend_x,
             0.005 + legend_y,
             'lll',
             color=dl_colors[1],
             fontsize=5,
             bbox={'facecolor': dl_colors[1]})
    fig.text(0.03 + legend_x, 0 + legend_y, 'fg1 - Liten')
    fig.text(0.12 + legend_x,
             0.005 + legend_y,
             'lll',
             color=dl_colors[2],
             fontsize=5,
             bbox={'facecolor': dl_colors[2]})
    fig.text(0.14 + legend_x, 0 + legend_y, 'fg2 - Moderat')
    fig.text(0.26 + legend_x,
             0.005 + legend_y,
             'lll',
             color=dl_colors[3],
             fontsize=5,
             bbox={'facecolor': dl_colors[3]})
    fig.text(0.28 + legend_x, 0 + legend_y, 'fg3 - Betydelig')
    fig.text(0.40 + legend_x,
             0.005 + legend_y,
             'lll',
             color=dl_colors[4],
             fontsize=5,
             bbox={'facecolor': dl_colors[4]})
    fig.text(0.42 + legend_x, 0 + legend_y, 'fg4 - Stor')
    fig.text(0.50 + legend_x,
             0.005 + legend_y,
             'lll',
             color=dl_colors[5],
             fontsize=5,
             bbox={'facecolor': dl_colors[5]})
    fig.text(0.52 + legend_x, 0 + legend_y, 'fg5 - Meget stor')

    plt.savefig("{0}".format(file_path))
    plt.close(fig)
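
# The stacking in the plot body works by drawing one vertical segment per
# danger level on each date, each segment starting where the previous one
# ended, so the daily counts pile up into a colored column. A minimal
# reproduction with made-up counts:
import datetime as dt

import matplotlib.pyplot as plt

dl_colors = ['0.5', '#ccff66', '#ffff00', '#ff9900', '#ff0000', 'k']

days = [dt.date(2019, 1, 1) + dt.timedelta(days=x) for x in range(3)]
# Made-up counts of forecasts per danger level (list index = level).
counts = [[0, 2, 5, 3, 0, 0], [0, 1, 6, 4, 1, 0], [0, 3, 4, 2, 0, 0]]

for day, day_counts in zip(days, counts):
    line_start = 0
    for dl in range(0, 6, 1):
        line_end = line_start + day_counts[dl]
        # Each level's segment stacks on top of the previous one.
        plt.vlines(day, line_start, line_end, lw=3.9, colors=dl_colors[dl])
        line_start = line_end

plt.ylabel('Antall varsel')
plt.savefig('stacked_danger_levels.png')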
コード例 #52
0
def calculate_and_plot_location(location_name, from_date, to_date, sub_plot_folder='', make_plots=True, return_values=False):
    """ due to get_all_season_ice returns data grouped be location_id
    For a given LocationName in regObs calculate the ice cover between two dates. Optional, make plots
    and/or return the calculations and observations for this location. Different sources for weather data
    may be given, chartserver grid is default.

    :param location_name:
    :param from_date:               [String] 'yyyy-mm-dd'
    :param to_date:                 [String] 'yyyy-mm-dd'
    :param sub_plot_folder:
    :param make_plots:
    :param return_values:           [bool]  If true the calculated and observed data is returned
    """

    loc = slp.get_for_location(location_name)
    year = '{0}-{1}'.format(from_date[0:4], to_date[2:4])
    lake_file_name = '{0} {1}'.format(fe.make_standard_file_name(loc.file_name), year)
    observed_ice = gro.get_observations_on_location_id(loc.regobs_location_id, year)

    # Change dates to datetime. Some of the getdata modules require datetime
    from_date = dt.datetime.strptime(from_date, '%Y-%m-%d')
    to_date = dt.datetime.strptime(to_date, '%Y-%m-%d')

    # Special rule for the 2018-19 season.
    if year == '2018-19':
        from_date = dt.datetime(2018, 10, 15)

    # If to_date is in the future, make sure it doesn't go too far.
    if to_date > dt.datetime.now():
        to_date = dt.datetime.now() + dt.timedelta(days=7)

    if loc.weather_data_source == 'eKlima':
        wsTemp = gws.getMetData(loc.eklima_TAM, 'TAM', from_date, to_date, 0, 'list')
        temp, date = we.strip_metadata(wsTemp, True)

        wsSno = gws.getMetData(loc.eklima_SA, 'SA', from_date, to_date, 0, 'list')
        snotot = we.strip_metadata(wsSno)
        sno = dp.delta_snow_from_total_snow(snotot)

        # Clouds. If not from met.no it is parametrised from precipitation.
        if loc.eklima_NNM:
            wsCC = gws.getMetData(loc.eklima_NNM, 'NNM', from_date, to_date, 0, 'list')
            cc = we.strip_metadata(wsCC)
        else:
            cc = dp.clouds_from_precipitation(sno)

        plot_filename = '{0}{1} eklima.png'.format(se.plot_folder + sub_plot_folder, lake_file_name)

    elif loc.weather_data_source == 'grid':
        x, y = loc.utm_east, loc.utm_north

        gridTemp = gts.getgts(x, y, 'tm', from_date, to_date)
        gridSno = gts.getgts(x, y, 'sdfsw', from_date, to_date)
        gridSnoTot = gts.getgts(x, y, 'sd', from_date, to_date)

        temp, date = we.strip_metadata(gridTemp, get_date_times=True)
        sno = we.strip_metadata(gridSno, False)
        snotot = we.strip_metadata(gridSnoTot, False)

        if loc.eklima_NNM:
            wsCC = gws.getMetData(loc.eklima_NNM, 'NNM', from_date, to_date, 0, 'list')
            cc = we.strip_metadata(wsCC)
        else:
            cc = dp.clouds_from_precipitation(sno)

        plot_filename = '{0}{1} grid.png'.format(se.plot_folder + sub_plot_folder, lake_file_name)

    elif loc.weather_data_source == 'nve':
        x, y = loc.utm_east, loc.utm_north

        # Temp from NVE station, or from grid if not available.
        if loc.nve_temp:
            temp_obj = gcsd.getStationdata(loc.nve_temp, '17.1', from_date, to_date, timeseries_type=0)
        else:
            temp_obj = gcsd.getGriddata(x, y, 'tm', from_date, to_date)
        temp, date = we.strip_metadata(temp_obj, get_date_times=True)

        # Snow from NVE station, or from grid if not available.
        if loc.nve_snow:
            snotot_obj = gcsd.getStationdata(loc.nve_snow, '2002.1', from_date, to_date, timeseries_type=0)
            snotot = we.strip_metadata(snotot_obj)
            sno = dp.delta_snow_from_total_snow(snotot_obj)
        else:
            snotot_obj = gcsd.getGriddata(x, y, 'sd', from_date, to_date, timeseries_type=0)
            sno_obj = gcsd.getGriddata(x, y, 'fsw', from_date, to_date, timeseries_type=0)
            snotot = we.strip_metadata(snotot_obj)
            sno = we.strip_metadata(sno_obj)

        # Clouds. If not from met.no it is parametrised from precipitation.
        if loc.eklima_NNM:
            cc_obj = gws.getMetData(18700, 'NNM', from_date, to_date, 0, 'list')
        else:
            cc_obj = dp.clouds_from_precipitation(sno)
        cc = we.strip_metadata(cc_obj)

        plot_filename = '{0}{1} nve.png'.format(se.plot_folder + sub_plot_folder, lake_file_name)

    elif loc.weather_data_source == 'file':
        date, temp, sno, snotot = gfd.read_weather(from_date, to_date, loc.input_file)
        cc = dp.clouds_from_precipitation(sno)

        plot_filename = '{0}{1} file.png'.format(se.plot_folder + sub_plot_folder, lake_file_name)

    else:
        ml.log_and_print("[Error] runicethickness -> calculate_and_plot_location: Invalid scource for weather data.")
        return

    try:
        if len(observed_ice) == 0:
            calculated_ice = it.calculate_ice_cover_air_temp(ice.IceColumn(date[0], []), date, temp, sno, cc)
        else:
            calculated_ice = it.calculate_ice_cover_air_temp(copy.deepcopy(observed_ice[0]), date, temp, sno, cc)

        if make_plots:
            pts.plot_ice_cover(calculated_ice, observed_ice, date, temp, sno, snotot, plot_filename)

    except:
        error_msg = sys.exc_info()[0]
        ml.log_and_print("[Error] calculateandplot.py -> calculate_and_plot_location: {}. Could not plot {}.".format(error_msg, location_name))
        calculated_ice = None

    if return_values:
        return calculated_ice, observed_ice