Code example #1
0
def pickle_warnings(regions, date_from, date_to, pickle_file_name):
    '''
    All warnings and problems are selected from regObs or the avalanche api
    and neatly pickle'd for later use.

    :param regions:            list [] of forecast region ids
    :param date_from:          string as 'yyyy-mm-dd'
    :param date_to:            string as 'yyyy-mm-dd'
    :param pickle_file_name:   filename including directory as string
    :return:                   None (the joined warnings are written to pickle_file_name)
    '''

    warnings = []

    for r in regions:

        # get all warnings and problems for this region, then join the
        # problems onto the warnings by matching date
        _warnings = GFA.get_warnings(r, date_from, date_to)
        _name = GRO.get_forecast_region_name(r)
        _problems = GRO.get_problems_from_AvalancheWarnProblemV(_name, r, date_from, date_to)

        # print() form works on both Python 2 and 3 (file elsewhere uses print())
        print('{0} problems found for {1}'.format(len(_problems), _name))

        # index problems by date once so the join is O(n+m) instead of the
        # original O(n*m) nested loop; list order per date is preserved
        problems_by_date = {}
        for p in _problems:
            problems_by_date.setdefault(p.date, []).append(p)

        for w in _warnings:
            for p in problems_by_date.get(w.date, []):
                w.add_problem(p)

        warnings += _warnings

    RP.pickle_anything(warnings, pickle_file_name)
Code example #2
0
def pickle_data_set(warnings, file_name, use_ikke_gitt):
    '''
    Takes the warnings data set, which is a list of AvalancheDanger objects,
    and makes a nested dictionary data set of it. This makes the data set
    easier to distribute and use if the AvalancheDanger and AvalancheProblem
    classes are missing.

    The data set also includes information on what the xKDV tables in regObs
    contain and preferred colors when plotting.

    :param warnings:        list of AvalancheDanger objects
    :param file_name:       filename to pickle the data to
    :param use_ikke_gitt:   if False the ID = 0 ('Ikke gitt' / not given)
                            values are omitted from the key lists
    :return:                None (the data set is written to file_name)
    '''

    level_list = []
    size_list = []
    trigger_list = []
    probability_list = []
    distribution_list = []

    # only warnings with a real danger level and at least one problem count;
    # the first (main) avalanche problem is the one sampled
    for w in warnings:
        if w.danger_level > 0 and len(w.avalanche_problems) > 0:
            main_problem = w.avalanche_problems[0]
            level_list.append(w.danger_level)
            size_list.append(main_problem.aval_size)
            trigger_list.append(main_problem.aval_trigger)
            probability_list.append(main_problem.aval_probability)
            distribution_list.append(main_problem.aval_distribution)

    # wrap in list() so .pop(0) below also works on Python 3 dict views
    level_keys = list(GRO.get_kdv('AvalancheDangerKDV').keys())
    size_keys = list(GRO.get_kdv('DestructiveSizeKDV').values())
    triggers_keys = list(GRO.get_kdv('AvalTriggerSimpleKDV').values())
    probability_keys = list(GRO.get_kdv('AvalProbabilityKDV').values())
    distribution_keys = list(GRO.get_kdv('AvalPropagationKDV').values())

    # one color per danger level: grey, green, yellow, orange, red, black
    level_colors = ['0.5', '#ccff66', '#ffff00', '#ff9900', '#ff0000', 'k']

    if not use_ikke_gitt:
        # drop the leading entry from every axis
        # NOTE(review): assumes the first element is the ID = 0 ('Ikke gitt')
        # entry, i.e. that get_kdv returns entries ordered by ID — verify.
        level_keys.pop(0)
        size_keys.pop(0)
        triggers_keys.pop(0)
        probability_keys.pop(0)
        distribution_keys.pop(0)

        level_colors.pop(0)

    data_set = {'level': {'values': level_list, 'keys': level_keys, 'colors': level_colors},
                'size': {'values': size_list, 'keys': size_keys, 'colors': ['0.7']},
                'trigger': {'values': trigger_list, 'keys': triggers_keys, 'colors': ['0.7']},
                'probability': {'values': probability_list, 'keys': probability_keys, 'colors': ['0.7']},
                'distribution': {'values': distribution_list, 'keys': distribution_keys, 'colors': ['0.7']}}

    RP.pickle_anything(data_set, file_name)
Code example #3
0
File: getRegObsdata.py — Project: pslota/Ice-modelling
def get_kdv(x_kdv):
    '''
    Imports an x_kdv view from regObs and returns a dictionary with
    <key, value> = <ID, Name>. Results are cached to a pickle file; the
    regObs api is only queried when no cached file exists.

    :param x_kdv:   [string]    x_kdv view
    :return:        {}          x_kdv as a dictionary

    Ex of use: aval_cause_kdv = get_kdv('AvalCauseKDV')
    Ex of url for returning values for IceCoverKDV in norwegian:
    http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/ForecastRegionKDV?$filter=Langkey%20eq%201%20&$format=json
    '''

    kdv_file = '{0}{1}.pickle'.format(kdv_elements_folder, x_kdv)
    kdv_dict = {}  # renamed from 'dict' — do not shadow the builtin

    if os.path.exists(kdv_file):
        # TODO: invalidate the cache when the file is older than some limit
        # (e.g. compare os.path.getctime(kdv_file) against a max age).
        kdv_dict = mp.unpickle_anything(kdv_file)

    else:
        url = 'http://api.nve.no/hydrology/regobs/{0}/OData.svc/{1}?$filter=Langkey%20eq%201%20&$format=json'\
            .format(api_version, x_kdv)

        print("Getting KDV from URL:{0}".format(url))

        kdv = requests.get(url).json()

        for a in kdv['d']['results']:
            try:
                # AvalCauseKDV IDs 10..25 get special treatment: use the
                # longer Description text instead of the short Name
                if 'AvalCauseKDV' in url and 9 < a['ID'] < 26:
                    kdv_dict[a["ID"]] = fe.remove_norwegian_letters(a["Description"])
                else:
                    kdv_dict[a["ID"]] = fe.remove_norwegian_letters(a["Name"])
            except (RuntimeError, TypeError, NameError):
                # best effort: skip malformed entries rather than fail the view
                pass

        # pickle once after the loop — the original pickled on every
        # iteration, rewriting the file len(results) times
        mp.pickle_anything(kdv_dict, kdv_file)

    return kdv_dict
Code example #4
0

    def add_Inflow_DOP(self, Inflow_DOP_inn):
        # Check the series for gaps over the object's period before storing it;
        # any messages are appended to the accumulated metadata.
        self.metadata += we.test_for_missing_elements(
            Inflow_DOP_inn, self.from_date, self.to_date)
        self.Inflow_DOP = Inflow_DOP_inn


    def add_Inflow_Chla(self, Inflow_Chla_inn):
        # Record missing-element messages for the period, then keep the series.
        self.metadata += we.test_for_missing_elements(
            Inflow_Chla_inn, self.from_date, self.to_date)
        self.Inflow_Chla = Inflow_Chla_inn


    def add_Inflow_DOC(self, Inflow_DOC_inn):
        # Validate coverage of the series against [from_date, to_date] and
        # accumulate any warnings before assigning.
        self.metadata += we.test_for_missing_elements(
            Inflow_DOC_inn, self.from_date, self.to_date)
        self.Inflow_DOC = Inflow_DOC_inn


if __name__ == "__main__":

    # 'yesterday' is only referenced by the harvest calls kept below for
    # reference (spelling fixed from the original 'yesturday').
    yesterday = (dt.date.today() - dt.timedelta(days=1)).strftime("%Y-%m-%d")
    # harvest_and_save_blindern('2000-01-01', yesterday)
    # harvest_and_save_nordnesfjelet('2014-08-01', yesterday)

    # Harvest the Hakkloa data, pickle it, read it back and write the
    # MyLake input file from the round-tripped object.
    data = harvest_for_mylake_hakkloa('2013-04-01', '2015-10-01')
    mp.pickle_anything(data, data.output_file_path + '.pickle')
    data2 = mp.unpickle_anything('{0}HAK_input.pickle'.format(env.data_path))

    mfd.write_mylake_inputfile(data2)
Code example #5
0
    def add_Inflow_DOP(self, Inflow_DOP_inn):
        # Gap-check the series over the period; collect messages in metadata.
        msgs = we.test_for_missing_elements(Inflow_DOP_inn, self.from_date,
                                            self.to_date)
        self.metadata += msgs
        self.Inflow_DOP = Inflow_DOP_inn

    def add_Inflow_Chla(self, Inflow_Chla_inn):
        # Gap-check the series over the period; collect messages in metadata.
        msgs = we.test_for_missing_elements(Inflow_Chla_inn, self.from_date,
                                            self.to_date)
        self.metadata += msgs
        self.Inflow_Chla = Inflow_Chla_inn

    def add_Inflow_DOC(self, Inflow_DOC_inn):
        # Gap-check the series over the period; collect messages in metadata.
        msgs = we.test_for_missing_elements(Inflow_DOC_inn, self.from_date,
                                            self.to_date)
        self.metadata += msgs
        self.Inflow_DOC = Inflow_DOC_inn


if __name__ == "__main__":

    # 'yesterday' is only used by the harvest calls kept below for reference
    # (spelling fixed from the original 'yesturday').
    yesterday = (dt.date.today() - dt.timedelta(days=1)).strftime("%Y-%m-%d")
    # harvest_and_save_blindern('2000-01-01', yesterday)
    # harvest_and_save_nordnesfjelet('2014-08-01', yesterday)

    # Harvest Hakkloa data, pickle it, read it back and write the MyLake
    # input file from the round-tripped object.
    data = harvest_for_mylake_hakkloa('2013-04-01', '2015-10-01')
    mp.pickle_anything(data, '{0}.pickle'.format(data.output_file_path))
    data2 = mp.unpickle_anything(
        '{0}HAK_input.pickle'.format(env.data_path))

    mfd.write_mylake_inputfile(data2)