Esempio n. 1
0
def get_kdv(x_kdv):
    '''
    Imports an x_kdv view from regObs and returns a dictionary with <key, value> = <ID, Name>.

    The view is read from a local pickle cache when one exists; otherwise it
    is requested from the regObs OData api and the parsed result is cached
    back to the pickle file.

    :param x_kdv:   [string]    name of the x_kdv view, e.g. 'AvalCauseKDV'
    :return:        {}          the x_kdv view as a dictionary {ID: Name}

    Ex of use: aval_cause_kdv = get_kdv('AvalCauseKDV')
    Ex of url for returning values for IceCoverKDV in norwegian:
    http://api.nve.no/hydrology/regobs/v0.9.4/OData.svc/ForecastRegionKDV?$filter=Langkey%20eq%201%20&$format=json

    '''

    kdv_file = '{0}{1}.pickle'.format(kdv_elements_folder, x_kdv)
    kdv_dict = {}   # renamed from 'dict' so the builtin is not shadowed

    if os.path.exists(kdv_file):

        ### Should be useful to test if the file is old and if so make a new one
        # max_file_age = 3
        # file_date = time.ctime(os.path.getctime(kdv_file))
        # date_limit = datetime.datetime.now() - datetime.timedelta(days=max_file_age)
        ###

        #print("Getting KDV from pickle:{0}".format(kdv_file))
        kdv_dict = mp.unpickle_anything(kdv_file)

    else:
        url = 'http://api.nve.no/hydrology/regobs/{0}/OData.svc/{1}?$filter=Langkey%20eq%201%20&$format=json'\
            .format(api_version, x_kdv)

        print("Getting KDV from URL:{0}".format(url))

        kdv = requests.get(url).json()

        for a in kdv['d']['results']:
            try:
                # AvalCauseKDV ids 10..25 get special treatment: the
                # Description field is used instead of the Name field.
                if 'AvalCauseKDV' in url and 9 < a['ID'] < 26:
                    kdv_dict[a["ID"]] = fe.remove_norwegian_letters(a["Description"])
                else:
                    kdv_dict[a["ID"]] = fe.remove_norwegian_letters(a["Name"])
            except (RuntimeError, TypeError, NameError):
                # Skip malformed rows rather than aborting the whole import.
                pass

        # Bug fix: pickle once after the whole table is parsed.
        # Previously this call sat inside the loop, re-writing the cache
        # file once per row.
        mp.pickle_anything(kdv_dict, kdv_file)

    return kdv_dict
Esempio n. 2
0

    def add_Inflow_DOP(self, Inflow_DOP_inn):
        """Store the inflow DOP series, appending any missing-element messages to metadata."""
        # Record data-quality messages before keeping the series itself.
        self.metadata += we.test_for_missing_elements(
            Inflow_DOP_inn, self.from_date, self.to_date)
        self.Inflow_DOP = Inflow_DOP_inn


    def add_Inflow_Chla(self, Inflow_Chla_inn):
        """Store the inflow Chla series, appending any missing-element messages to metadata."""
        # Record data-quality messages before keeping the series itself.
        self.metadata += we.test_for_missing_elements(
            Inflow_Chla_inn, self.from_date, self.to_date)
        self.Inflow_Chla = Inflow_Chla_inn


    def add_Inflow_DOC(self, Inflow_DOC_inn):
        """Store the inflow DOC series, appending any missing-element messages to metadata."""
        # Record data-quality messages before keeping the series itself.
        self.metadata += we.test_for_missing_elements(
            Inflow_DOC_inn, self.from_date, self.to_date)
        self.Inflow_DOC = Inflow_DOC_inn


if __name__ == "__main__":

    # Yesterday's date as "YYYY-MM-DD"; only the commented-out runs use it.
    yesterday = (dt.date.today() - dt.timedelta(days=1)).strftime("%Y-%m-%d")
    # harvest_and_save_blindern('2000-01-01', yesterday)
    # harvest_and_save_nordnesfjelet('2014-08-01', yesterday)

    # Harvest the Hakkloa forcing data and round-trip it through a pickle file.
    data = harvest_for_mylake_hakkloa('2013-04-01', '2015-10-01')
    mp.pickle_anything(data, data.output_file_path + '.pickle')

    data2 = mp.unpickle_anything('{0}HAK_input'.format(env.data_path) + '.pickle')
    mfd.write_mylake_inputfile(data2)
Esempio n. 3
0
    def add_Inflow_DOP(self, Inflow_DOP_inn):
        """Attach inflow DOP data after checking the period for gaps."""
        gap_messages = we.test_for_missing_elements(Inflow_DOP_inn,
                                                    self.from_date,
                                                    self.to_date)
        self.metadata += gap_messages
        self.Inflow_DOP = Inflow_DOP_inn

    def add_Inflow_Chla(self, Inflow_Chla_inn):
        """Attach inflow Chla data after checking the period for gaps."""
        gap_messages = we.test_for_missing_elements(Inflow_Chla_inn,
                                                    self.from_date,
                                                    self.to_date)
        self.metadata += gap_messages
        self.Inflow_Chla = Inflow_Chla_inn

    def add_Inflow_DOC(self, Inflow_DOC_inn):
        """Attach inflow DOC data after checking the period for gaps."""
        gap_messages = we.test_for_missing_elements(Inflow_DOC_inn,
                                                    self.from_date,
                                                    self.to_date)
        self.metadata += gap_messages
        self.Inflow_DOC = Inflow_DOC_inn


if __name__ == "__main__":

    # ISO date string for yesterday; referenced only by the commented-out runs.
    yesterday_iso = (dt.date.today() - dt.timedelta(days=1)).strftime("%Y-%m-%d")
    # harvest_and_save_blindern('2000-01-01', yesterday_iso)
    # harvest_and_save_nordnesfjelet('2014-08-01', yesterday_iso)

    harvested = harvest_for_mylake_hakkloa('2013-04-01', '2015-10-01')
    mp.pickle_anything(harvested, harvested.output_file_path + '.pickle')

    # Reload the pickled input and write it out as a MyLake input file.
    input_pickle = '{0}HAK_input'.format(env.data_path) + '.pickle'
    reloaded = mp.unpickle_anything(input_pickle)
    mfd.write_mylake_inputfile(reloaded)
Esempio n. 4
0
if __name__ == "__main__":

    # Forecast regions to analyse. Hard-coded ids instead of reading the KDV.
    # regions_kdv = GRO.get_kdv("ForecastRegionKDV")
    regions = list(range(106, 134))     # ForecastRegionTID = 133 is the last and is Salten

    # Analysis period (one avalanche season) and cache-file locations.
    date_from = "2014-12-01"
    date_to = "2015-06-01"
    pickle_warnings_file_name = '{0}{1}'.format(data_folder, 'runForMatrix warnings.pickle')
    pickle_data_set_file_name = '{0}{1}'.format(data_folder, 'runForMatrix data set.pickle')

    #### With the data pickled you don't need to read from the api every time. ####
    #### Uncomment these lines to (re)build the caches from the api.          ####
    # pickle_warnings(regions, date_from, date_to, pickle_warnings_file_name)
    # warnings = RP.unpickle_anything(pickle_warnings_file_name)
    # pickle_data_set(warnings, pickle_data_set_file_name, False)
    data_set = RP.unpickle_anything(pickle_data_set_file_name)

    # One histogram per data-set dimension, saved as a png in plot_folder.
    plot_histogram('frequency of levels', data_set['level']['values'], data_set['level']['keys'],
                '{0}Histogram of levels {1} to {2}.png'.format(plot_folder, date_from, date_to))
    plot_histogram('frequency of sizes', data_set['size']['values'], data_set['size']['keys'],
                '{0}Histogram of sizes {1} to {2}.png'.format(plot_folder, date_from, date_to))
    plot_histogram('frequency of triggers', data_set['trigger']['values'], data_set['trigger']['keys'],
                '{0}Histogram of triggers {1} to {2}.png'.format(plot_folder, date_from, date_to))
    plot_histogram('frequency of probabilities', data_set['probability']['values'], data_set['probability']['keys'],
                '{0}Histogram of probabilities {1} to {2}.png'.format(plot_folder, date_from, date_to))
    plot_histogram('frequency of distribution', data_set['distribution']['values'], data_set['distribution']['keys'],
                '{0}Histogram of distribution {1} to {2}.png'.format(plot_folder, date_from, date_to))

    plot_histogram_on_danger_level('', 'size', data_set,
                '{0}Histogram of size on danger level {1} to {2}.png'.format(plot_folder, date_from, date_to))
    plot_histogram_on_danger_level('', 'trigger', data_set,