Exemplo n.º 1
0
def write_hrrr_grib2txt(date=None, filenum=24, hour=0,
                        directory=None, enddirectory=None,
                        loc=None, indexes=None, write_modelhours=False):
    """
    Read a run of HRRR GRIB2 files and write the extracted point data to a JSON file.

    Grabs the HRRR file corresponding to `date` and `filenum - 1` files after it
    for a given forecast `hour`, or (when `write_modelhours` is True) a range of
    forecast hours `[first, last]` from a single model run, reads the data for a
    specific location, and dumps ``[data, dates, parameterlist, loc, indexes,
    units]`` as JSON into `enddirectory`.

    Parameters
    ----------
    date : datetime.datetime, optional
        First model time; defaults to "now" at call time.
    filenum : int
        Number of consecutive hourly files to read (ignored when
        `write_modelhours` is True).
    hour : int or [int, int]
        Forecast hour, or an inclusive [first, last] range when
        `write_modelhours` is True.
    directory, enddirectory : str, optional
        Where to read GRIB2 files / write the JSON file; default to the
        current working directory at call time.
    loc : [lat, lon], optional
        Target location; defaults to [36.605, -97.485].
    indexes : optional
        Grid indexes passed through to `read_hrrr_spec`; overwritten from the
        first successful read.
    write_modelhours : bool
        Select the "one run, many forecast hours" mode.

    Returns
    -------
    str or None
        The JSON filename on success (or if it already exists), '' when no
        files were produced or the dump failed, None on argument errors or
        when no file could be read.
    """
    # Resolve call-time defaults.  The originals (datetime.datetime.now(),
    # os.getcwd(), a mutable list) were evaluated once at import time, which
    # silently froze "now" and the working directory.
    if date is None:
        date = datetime.datetime.now()
    if directory is None:
        directory = os.getcwd()
    if enddirectory is None:
        enddirectory = os.getcwd()
    if loc is None:
        loc = [36.605, -97.485]

    if isinstance(hour, list) and not write_modelhours:
        print('error, can only write one model hour at a time if write_modelhours = False')
        return

    newfilename = produce_hrrr_txt_string(date=date, hour=hour, filenum=filenum,
                                          loc=loc, indexes=indexes,
                                          modelhours=write_modelhours)

    if newfilename in os.listdir(enddirectory):
        print('error file already exists')
        return newfilename

    if not write_modelhours:
        # One file per wall-clock hour, same forecast hour from each run.
        # timedelta arithmetic avoids the ValueError the original raised when
        # date.hour + i ran past 23.
        base = date.replace(minute=0, second=0, microsecond=0)
        datestrings = [base + datetime.timedelta(hours=i) for i in range(filenum)]
        hourslists = [[hour] for _ in range(filenum)]
    else:
        # hour is [first, last]: one model run, a range of forecast hours.
        date = date - datetime.timedelta(hours=min(hour))
        filenum = hour[1] - hour[0] + 1
        datestrings = [date]
        hourslists = [list(range(hour[0], hour[1] + 1))]

    filelists = produce_hrrr_grib2strings(datestrings, hourslists)

    if not filelists:
        return ''

    data = []
    dates = []
    have_meta = False
    parameterlist = units = None
    available = set(os.listdir(directory))  # hoisted: one directory scan, O(1) lookups

    for i, fname in enumerate(filelists):
        if fname not in available:
            continue
        x = read_hrrr_spec(filename=fname, directory=directory,
                           no_txt=True, coords=indexes)
        print(fname)
        if x is None:
            continue
        if not have_meta:
            # Capture metadata from the first successful read only (the
            # original used a one-shot `k` sentinel for the same purpose).
            parameterlist = x[1]
            loc = x[2]
            indexes = x[3]
            units = x[4]
            have_meta = True
        data.append(np.array(x[0]).tolist())
        dates.append(matplotlib.dates.date2num(datestrings[i]))

    if not have_meta:
        # No file could be read; nothing to write.
        return

    # Remove HRRR hours that have missing pressure levels: ragged input leaves
    # ndarray elements behind after tolist(), and those are not JSON
    # serializable.  (The original popped from the lists while advancing the
    # index, skipping the entry after every removal.)
    kept = [(d, t) for d, t in zip(data, dates)
            if d is not None and not any(isinstance(j, np.ndarray) for j in d)]
    data = [d for d, _ in kept]
    dates = [t for _, t in kept]

    # Write directly into enddirectory instead of chdir-ing (the original
    # could return early with the cwd still changed and the file left open).
    outpath = os.path.join(enddirectory, newfilename)
    try:
        with open(outpath, 'w') as f:
            json.dump([data, dates, parameterlist, loc, indexes, units], f)
    except TypeError:
        print("array found in json export error -> pressure levels missing from some hour")
        return ''

    return newfilename
    
    
    
    
    
    
    
    
    
    
Exemplo n.º 2
0
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np

# Connect to the local database.  Fail loudly if the connection cannot be
# made: the original bare `except:` printed a message and then fell through
# to `conn.cursor()`, turning a connection failure into a confusing
# NameError further down.
try:
    conn = psycopg2.connect(
        "dbname='tfgdatosmodificados' user='******' host='localhost' password='******'"
    )
except Exception:
    print("I am unable to connect to the database")
    raise

cur = conn.cursor()
cur.execute("""SELECT *
FROM travel_time_intersection_to_tollgate_modified ;""")
rows = cur.fetchall()

# Distinct dates seen in column 2.
# NOTE(review): dates are collected from row[2][1] here but rows are bucketed
# by row[2][0] below -- confirm this asymmetry is intentional.
dates = sorted({row[2][1].date().strftime('%Y-%m-%d') for row in rows})
dates.pop()  # drop the last day

pairs = sorted([['B', 1], ['B', 3], ['A', 2], ['A', 3], ['C', 1], ['C', 3]])

# Pre-seed one empty bucket per (intersection, tollgate, date).
keys = {(pair[0], pair[1], date): [] for date in dates for pair in pairs}

# Group travel times (row[3]) by (intersection, tollgate, date), skipping
# the hard-coded holdout day.
for row in rows:
    if row[2][0].date().strftime('%Y-%m-%d') != '2016-10-18':
        keys[(row[0], row[1],
              row[2][0].date().strftime('%Y-%m-%d'))].append(row[3])

print(keys[('A', 2, '2016-09-04')])
Exemplo n.º 3
0
def write_hrrr_grib2txt(date=None, filenum=24, hour=0,
                        directory=None, enddirectory=None,
                        loc=None, indexes=None, write_modelhours=False):
    """
    Read a run of HRRR GRIB2 files and write the extracted point data to a JSON file.

    Grabs the HRRR file corresponding to `date` and `filenum - 1` files after it
    for a given forecast `hour`, or (when `write_modelhours` is True) a range of
    forecast hours `[first, last]` from a single model run, reads the data for a
    specific location, and dumps ``[data, dates, parameterlist, loc, indexes,
    units]`` as JSON into `enddirectory`.

    Parameters
    ----------
    date : datetime.datetime, optional
        First model time; defaults to "now" at call time.
    filenum : int
        Number of consecutive hourly files to read (ignored when
        `write_modelhours` is True).
    hour : int or [int, int]
        Forecast hour, or an inclusive [first, last] range when
        `write_modelhours` is True.
    directory, enddirectory : str, optional
        Where to read GRIB2 files / write the JSON file; default to the
        current working directory at call time.
    loc : [lat, lon], optional
        Target location; defaults to [36.605, -97.485].
    indexes : optional
        Grid indexes passed through to `read_hrrr_spec`; overwritten from the
        first successful read.
    write_modelhours : bool
        Select the "one run, many forecast hours" mode.

    Returns
    -------
    str or None
        The JSON filename on success (or if it already exists), '' when no
        files were produced or the dump failed, None on argument errors or
        when no file could be read.
    """
    # Resolve call-time defaults.  The originals (datetime.datetime.now(),
    # os.getcwd(), a mutable list) were evaluated once at import time, which
    # silently froze "now" and the working directory.
    if date is None:
        date = datetime.datetime.now()
    if directory is None:
        directory = os.getcwd()
    if enddirectory is None:
        enddirectory = os.getcwd()
    if loc is None:
        loc = [36.605, -97.485]

    if isinstance(hour, list) and not write_modelhours:
        print('error, can only write one model hour at a time if write_modelhours = False')
        return

    newfilename = produce_hrrr_txt_string(date=date, hour=hour, filenum=filenum,
                                          loc=loc, indexes=indexes,
                                          modelhours=write_modelhours)

    if newfilename in os.listdir(enddirectory):
        print('error file already exists')
        return newfilename

    if not write_modelhours:
        # One file per wall-clock hour, same forecast hour from each run.
        # timedelta arithmetic avoids the ValueError the original raised when
        # date.hour + i ran past 23.
        base = date.replace(minute=0, second=0, microsecond=0)
        datestrings = [base + datetime.timedelta(hours=i) for i in range(filenum)]
        hourslists = [[hour] for _ in range(filenum)]
    else:
        # hour is [first, last]: one model run, a range of forecast hours.
        date = date - datetime.timedelta(hours=min(hour))
        filenum = hour[1] - hour[0] + 1
        datestrings = [date]
        hourslists = [list(range(hour[0], hour[1] + 1))]

    filelists = produce_hrrr_grib2strings(datestrings, hourslists)

    if not filelists:
        return ''

    data = []
    dates = []
    have_meta = False
    parameterlist = units = None
    available = set(os.listdir(directory))  # hoisted: one directory scan, O(1) lookups

    for i, fname in enumerate(filelists):
        if fname not in available:
            continue
        x = read_hrrr_spec(filename=fname, directory=directory,
                           no_txt=True, coords=indexes)
        print(fname)
        if x is None:
            continue
        if not have_meta:
            # Capture metadata from the first successful read only (the
            # original used a one-shot `k` sentinel for the same purpose).
            parameterlist = x[1]
            loc = x[2]
            indexes = x[3]
            units = x[4]
            have_meta = True
        data.append(np.array(x[0]).tolist())
        dates.append(matplotlib.dates.date2num(datestrings[i]))

    if not have_meta:
        # No file could be read; nothing to write.
        return

    # Remove HRRR hours that have missing pressure levels: ragged input leaves
    # ndarray elements behind after tolist(), and those are not JSON
    # serializable.  (The original popped from the lists while advancing the
    # index, skipping the entry after every removal.)
    kept = [(d, t) for d, t in zip(data, dates)
            if d is not None and not any(isinstance(j, np.ndarray) for j in d)]
    data = [d for d, _ in kept]
    dates = [t for _, t in kept]

    # Write directly into enddirectory instead of chdir-ing (the original
    # could return early with the cwd still changed and the file left open).
    outpath = os.path.join(enddirectory, newfilename)
    try:
        with open(outpath, 'w') as f:
            json.dump([data, dates, parameterlist, loc, indexes, units], f)
    except TypeError:
        print("array found in json export error -> pressure levels missing from some hour")
        return ''

    return newfilename