Example #1
    def _parse(self):
        file_data = self._downloadFile()

        members = np.array(file_data.split('\r\n\r\n\r\n'))
        members = members[:-1]  # drop the trailing chunk after the final delimiter
        num_members = len(members)
        profiles = {}
        dates = None
        mean_member = None

        for n, mem_txt in enumerate(members):
            mem_name, mem_profs, mem_dates = self._parseMember(mem_txt)
            profiles[mem_name] = mem_profs
            dates = mem_dates

            if mean_member is None:
                mean_member = mem_name

        prof_coll = prof_collection.ProfCollection(profiles, dates)
        prof_coll.setHighlightedMember(mean_member)
        prof_coll.setMeta('loc', profiles[mean_member][0].location)
        prof_coll.setMeta('observed', False)
        prof_coll.setMeta('base_time', dates[0])
        return prof_coll
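
The loop above leans entirely on _parseMember, which is defined elsewhere in the decoder. As a rough sketch of the contract it assumes (one name, a list of profiles, and a parallel list of dates per text block), something like the hypothetical stub below; it is illustrative only, not the decoder's actual method:

# Hypothetical stand-in for the decoder's _parseMember(); the real method
# turns one text block into (member_name, [profile, ...], [datetime, ...]).
def parse_member_stub(mem_txt):
    lines = mem_txt.split('\r\n')
    mem_name = lines[0].strip()   # e.g. an ensemble member label
    mem_profs = []                # profile objects for this member
    mem_dates = []                # valid times, parallel to mem_profs
    # ...real field parsing would populate mem_profs and mem_dates...
    return mem_name, mem_profs, mem_dates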
Example #2
    def _parse(self):
        file_data = self._downloadFile()
        ## read in the file
        data = np.array([l.strip() for l in file_data.split('\n')])

        ## necessary index points
        title_idx = np.where(data == '%TITLE%')[0][0]
        start_idx = np.where(data == '%RAW%')[0][0] + 1
        finish_idx = np.where(data == '%END%')[0][0]

        ## create the plot title
        data_header = data[title_idx + 1].split()
        location = data_header[0]
        time = datetime.strptime(data_header[1][:11], '%y%m%d/%H%M')

        if time > datetime.utcnow():
            # If strptime accidentally puts the sounding in the future (as with
            # the SARS archive), e.g. a 1957 sounding becoming 2057, pin it to
            # the 20th century.
            time = datetime.strptime('19' + data_header[1][:11], '%Y%m%d/%H%M')

        ## put it all together for StringIO
        full_data = '\n'.join(data[start_idx:finish_idx])
        sound_data = StringIO(full_data)

        ## read the data into arrays
        p, h, T, Td, wdir, wspd = np.genfromtxt(sound_data,
                                                delimiter=',',
                                                comments="%",
                                                unpack=True)
        #       idx = np.argsort(p, kind='mergesort')[::-1] # sort by pressure in case the pressure array is off.

        pres = p  #[idx]
        hght = h  #[idx]
        tmpc = T  #[idx]
        dwpc = Td  #[idx]
        wspd = wspd  #[idx]
        wdir = wdir  #[idx]

        # Force latitude to be 35 N. Figure out a way to fix this later.
        prof = profile.create_profile(profile='raw',
                                      pres=pres,
                                      hght=hght,
                                      tmpc=tmpc,
                                      dwpc=dwpc,
                                      wdir=wdir,
                                      wspd=wspd,
                                      location=location,
                                      date=time,
                                      latitude=35.)

        prof_coll = prof_collection.ProfCollection(
            {'': [prof]},
            [time],
        )

        prof_coll.setMeta('loc', location)
        prof_coll.setMeta('observed', True)
        prof_coll.setMeta('base_time', time)
        return prof_coll
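
The century guard above exists because %y pivots two-digit years: strptime maps 00-68 to 2000-2068 and 69-99 to 1969-1999, so an archived 1957 sounding parses as 2057. A quick self-contained demonstration (made-up timestamp):

from datetime import datetime

# '%y' maps 00-68 to 2000-2068, so a 1957 sounding lands a century ahead...
t = datetime.strptime('570514/0000', '%y%m%d/%H%M')
print(t.year)   # 2057

# ...which re-parsing with an explicit '19' century repairs.
t = datetime.strptime('19' + '570514/0000', '%Y%m%d/%H%M')
print(t.year)   # 1957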
Example #3
    def _parse(self):
        file_data = self._downloadFile()

        file_profiles = file_data.split('\n\n\n')
        profiles = {}
        dates = []
        date_init = None
        loc = None
        for m in file_profiles:
            try:
                prof, dt_obj, init_dt, member = self._parseSection(m)
            except Exception:
                # Skip malformed sections rather than aborting the whole parse.
                continue

            loc = prof.location
            # Add the profile to this member's list, creating it on first sight.
            try:
                profiles[member] = profiles[member] + [prof]
            except KeyError:
                profiles[member] = [prof]
            if dt_obj not in dates:
                dates.append(dt_obj)
            if date_init is None or init_dt < date_init:
                date_init = init_dt
            #print(profiles)
        prof_coll = prof_collection.ProfCollection(profiles, dates)
        if "MEAN" in list(profiles.keys()):
            prof_coll.setHighlightedMember("MEAN")
        prof_coll.setMeta('observed', False)
        prof_coll.setMeta('base_time', date_init)
        prof_coll.setMeta('loc', loc)
        return prof_coll
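
The try/except accumulation above works, but collections.defaultdict expresses the same "append, creating the list on first sight" pattern without using exceptions for control flow. A small equivalent sketch with placeholder values:

from collections import defaultdict

profiles = defaultdict(list)
for member, prof in [('MEAN', 'p0'), ('P01', 'p1'), ('MEAN', 'p2')]:
    profiles[member].append(prof)   # the list is created on first access

print(dict(profiles))   # {'MEAN': ['p0', 'p2'], 'P01': ['p1']}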
Example #4
class PECANDecoder(Decoder):
    def __init__(self, file_name):
        super(PECANDecoder, self).__init__(file_name)

    def _parse(self):
        file_data = self._downloadFile()

        file_profiles = file_data.split('\n\n\n')

        profiles = {}
        dates = []
        for m in file_profiles:
            try:
                prof, dt_obj, member = self._parseSection(m)
            except Exception:
                continue

            # Add the profile to this member's list, creating it on first sight.
            try:
                profiles[member] = profiles[member] + [prof]
            except KeyError:
                profiles[member] = [prof]
            dates.append(dt_obj)

        prof_coll = prof_collection.ProfCollection(profiles, dates)
        prof_coll.setMeta('observed', False)
        prof_coll.setMeta('base_time', dates[0])
        return prof_coll
Example #5
    def _parse(self):
        file_data = self._downloadFile()
        snfile = file_data.split('\n')

        bgn = -1
        end = -1
        ttl = -1
        stl = -1

        for i in range(len(snfile)):
            if snfile[i] == "<PRE>": 
                bgn = i+5
            if snfile[i][:10] == "</PRE><H3>": 
                end = i-1
            if snfile[i][:4] == "<H2>" and snfile[i][-5:] == "</H2>": 
                ttl = i
            if 'Station latitude' in snfile[i]:
                stl = i

        if bgn == -1 or end == -1 or ttl == -1:
            raise IOError("Looks like the server had difficulty handling the request.  Try again in a few minutes.")

        snd_data = []
        for i in range(bgn, end+1):
            vals = []
            for j in [ 0, 1, 2, 3, 6, 7 ]:
                val = snfile[i][(7 * j):(7 * (j + 1))].strip()

                if val == "":
                    vals.append(UWYODecoder.MISSING)
                else:
                    vals.append(float(val))
            snd_data.append(vals)

        col_names = ['pres', 'hght', 'tmpc', 'dwpc', 'wdir', 'wspd']
        snd_dict = dict(zip(col_names, zip(*snd_data)))

        snd_date = datetime.strptime(snfile[ttl][-20:-5], "%HZ %d %b %Y")

        loc = snfile[ttl][10:14]
        if stl == -1:
            lat = 35.
        else:
            lat = float(snfile[stl].split(':')[-1].strip())

        prof = profile.create_profile(profile='raw', location=loc, date=snd_date, latitude=lat, missing=UWYODecoder.MISSING, **snd_dict)

        prof_coll = prof_collection.ProfCollection(
            {'':[ prof ]},
            [ snd_date ],
        )

        prof_coll.setMeta('loc', loc)
        prof_coll.setMeta('observed', True)
        return prof_coll
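
The inner loop slices each line into fixed-width 7-character columns and keeps fields 0-3 and 6-7, which per col_names are pressure, height, temperature, dew point, direction, and speed in the University of Wyoming table. A small demonstration with a made-up line:

line = ' 1000.0    111   20.0   15.0     73  11.07    150     10'
for j in [0, 1, 2, 3, 6, 7]:
    print(repr(line[7 * j:7 * (j + 1)].strip()))
# prints '1000.0', '111', '20.0', '15.0', '150', '10'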
Example #6
    def _parse(self):

        # time = datetime.now()

        file_data = self._downloadFile()
        modInit, modName, fHour, coords = file_data.split(' ')
        # if len(modInit) == 2:
        #     import time
        #     modInit = time.strftime('%Y%m%d')+modInit

        locationStr = coords.strip('\n')

        textFile = '/home/apache/climate/data/forecast/sndgs/'+modInit+'_'+modName+'_'+fHour+'_'+coords.strip('\n')+'_raw.txt'
        #writeTimes(textFile, 'Begin')

        data_header = 'Location: '

        time = datetime.strptime(modInit, '%Y%m%d%H') + timedelta(hours=int(fHour))

        # Determine whether it's a site ID rather than a "lat,lon" pair:
        if ',' not in coords:
            import numpy as np
            sites, siteCoords = np.genfromtxt('/home/apache/climate/hanis/model/fsound/text/sid.txt', dtype=str, unpack=True, delimiter=' ')
            i = np.where(sites == coords)  # look up the requested site ID
            coords = siteCoords[i[0][0]]

        variables = fsonde_decoder.decode(modInit, modName, fHour, coords)
        #writeTimes(textFile, 'After Decode')

        pres = variables['pres']
        hght = variables['hght']
        tmpc = variables['temp']
        dwpc = variables['dewp']
        u = variables['ugrd']
        v = variables['vgrd']
        omeg = variables['omeg']
        
        wdir, wspd = utils.comp2vec(u, v)
        # wspd = [s*1.94384 for s in wspd]

        # Force latitude to be 35 N. Figure out a way to fix this later.
        prof = profile.create_profile(profile='raw', pres=pres, hght=hght, tmpc=tmpc, dwpc=dwpc, wdir=wdir, wspd=wspd, omeg=omeg, location=locationStr, date=time, latitude=35.)
        prof_coll = prof_collection.ProfCollection({'':[ prof ]},[ time ],)
        prof_coll.setMeta('loc', locationStr)
        #writeTimes(textFile, 'End')
        return prof_coll
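
utils.comp2vec converts u/v wind components into meteorological direction and speed. A self-contained numpy equivalent under the usual convention (direction is where the wind blows from, measured clockwise from north) looks roughly like the sketch below; it is illustrative, not SHARPpy's implementation:

import numpy as np

def comp2vec_sketch(u, v):
    # Speed is the vector magnitude; direction is rotated from math
    # convention into "degrees from north, blowing from".
    wspd = np.hypot(u, v)
    wdir = (270.0 - np.degrees(np.arctan2(v, u))) % 360.0
    return wdir, wspd

# A pure northerly (u=0, v=-5) and a pure easterly (u=-5, v=0):
wdir, wspd = comp2vec_sketch(np.array([0.0, -5.0]), np.array([-5.0, 0.0]))
print(wdir, wspd)   # [ 0. 90.] [5. 5.]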
Example #7
    def _parse(self):
        file_data = self._downloadFile()
        ## read in the file
        data = np.array(
            [l.strip() for l in file_data.split('\n') if l.strip()])

        ## necessary index points
        title_idx = np.where(data == '%TITLE%')[0][0]
        start_idx = np.where(data == '%RAW%')[0][0] + 1
        finish_idx = np.where(data == '%END%')[0]
        # %END% is optional: if it's missing, fall back to the last line that
        # still has six comma-separated fields.
        if finish_idx.size:
            finish_idx = finish_idx[0]
        else:
            finish_idx = max(n for n, line in enumerate(data)
                             if len(line.split(',')) == 6) + 1

        ## create the plot title
        data_header = data[title_idx + 1].split()
        location = data_header[0]
        time = datetime.strptime(data_header[1][:11], '%y%m%d/%H%M')
        if len(data_header) > 2:
            lat, lon = data_header[2].split(',')
            lat = float(lat)
            lon = float(lon)
        else:
            lat = 35.
            lon = -97.

        if time > datetime.utcnow() + timedelta(hours=1):
            # If strptime accidentally puts the sounding in the future (as with
            # the SARS archive), e.g. a 1957 sounding becoming 2057, pin it to
            # the 20th century.
            time = datetime.strptime('19' + data_header[1][:11], '%Y%m%d/%H%M')

        ## put it all together for StringIO
        full_data = '\n'.join(data[start_idx:finish_idx])

        if not is_py3():
            sound_data = StringIO(full_data)
        else:
            sound_data = BytesIO(full_data.encode())

        ## read the data into arrays
        p, h, T, Td, wdir, wspd = np.genfromtxt(sound_data,
                                                delimiter=',',
                                                comments="%",
                                                unpack=True)
        #       idx = np.argsort(p, kind='mergesort')[::-1] # sort by pressure in case the pressure array is off.

        pres = p  #[idx]
        hght = h  #[idx]
        tmpc = T  #[idx]
        dwpc = Td  #[idx]
        wspd = wspd  #[idx]
        wdir = wdir  #[idx]

        # Hack: treat a bogus surface height (> 30 km) as missing data.
        if hght[0] > 30000:
            hght[0] = -9999.00

        # Force latitude to be 35 N. Figure out a way to fix this later.
        prof = profile.create_profile(profile='raw',
                                      pres=pres,
                                      hght=hght,
                                      tmpc=tmpc,
                                      dwpc=dwpc,
                                      wdir=wdir,
                                      wspd=wspd,
                                      location=location,
                                      date=time,
                                      latitude=lat,
                                      missing=-9999.00)

        prof_coll = prof_collection.ProfCollection(
            {'': [prof]},
            [time],
        )

        prof_coll.setMeta('loc', location)
        prof_coll.setMeta('observed', True)
        prof_coll.setMeta('base_time', time)
        return prof_coll
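
The %END% fallback above scans for the last line that still has six comma-separated fields. A reduced demonstration with made-up rows:

data = ['%TITLE%', 'OUN 990514/0000', '%RAW%',
        '1000.0, 370.0, 25.0, 20.0, 180.0, 15.0',
        ' 850.0, 1500.0, 15.0, 10.0, 200.0, 25.0',
        'trailing junk without commas']

finish_idx = max(n for n, line in enumerate(data)
                 if len(line.split(',')) == 6) + 1
print(finish_idx)   # 5, so data[start_idx:5] keeps exactly the data rows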
Example #8
    def _parse(self):
        global dyn_inset
        file_data = self._downloadFile()
        ## read in the file
        data = np.array([l.strip() for l in file_data.split('\n')])

        ## the first two lines hold the date and station title;
        ## the OMEGASTART/OMEGAEND markers bracket the data block
        date_idx = data[0]
        title_idx = data[1]
        start_idx = (np.where(np.char.find(data, 'OMEGASTART') > -1)[0][0]) + 1
        finish_idx = np.where(np.char.find(data, 'OMEGAEND') > -1)[0][0]

        ## create the plot title
        location = title_idx
        time = datetime.strptime(date_idx, '%a %d %b %Y | %H%M UTC')

        # data_header = 'Location: ' + location + ' ' + data[date_idx]
        # if 'analysis' in data[date_idx]:
        #     #print "analysis"
        #     timeStr = str(data[date_idx].split('for ')[1]).upper()
        #     time = datetime.strptime(timeStr, '%H%MZ %d %b %y')
        # else:
        #     #print "forecast"
        #     timeStr = str(data[date_idx].split('valid ')[1]).upper()
        #     time = datetime.strptime(timeStr, '%HZ %a %d %b %y')

        # if time > datetime.utcnow(): #If the strptime accidently makes the sounding the future:
        #     # If the strptime accidently makes the sounding in the future (like with SARS archive)
        #     # i.e. a 1957 sounding becomes 2057 sounding...ensure that it's a part of the 20th century
        #     time = datetime.strptime('19' + data_header[1][:11], '%Y%m%d/%H%M')

        ## put it all together for StringIO
        full_data = '\n'.join(data[start_idx:finish_idx][:])
        sound_data = StringIO(full_data)

        ## read the data into arrays
        p, T, Td, h, wspd, wdir, omeg = np.genfromtxt(sound_data,
                                                      unpack=True,
                                                      usecols=(0, 1, 2, 3, 4,
                                                               5, 6))
        #idx = np.argsort(p, kind='mergesort')[::-1] # sort by pressure in case the pressure array is off.

        pres = p  #[idx]
        hght = h  #[idx]
        tmpc = T  #[idx]
        dwpc = Td  #[idx]
        wspd = wspd  #[idx]
        wdir = wdir  #[idx]
        omeg = omeg  #[idx]
        print(omeg)
        if dwpc[0] < 40:
            dyn_inset = 'winter'
        else:
            dyn_inset = 'severe'

        # Force latitude to be 35 N. Figure out a way to fix this later.
        prof = profile.create_profile(profile='raw',
                                      pres=pres,
                                      hght=hght,
                                      tmpc=tmpc,
                                      dwpc=dwpc,
                                      wdir=wdir,
                                      wspd=wspd,
                                      omeg=omeg,
                                      location=location,
                                      date=time,
                                      latitude=35.)
        prof_coll = prof_collection.ProfCollection(
            {'': [prof]},
            [time],
        )
        prof_coll.setMeta('loc', location)
        print "Using Omega Decoder."
        return prof_coll
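
np.char.find returns -1 wherever the substring is absent, so the > -1 masks pick out the OMEGASTART/OMEGAEND marker lines. A compact demonstration with made-up lines:

import numpy as np

data = np.array(['Tue 02 Jun 2015 | 0000 UTC', 'KOUN',
                 'OMEGASTART', ' 1000.0 ...', 'OMEGAEND'])
start_idx = np.where(np.char.find(data, 'OMEGASTART') > -1)[0][0] + 1
finish_idx = np.where(np.char.find(data, 'OMEGAEND') > -1)[0][0]
print(start_idx, finish_idx)   # 3 4, so data[3:4] is the payload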
Example #9
    def _parse(self):
        file_data = self._downloadFile()
        ## read in the file
        data = np.array([l.strip() for l in file_data.split('\n')])

        ## necessary index points
        start_idx = np.where(data == '<PRE>')[0][0]
        finish_idx = np.where(np.char.find(data, '</H3>') > -1)[0][0]
        time_idx = np.where(np.char.find(data, 'time') > -1)[0][0]
        latitude_idx = np.where(np.char.find(data, 'latitude') > -1)[0][0]

        ## create the plot title and time
        location = data[4].split()[1]
        time = datetime.strptime(data[time_idx].strip().split()[2],
                                 '%y%m%d/%H%M')
        latitude = data[latitude_idx].strip().split()[2]
        if time > datetime.utcnow():
            # If strptime accidentally puts the sounding in the future (as with
            # the SARS archive), e.g. a 1957 sounding becoming 2057, pin it to
            # the 20th century.
            time = datetime.strptime('19' + data[time_idx].strip().split()[2],
                                     '%Y%m%d/%H%M')

        ## put it all together for StringIO
        data = data[10:finish_idx]
        data_final = []
        max_hght = 0
        for m in data:
            while '  ' in m:
                m = m.replace('  ', ' ')
            if len(m.split(' ')) != 11:
                continue
            if int(float(m.split(' ')[1])) <= max_hght:
                continue
            data_final.append(m)
            max_hght = int(float(m.split(' ')[1]))
        full_data = '\n'.join(data_final)
        while '  ' in full_data:
            full_data = full_data.replace('  ', ' ')
        sound_data = StringIO(full_data.strip())
        ## read the data into arrays
        p, h, T, Td, rh, mr, wdir, wspd, ta, te, tv = np.genfromtxt(
            sound_data, delimiter=' ', comments="%", unpack=True)
        #idx = np.argsort(p, kind='mergesort')[::-1] # sort by pressure in case the pressure array is off.

        pres = p  #[idx]
        hght = h  #[idx]
        tmpc = T  #[idx]
        dwpc = Td  #[idx]
        wspd = wspd  #[idx]
        wdir = wdir  #[idx]
        wdir_final = []
        for m in wdir:
            s = 0.  # directions >= 360 are replaced with north
            if int(m) < 360:
                s = m
            wdir_final.append(s)

        # Force latitude to be 35 N. Figure out a way to fix this later.
        prof = profile.create_profile(profile='raw',
                                      pres=pres,
                                      hght=hght,
                                      tmpc=tmpc,
                                      dwpc=dwpc,
                                      wdir=wdir_final,
                                      wspd=wspd,
                                      location=location,
                                      date=time,
                                      latitude=float(latitude))

        prof_coll = prof_collection.ProfCollection(
            {'': [prof]},
            [time],
        )

        prof_coll.setMeta('loc', location)
        return prof_coll
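
The cleanup loop above keeps only rows whose height (field 1) strictly increases, dropping repeated or out-of-order levels. A reduced sketch with made-up three-field rows (the real rows have eleven fields):

rows = ['1000.0 100 20.0', '925.0 100 18.0', '850.0 1457 15.0']

data_final, max_hght = [], 0
for m in rows:
    if int(float(m.split(' ')[1])) <= max_hght:
        continue              # height failed to increase: drop the row
    data_final.append(m)
    max_hght = int(float(m.split(' ')[1]))

print(len(data_final))        # 2: the repeated-height 925 hPa row is gone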
Example #10
    def _parse(self):
        """
        Parse the netCDF file according to the variable naming and
        dimmensional conventions of the WRF-ARW.
        """
        ## open the file and also store the lat/lon of the selected point
        file_data = self._downloadFile()
        gridx = self._file_name[1]
        gridy = self._file_name[2]

        ## calculate the nearest grid point to the map point
        idx = self._find_nearest_point(file_data, gridx, gridy)

        ## check to see if this is a 4D netCDF4 that includes all available times.
        ## If it does, open and compute the variables as 4D variables
        if len(file_data.variables["T"][:].shape) == 4:
            ## read in the data from the WRF file and conduct necessary processing
            theta = file_data.variables["T"][:, :, idx[0], idx[1]] + 300.0
            qvapr = file_data.variables["QVAPOR"][:, :, idx[0],
                                                  idx[1]] * 10**3  #g/kg
            mpres = (file_data.variables["P"][:, :, idx[0], idx[1]] +
                     file_data.variables["PB"][:, :, idx[0], idx[1]]) * .01
            mhght = (file_data.variables["PH"][:, :, idx[0], idx[1]] +
                     file_data.variables["PHB"][:, :, idx[0], idx[1]]) / G
            ## unstagger the height grid onto the mass levels
            mhght = (mhght[:, :-1] + mhght[:, 1:]) / 2.

            muwin = file_data.variables["U"][:, :, idx[0], idx[1]]
            mvwin = file_data.variables["V"][:, :, idx[0], idx[1]]

            ## convert the potential temperature to air temperature
            mtmpc = thermo.theta(1000.0, theta - 273.15, p2=mpres)
            ## convert the mixing ratio to dewpoint
            mdwpc = thermo.temp_at_mixrat(qvapr, mpres)
            ## convert the grid relative wind to earth relative
            cosalpha = file_data.variables['COSALPHA'][0, idx[0], idx[1]]
            sinalpha = file_data.variables['SINALPHA'][0, idx[0], idx[1]]
            U = muwin * cosalpha - mvwin * sinalpha
            V = mvwin * cosalpha + muwin * sinalpha
            ## convert from m/s to kts
            muwin = utils.MS2KTS(U)
            mvwin = utils.MS2KTS(V)

        ## if the data is not 4D, then it must be assumed that this is a file containing only a single time
        else:
            ## read in the data from the WRF file and conduct necessary processing
            theta = file_data.variables["T"][:, idx[0], idx[1]] + 300.0
            qvapr = file_data.variables["QVAPOR"][:, idx[0],
                                                  idx[1]] * 10**3  #g/kg
            mpres = (file_data.variables["P"][:, idx[0], idx[1]] +
                     file_data.variables["PB"][:, idx[0], idx[1]]) * .01
            mhght = file_data.variables["PH"][:, idx[0],
                                              idx[1]] + file_data.variables[
                                                  "PHB"][:, idx[0], idx[1]] / G
            ## unstagger the height grid
            mhght = (mhght[:-1, :, :] + mhght[1:, :, :]) / 2.

            muwin = file_data.variables["U"][:, idx[0], idx[1]]
            mvwin = file_data.variables["V"][:, idx[0], idx[1]]

            ## convert the potential temperature to air temperature
            mtmpc = thermo.theta(1000.0, theta - 273.15, p2=mpres)
            ## convert the mixing ratio to dewpoint
            mdwpc = thermo.temp_at_mixrat(qvapr, mpres)
            ## convert the grid relative wind to earth relative
            cosalpha = file_data.variables['COSALPHA'][0, idx[0], idx[1]]
            sinalpha = file_data.variables['SINALPHA'][0, idx[0], idx[1]]
            U = muwin * cosalpha - mvwin * sinalpha
            V = mvwin * cosalpha + muwin * sinalpha
            ## convert from m/s to kts
            muwin = utils.MS2KTS(U)
            mvwin = utils.MS2KTS(V)

        ## get the model start time of the file
        inittime = dattim.datetime.strptime(str(file_data.START_DATE),
                                            '%Y-%m-%d_%H:%M:%S')

        profiles = []
        dates = []
        ## loop over the available times

        for i in range(file_data.variables["T"][:].shape[0]):
            ## make sure the arrays are 1D
            prof_pres = mpres[i].flatten()
            prof_hght = mhght[i].flatten()
            prof_tmpc = mtmpc[i].flatten()
            prof_dwpc = mdwpc[i].flatten()
            prof_uwin = muwin[i].flatten()
            prof_vwin = mvwin[i].flatten()
            ## compute the time of the profile
            try:
                delta = dattim.timedelta(
                    minutes=int(file_data.variables["XTIME"][i]))
                curtime = inittime + delta
            except KeyError:
                var = ''.join(
                    np.asarray(file_data.variables['Times'][i], dtype=str))
                curtime = dattim.datetime.strptime(var, '%Y-%m-%d_%H:%M:%S')
            date_obj = curtime

            ## construct the profile object
            prof = profile.create_profile(profile="raw",
                                          pres=prof_pres,
                                          hght=prof_hght,
                                          tmpc=prof_tmpc,
                                          dwpc=prof_dwpc,
                                          u=prof_uwin,
                                          v=prof_vwin,
                                          location=str(gridx) + "," +
                                          str(gridy),
                                          date=date_obj,
                                          missing=-999.0,
                                          latitude=gridy,
                                          strictQC=False)

            ## append the dates and profiles
            profiles.append(prof)
            dates.append(date_obj)

        ## create a profile collection - dictionary has no key since this
        ## is not an ensemble model
        prof_coll = prof_collection.ProfCollection({'': profiles}, dates)

        return prof_coll
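
Unstaggering averages adjacent vertical levels so the staggered geopotential heights land on the mass grid, which is why the height array loses one level. A minimal numpy illustration:

import numpy as np

# A staggered profile with 5 w-levels becomes 4 mass levels.
ph_stag = np.array([0., 100., 250., 450., 700.])
ph_mass = (ph_stag[:-1] + ph_stag[1:]) / 2.
print(ph_mass)   # [ 50. 175. 350. 575.]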
Example #11
    def _parse(self):
        global dyn_inset
        file_data = self._downloadFile()
        ## read in the file
        data = np.array([l.strip() for l in file_data.split('\n')])

        ## necessary index points
        #title_idx = np.where( 'Station:' in data )
        title_idx = np.where(np.char.find(data, 'Station:') > -1)[0][0]
        date_idx = np.where(np.char.find(data, 'Date: ') > -1)[0][0]
        start_idx = np.where(np.char.find(data, 'SFC') > -1)[0][0]
        finish_idx = np.where(np.char.find(data, 'TRP') > -1)[0][0]

        ## create the plot title
        location = data[title_idx].split('Station: ')[1]
        data_header = 'Location: ' + location + ' ' + data[date_idx]
        timeStr = str(data[date_idx].split('Date: ')[1]).upper()
        time = datetime.strptime(timeStr, '%H%MZ %d %b %y')

        
        # if time > datetime.utcnow(): #If the strptime accidently makes the sounding the future:
        #     # If the strptime accidently makes the sounding in the future (like with SARS archive)
        #     # i.e. a 1957 sounding becomes 2057 sounding...ensure that it's a part of the 20th century
        #     time = datetime.strptime('19' + data_header[1][:11], '%Y%m%d/%H%M')

        # ---------------------------- Clean up the data ------------------------------#
        # Make sure we have the right number of fields.
        # Commonly at the end of the sounding, wind speed and direction are missing.
        # This check takes care of that, eliminating a row if it's undersized.

        dirtyData = data[start_idx:finish_idx]
        cleanData = []
        for line in dirtyData:
            numItems = 0
            items = line.split(' ')
            for item in items:
                if item != '':
                    numItems += 1
            if numItems == 15:
                cleanData.append(line)
        # -----------------------------------------------------------------------------#

        ## put it all together for StringIO
        full_data = '\n'.join(cleanData)
        sound_data = StringIO(full_data)

        ## read the data into arrays
        p, h, T, Td, wdir, wspd = np.genfromtxt(sound_data, unpack=True, usecols=(1, 2, 3, 4, 8, 9))
        #idx = np.argsort(p, kind='mergesort')[::-1] # sort by pressure in case the pressure array is off.

        # ----------------------- More Cleaning ----------------------- #
        # SHARPpy doesn't like directions of 360, convert those to 0:
        wdir = [0 if x==360 else x for x in wdir]

        # If there is a duplicate height entry (common),
        # Just add an extra meter, and that'll be our little secret.  ;)
        for key,height in enumerate(h):
            if key == 0:
                continue
            if height == h[key-1]:
                h[key] += 1
        # ------------------------------------------------------------- #

        pres = p  #[idx]
        hght = h  #[idx]
        tmpc = T  #[idx]
        dwpc = Td  #[idx]
        wspd = wspd  #[idx]
        wdir = wdir  #[idx]
        if dwpc[0] < 40:
            dyn_inset = 'winter'
        else:
            dyn_inset = 'severe'

        # Force latitude to be 35 N. Figure out a way to fix this later.
        prof = profile.create_profile(profile='raw', pres=pres, hght=hght, tmpc=tmpc, dwpc=dwpc, wdir=wdir, wspd=wspd, location=location, date=time, latitude=35.)
        prof_coll = prof_collection.ProfCollection({'':[ prof ]},[ time ],)
        prof_coll.setMeta('loc', location)
        return prof_coll
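
The duplicate-height bump above compares each height with the (possibly already nudged) previous element, so runs of duplicates stay strictly increasing pairwise. A tiny demonstration:

import numpy as np

h = np.array([100., 250., 250., 400.])
for key, height in enumerate(h):
    if key == 0:
        continue
    if height == h[key - 1]:
        h[key] += 1   # nudge the duplicate so heights keep increasing
print(h)              # [100. 250. 251. 400.]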
Example #12
    def _parse(self):
        file_data = self._downloadFile()
        ## read in the file
        data = np.array([l.strip() for l in file_data.split('\n')])

        # Find the cloud info line in the text file.
        cloud_list = []
        for item in data:
            if 'ctf_low' in item:
                cloud_list.append(item)

        # Attempt to unpack the list containing the cloud information.
        try:
            cloud_line = cloud_list[0]
            cloud_flag = True
        except:
            cloud_flag = False

        # Assign CTF and CTP to local variables.
        if cloud_flag:
            ctf_low_string = cloud_line.split(',')[0]
            ctf_high_string = cloud_line.split(',')[1]
            ctp_low_string = cloud_line.split(',')[2]
            ctp_high_string = cloud_line.split(',')[3]
            ctf_low = ctf_low_string.split(' ')[1]
            ctf_high = ctf_high_string.split(' ')[1]
            ctp_low = int(ctp_low_string.split(' ')[1])
            ctp_high = int(ctp_high_string.split(' ')[1])

            # Skew-T won't launch if cloud top pressure < 100mb.
            # Set variables to dummy values so it doesn't try to draw the CTP line out of bounds.
            if ctp_low < 100:
                ctp_low = 3000
            if ctp_high < 100:
                ctp_high = 3000
        else:
            # Assign missing values just in case the cloud info is not in the text file.
            ctf_low = -99999
            ctf_high = -99999
            ctp_low = 3000
            ctp_high = 3000

        ## necessary index points
        title_idx = np.where(data == '%TITLE%')[0][0]
        start_idx = np.where(data == '%RAW%')[0][0] + 1
        finish_idx = np.where(data == '%END%')[0][0]

        ## create the plot title
        data_header = data[title_idx + 1].split()
        location = data_header[0]
        time = datetime.strptime(data_header[1][:11], '%y%m%d/%H%M')
        if len(data_header) > 2:
            lat, lon = data_header[2].split(',')
            lat = float(lat)
            lon = float(lon)
        else:
            lat = 35.
            lon = -97.

        if time > datetime.utcnow() + timedelta(hours=1):
            # If strptime accidentally puts the sounding in the future (as with
            # the SARS archive), e.g. a 1957 sounding becoming 2057, pin it to
            # the 20th century.
            time = datetime.strptime('19' + data_header[1][:11], '%Y%m%d/%H%M')

        ## put it all together for StringIO
        full_data = '\n'.join(data[start_idx:finish_idx])

        if not is_py3():
            sound_data = StringIO(full_data)
        else:
            sound_data = BytesIO(full_data.encode())

        ## read the data into arrays
        p, h, T, Td, wdir, wspd = np.genfromtxt(sound_data,
                                                delimiter=',',
                                                comments="%",
                                                unpack=True)
        #       idx = np.argsort(p, kind='mergesort')[::-1] # sort by pressure in case the pressure array is off.

        pres = p  #[idx]
        hght = h  #[idx]
        tmpc = T  #[idx]
        dwpc = Td  #[idx]
        wspd = wspd  #[idx]
        wdir = wdir  #[idx]

        # Hack: treat a bogus surface height (> 30 km) as missing data.
        if hght[0] > 30000:
            hght[0] = -9999.00

        # Force latitude to be 35 N. Figure out a way to fix this later.
        # Added cloud top parameters to profile object.
        prof = profile.create_profile(profile='raw',
                                      pres=pres,
                                      hght=hght,
                                      tmpc=tmpc,
                                      dwpc=dwpc,
                                      wdir=wdir,
                                      wspd=wspd,
                                      location=location,
                                      date=time,
                                      latitude=lat,
                                      missing=-9999.00,
                                      ctf_low=ctf_low,
                                      ctf_high=ctf_high,
                                      ctp_low=ctp_low,
                                      ctp_high=ctp_high)

        prof_coll = prof_collection.ProfCollection(
            {'': [prof]},
            [time],
        )

        prof_coll.setMeta('loc', location)
        prof_coll.setMeta('observed', True)
        prof_coll.setMeta('base_time', time)
        prof_coll.setMeta('ctf_low', ctf_low)
        prof_coll.setMeta('ctf_high', ctf_high)
        prof_coll.setMeta('ctp_low', ctp_low)
        prof_coll.setMeta('ctp_high', ctp_high)
        return prof_coll
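
For reference, a sketch of how the cloud line gets carved up, assuming it is shaped like 'ctf_low N,ctf_high N,ctp_low N,ctp_high N' (the field names come from the code above; the values are made up):

cloud_line = 'ctf_low 75,ctf_high 112,ctp_low 825,ctp_high 630'
fields = [f.strip().split(' ')[1] for f in cloud_line.split(',')]
ctf_low, ctf_high = fields[0], fields[1]
ctp_low, ctp_high = int(fields[2]), int(fields[3])
print(ctf_low, ctf_high, ctp_low, ctp_high)   # 75 112 825 630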