Example #1
 def get_opasses(self, file, sectorfiles=None):
     log.info('Running pass predictor on: ' + str(file))
     # Paths sometimes mess this up. Use os.path.basename
     fn = DataFileName(os.path.basename(file))
     sfn = fn.create_standard(downloadSiteObj=self)
     if sectorfiles:
         opasses = pass_prediction([sfn.satname], [sfn.sensorname],
                                   None,
                                   None,
                                   sfn.datetime - timedelta(hours=9),
                                   sfn.datetime,
                                   single=True,
                                   force=True,
                                   sectorfiles=sectorfiles)
     elif self.sector_file:
         opasses = pass_prediction([sfn.satname], [sfn.sensorname],
                                   self.sector_file,
                                   self.sectorlist,
                                   sfn.datetime - timedelta(hours=9),
                                   sfn.datetime,
                                   force=True,
                                   single=True)
     else:
         opasses = pass_prediction([sfn.satname], [sfn.sensorname],
                                   None,
                                   None,
                                   sfn.datetime - timedelta(hours=9),
                                   sfn.datetime,
                                   force=True,
                                   single=True)
     return opasses
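
The three pass_prediction branches above differ only in the sector-file arguments. A hypothetical consolidation (assuming pass_prediction treats sectorfiles=None the same as omitting the keyword, which the code above does not confirm):

 def get_opasses_consolidated(self, file, sectorfiles=None):
     # Hypothetical refactor of get_opasses above: choose the sector
     # arguments once, then make a single pass_prediction call.
     fn = DataFileName(os.path.basename(file))
     sfn = fn.create_standard(downloadSiteObj=self)
     sfile, slist = None, None
     if not sectorfiles and self.sector_file:
         sfile, slist = self.sector_file, self.sectorlist
     return pass_prediction([sfn.satname], [sfn.sensorname],
                            sfile, slist,
                            sfn.datetime - timedelta(hours=9),
                            sfn.datetime,
                            single=True, force=True,
                            sectorfiles=sectorfiles)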
Example #2
    def read(self, fname, datavars, gvars, metadata, chans=None, sector_definition=None):
        fileobj = h5py.File(str(fname), mode='r')

        header = fileobj.attrs['FileHeader']
        metadata['top']['platform_name'] = get_header_info(header, 'SatelliteName').lower()
        metadata['top']['source_name'] = get_header_info(header, 'InstrumentName').lower()
        dfn = DataFileName(os.path.basename(fname))
        if dfn:
            sdfn = dfn.create_standard()
            metadata['top']['dataprovider'] = sdfn.dataprovider
        else:
            metadata['top']['dataprovider'] = 'Unknown'


        for dsname in self.dataset_info.keys():

            londata = fileobj[dsname]['Longitude'].value
            latdata = fileobj[dsname]['Latitude'].value

            yyyy = fileobj[dsname]['ScanTime']['Year'].value
            mm = fileobj[dsname]['ScanTime']['Month'].value
            dd = fileobj[dsname]['ScanTime']['DayOfMonth'].value
            hh = fileobj[dsname]['ScanTime']['Hour'].value
            mn = fileobj[dsname]['ScanTime']['Minute'].value
            ss = fileobj[dsname]['ScanTime']['Second'].value

            metadata['top']['start_datetime'] = datetime.strptime("%04d%02d%02d%02d%02d%02d"%(yyyy[0],mm[0],dd[0],hh[0],mn[0],ss[0]),'%Y%m%d%H%M%S')
            # Note an apparent bug: productfilename uses end_datetime as the filename.
            # For now just leave out end_datetime (it automatically gets set to start_datetime
            # in scifile if undefined).
            # Oops, this might have messed up the pass predictor.
            metadata['top']['end_datetime'] = datetime.strptime("%04d%02d%02d%02d%02d%02d"%(yyyy[-1],mm[-1],dd[-1],hh[-1],mn[-1],ss[-1]),'%Y%m%d%H%M%S')
            metadata['top']['filename_datetime'] = metadata['top']['start_datetime']

            # Tells driver to NOT try to sector this data.
            metadata['top']['NON_SECTORABLE'] = True

            if chans == []:
                return

            numchans = fileobj[dsname]['Tb'].value.shape[2]
            tbdatalist = np.dsplit(fileobj[dsname]['Tb'].value,numchans)
            tbdata = []

            ii = 0
            for data in tbdatalist:
                for currgeoipsvarname,ncbandnum in self.dataset_info[dsname].items():
                    if ii == ncbandnum:
                        geoipsvarname = currgeoipsvarname
                if not chans or geoipsvarname in chans:
                    datavars[dsname][geoipsvarname] = np.ma.masked_equal(np.squeeze(data),-999)
                ii += 1
            gvars[dsname]['Latitude'] = latdata
            gvars[dsname]['Longitude'] = londata
            metadata['gvars'][dsname]['Longitude'] = _empty_varinfo.copy()
            metadata['gvars'][dsname]['Latitude'] = _empty_varinfo.copy()
            metadata['gvars'][dsname]['Longitude']['nomask'] = True
            metadata['gvars'][dsname]['Latitude']['nomask'] = True

        return 
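
The channel loop above splits the 3-D Tb array into single-channel slices with np.dsplit, squeezes each slice to 2-D, and masks the -999 fill values. A minimal self-contained illustration of that pattern (synthetic array for demonstration):

import numpy as np

# Build a tiny (scan, pixel, channel) cube filled with -999, with one
# valid channel, and split it into per-channel 2-D masked arrays.
tb = np.full((2, 3, 4), -999.0)
tb[..., 0] = 150.0
for band, data in enumerate(np.dsplit(tb, tb.shape[2])):
    masked = np.ma.masked_equal(np.squeeze(data), -999)
    print(band, masked.count())  # channel 0 keeps 6 valid pixels, the rest 0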
Example #3
 def get_final_filename(self,file):
     ''' Keep original filename, but use standard path
         The default for this method uses GeoIPS standard
         filename and GeoIPS standard path. '''
     fn = DataFileName(os.path.basename(file))
     sdfn = fn.create_standard(downloadSiteObj=self)
     return os.path.join(os.path.dirname(sdfn.name), os.path.basename(file))
Example #4
 def get_logfname(self, file, geoips=True, postfix=None):
     if os.path.isdir(file):
         dfn = DataFileName(os.path.basename(glob(file + '/*')[0]))
     else:
         dfn = DataFileName(os.path.basename(file))
     sdfn = dfn.create_standard(downloadSiteObj=self)
     return sdfn.create_logfile(geoips=geoips)
Example #5
 def get_final_filename(self, file):
     '''Eventually this will be standard for everything in Site.py;
     for now test with rscat/viirs only. This returns a string.'''
     fn = DataFileName(os.path.basename(file))
     sdfn = fn.create_standard(downloadSiteObj=self)
     if self.get_final_ext():
         sdfn.ext = self.get_final_ext()
     return sdfn.name
Example #6
 def get_filenamedatetime(self, file):
     '''Eventually this will be standard for everything in Site.py;
     for now test with rscat/viirs only. This returns a datetime
     object, not a FileName object.'''
     fn = DataFileName(os.path.basename(file))
     if fn:
         sfn = fn.create_standard(downloadSiteObj=self)
     else:
         return None
     return sfn.datetime
Example #7
 def opass_overlaps(obj, opass, fnstr):
     '''Eventually this will be standard for everything in Site.py;
     for now test with rscat/viirs only. '''
     # Paths sometimes mess this up. Use os.path.basename
     fn = DataFileName(os.path.basename(fnstr))
     sfn = fn.create_standard(downloadSiteObj=obj)
     sfn_enddt = sfn.datetime + timedelta(
         minutes=fn.sensorinfo.mins_per_file)
     #log.info('opass: ' + str(opass) + ' sfn: ' + str(sfn) + ' sfn_enddt: ' + str(sfn_enddt))
     overlaps = sfn.is_concurrent_with(opass.startdt, sfn.datetime,
                                       opass.enddt, sfn_enddt)
     #log.info('overlaps: '+str(overlaps))
     return overlaps
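
is_concurrent_with above is handed the endpoints of the overpass interval and the data-file interval; a generic sketch of the interval-overlap test such a check typically reduces to (an illustration, not necessarily the FileName implementation):

from datetime import datetime, timedelta

def intervals_overlap(start1, end1, start2, end2):
    # Two closed intervals overlap iff each one starts before the
    # other one ends.
    return start1 <= end2 and start2 <= end1

# Hypothetical overpass and data-file windows for illustration
opass_start = datetime(2020, 1, 1, 12, 0)
opass_end = opass_start + timedelta(minutes=98)
file_start = datetime(2020, 1, 1, 13, 20)
file_end = file_start + timedelta(minutes=6)
print(intervals_overlap(opass_start, opass_end, file_start, file_end))  # True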
Example #8
    def run_on_files(self, final_file):
        ''' The default for this method runs on every file that
            is downloaded. Overridden for ABI to only process when
            a directory contains 16 files. Also allows limiting
            how often we actually try to process if we can't
            keep up. '''

        runtype = 'RadF'

        dirname = os.path.dirname(final_file)
        # List all RadF files
        listthesefiles = dirname + '/*{0}*'.format(runtype)

        files = glob(listthesefiles)
        num_files = len(files)
        log.info('  In run_on_files TESTLOCAL_TESTABI ' + str(num_files) +
                 ' files in directory ' + listthesefiles)

        # Limit frequency we actually process
        dfn = DataFileName(os.path.basename(final_file)).create_standard()
        if dfn.datetime.minute != 0 and dfn.datetime.minute != 30:
            log.info('ONLY RUNNING 0 30 MINUTES FOR NOW. Skipping processing')
            return []

        # Once we get 16 files, and the current file is RadF, kick off processing
        if num_files == 16 and runtype in final_file:
            return [final_file]
        else:
            return []
Example #9
    def getfile(self, remotefile, localfile):
        processingfile = localfile + '.processing'
        ff = open(processingfile, 'w')
        ff.close()
        log.info('Touching temporary file: ' +
                 os.path.basename(processingfile))
        temp_filename = DataFileName(localfile).create_scratchfile()
        temp_filename.makedirs()
        temp_fnstr = temp_filename.name
        if not self.downloadactive:
            log.info(
                '      *** nodownload set, not downloading remotefile %s ' %
                remotefile)
            log.info('      ***     to localfile %s' % localfile)
        else:
            log.info('      *** grabbing remotefile %s ' % remotefile)
            log.info('      ***     to localfile %s' % localfile)
            self.wget_file(remotefile, temp_fnstr)

        self.move_to_final(temp_fnstr, processingfile, localfile)
Example #10
    def sort_files(self, filelist):
        '''Override sort_files function found in Site.py. '''
        #print filelist
        #log.info('    Creating list of DataFileName objects to sort')
        newfilelist = [DataFileName(file) for file in filelist]
        #log.info('        Sorting list')
        newfilelist.sort(key=lambda x: x.datetime, reverse=True)

        #date_pattern = re.compile('RS.*\.([0-9]{11})')
        #log.debug('filelist: '+str(filelist))
        #filelist.sort(cmp,key=lambda file:date_pattern.search(file).group(1),reverse=True)
        #log.debug('filelist: '+str(filelist))
        #log.info('        Creating list of string objects to return')
        filelist = [file.name for file in newfilelist]
        return filelist
Example #11
    def convert(self, file, final_filename):
        '''The convert method is called from the postproc method. The default
        method in Site.py just moves from file to final_filename. If there are
        actual conversions that have to happen, this will likely be overridden
        in the subclasses of Site ([host_type].py, [host_type]_[data_type].py)'''

        log.info('Using Site convert')
        dfn = DataFileName(os.path.basename(file))

        if os.path.isfile(file):
            # if file is gzipped, unzip first
            if hasattr(self, 'untgzfiles') and self.untgzfiles and (
                    'tar.gz' in dfn.ext or 'tgz' in dfn.ext):
                log.info('   ****untgzing file ' + file)
                return self.untgz(file)
            elif hasattr(self,
                         'unbz2files') and self.unbz2files and ('.bz2'
                                                                in dfn.ext):
                log.info('   ****bunzip2 file ' + file)
                retval = os.system('bunzip2 ' + file)
                log.info('   ****moving file to ' + final_filename)
                if os.path.isfile(os.path.splitext(file)[0]):
                    shutil.move(os.path.splitext(file)[0], final_filename)
                    return [final_filename]
                elif retval != 0:
                    log.info('    ****Final file failed!')
                    return []
            elif 'gz' in dfn.ext and 'tgz' not in dfn.ext:
                log.info('   ****gunzipping file ' + file)
                retval = os.system('gunzip ' + file)
                log.info('   ****moving file to ' + final_filename)
                if os.path.isfile(os.path.splitext(file)[0]):
                    shutil.move(os.path.splitext(file)[0], final_filename)
                    return [final_filename]
                elif retval != 0:
                    log.info('    ****Final file failed!')
                    return []
            else:
                log.info('   ****moving file to ' + final_filename)
                shutil.move(file, final_filename)
                return [final_filename]
        else:
            log.info('File ' + file + ' does not exist, not moving')
            return []
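
The gz branches above shell out to gunzip/bunzip2 via os.system. As an aside, the plain-gz case could be handled in-process with the standard-library gzip module; a hedged alternative sketch (a swapped-in technique, not what the code above does):

import gzip
import os
import shutil

def gunzip_to(src, dst):
    # Decompress a .gz file directly to dst without shelling out,
    # then remove the original to mimic gunzip's default behavior.
    with gzip.open(src, 'rb') as fin, open(dst, 'wb') as fout:
        shutil.copyfileobj(fin, fout)
    os.remove(src)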
Example #12
    def postproc(self,
                 file,
                 geoips_args=None,
                 forceprocess=False,
                 noprocess=False):
        '''The postproc method is called from downloaders.Sites.Site.get.
            After a file has been successfully downloaded, postproc is
            called on that file to do any necessary conversions, post
            processing, etc.
            This should not be overridden in the subclasses, but the methods
                called from postproc can be overridden to customize:

            run_on_files:
                Check for the existence of certain files before kicking off processing.
                Pre-GeoIPS processing steps can also go here (run on either an
                    individual file or a group of files). Note this runs serially, so
                    don't put anything in here that takes a long time!
            convert:
                perform special conversions (by default gunzips and moves to final_filename)
            get_final_filename:
                by default uses the GeoIPS standard data filename (can override to use the original, etc.)
            pre_legacy_procs:
                No one uses this - called before ppscript; if it returns True, then run ppscript.
                Defaults to just returning True.
            pp_script:
                attribute set on a subclass that is the legacy postprocs call.
                If set, pp_script will be run.
        '''

        final_filename = self.get_final_filename(file)

        #log.info('Using Site postproc')

        # The default version of this (found above in Site.py) just moves
        # file to final_filename. Can be overridden in host/data classes
        # to actually convert the file, not just change the filename...
        if not glob(final_filename) or forceprocess:
            if forceprocess:
                log.interactive(
                    'Force running convert/process (called immediately after successful download?)'
                )
            else:
                log.interactive('Final file: ' + final_filename +
                                ' did not exist, reconverting/reprocessing')

            # convert method returns a list of files that need processing -
            # amsu converter, for instance, creates multiple files from a
            # single input file
            for final_file in self.convert(file, final_filename):
                if noprocess:
                    log.info(
                        'SKIPPING processing on file, downloader called with --noprocess '
                        + final_file)
                    continue
                # If self.pp_script is defined in [host_type].py or
                # [host_type]_[data_type].py, run postprocs
                runpp = True
                try:
                    log.info('   ****qsubbing ' +
                             os.path.basename(self.pp_script) + ' ' +
                             final_file)
                except (NameError, AttributeError):
                    log.info('   ****No non-GeoIPS postprocs defined')
                    runpp = False
                if runpp:
                    for file in self.run_on_files(final_file):
                        log.info(
                            '    ****Running non-GeoIPS postprocessing on ' +
                            os.path.basename(file))
                        if self.pre_legacy_procs(file):
                            # pp_script may have arguments, so split on spaces and just use the
                            # basename of the first argument (script name) for logname
                            log_fname = self.get_logfname(file, geoips=False)
                            log_fname.makedirs()
                            resource_list = None
                            qsub(self.pp_script + ' ' + file +
                                 os.getenv('LEGACY_PROCSDIR_CALL_AFTER'), [],
                                 queue=self.queue,
                                 name=log_fname.qsubname,
                                 resource_list=resource_list,
                                 outfile=log_fname.name,
                                 suffix='log')

                dfn = DataFileName(os.path.basename(final_file))
                sdfn = dfn.create_standard(downloadSiteObj=self)

                if self.run_geoips:
                    # self.run_on_files defaults to returning final_file
                    # can override run_on_files function in host_type/data_type
                    # subclass (ie, for modis where we must have a specific
                    # set of files before we can run driver)
                    for file in self.run_on_files(final_file):
                        log_fname = self.get_logfname(file)
                        log_fname.makedirs()
                        log.info(log_fname.name)
                        # geoips_args are set in downloader based on sector_file information (static vs dynamic),
                        # and passed arguments for sectorfiles / sectorlist.
                        # Do NOT run pass predictor in downloader anymore - now it is run in driver,
                        # but we still need to make sure we allow for passing a list of sectors to downloader...

                        if not geoips_args:
                            #arglist = [file,'--queue batch@kahuna','-s "'+' '.join(sectorlist)+'"']
                            arglist = [file, '--queue batch@kahuna']
                            # Currently setting mp_max_cpus and mp_mem_per_cpu in individual Site inits
                            if hasattr(self, 'mp_max_cpus'):
                                arglist += [
                                    '--mp_max_cpus ' +
                                    str(int(self.mp_max_cpus))
                                ]
                        else:
                            dfn = DataFileName(os.path.basename(file))
                            if dfn.sensorinfo.FName['runfulldir']:
                                geoips_args.addarg(os.path.dirname(file))
                            else:
                                geoips_args.addarg(file)
                            log.info(geoips_args.options)
                            # Currently setting mp_max_cpus and mp_mem_per_cpu in individual Site inits
                            if hasattr(self, 'mp_max_cpus'):
                                geoips_args.addopt('mp_max_cpus',
                                                   str(int(self.mp_max_cpus)))
                            arglist = geoips_args.get_arglist()
                            # Remove the file we just added so we can
                            # add the next one
                            geoips_args.delarg(0)

                        # Currently setting mp_max_cpus and mp_mem_per_cpu in individual Site inits
                        if hasattr(self, 'mp_max_cpus'):
                            resource_list = os.getenv('PBS_RESOURCE_LIST_SELECT')+str(int(self.mp_max_cpus))+\
                                            os.getenv('PBS_RESOURCE_LIST_MEM')+str(int(self.mp_mem_per_cpu*self.mp_max_cpus))+'gb'+\
                                            os.getenv('PBS_RESOURCE_LIST_QLIST')
                        elif hasattr(self, 'pbs_resource_list'):
                            resource_list = self.pbs_resource_list
                        else:
                            resource_list = None

                        if hasattr(self, 'geoips_executable'):
                            geoips_executable = self.geoips_executable
                        else:
                            geoips_executable = gpaths[
                                'GEOIPS'] + '/geoips/driver.py'

                        qsub(
                            geoips_executable,
                            arglist,
                            queue=self.queue,
                            #name='GW'+self.data_type+'_'+self.host_type,
                            name=log_fname.qsubname,
                            resource_list=resource_list,
                            outfile=log_fname.name)
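
Per the docstring above, postproc itself is not overridden; subclasses customize the hooks it calls. A minimal hypothetical subclass sketch (the class name, script path, and 'RadF' marker are invented for illustration):

class MyHostMyData(Site):  # hypothetical [host_type]_[data_type] subclass
    def __init__(self, *args, **kwargs):
        super(MyHostMyData, self).__init__(*args, **kwargs)
        # If set, pp_script is qsubbed for each file run_on_files returns.
        self.pp_script = '/path/to/legacy_postproc.sh'

    def get_final_filename(self, file):
        # Keep the original filename, but under the GeoIPS standard path.
        fn = DataFileName(os.path.basename(file))
        sdfn = fn.create_standard(downloadSiteObj=self)
        return os.path.join(os.path.dirname(sdfn.name), os.path.basename(file))

    def run_on_files(self, final_file):
        # Only kick off processing for files whose names contain a marker.
        return [final_file] if 'RadF' in final_file else []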
Example #13
    def getsinglefilelist(self,
                          start_time,
                          end_time,
                          searchstring,
                          login=True,
                          subdir=None):

        if subdir is not None:
            fullurlpath = self.baseurl + '/' + subdir
        else:
            fullurlpath = self.baseurl

        log.info('')
        log.info('fullurlpath: ' + str(fullurlpath) + '/' + str(searchstring))

        indexhtmlfile = DataFileName()
        indexhtmlfile.satname = 'wgetsite'
        indexhtmlfile.sensorname = self.data_type
        indexhtmlfile.dataprovider = self.host_type
        # Hmm, FileName object should probably set this when datetime is set?
        dt = datetime.utcnow()
        indexhtmlfile.time = dt.strftime(indexhtmlfile.datetime_fields['time'])
        indexhtmlfile.date = dt.strftime(indexhtmlfile.datetime_fields['date'])
        indexhtmlfile.extra = 'index'
        indexhtmlfile.ext = 'html'
        indexhtmlfile = indexhtmlfile.create_scratchfile()
        indexhtmlfile.makedirs()
        indexhtmlfnstr = indexhtmlfile.name
        log.info(indexhtmlfnstr)

        # rcj 13DEC2018 this part doesn't need to run for lance modis data
        # it is handled in lance_modis.py getfilelist
        if hasattr(self,
                   'host') and (self.host == 'nrt3.modaps.eosdis.nasa.gov'
                                or self.host == 'nrt4.modaps.eosdis.nasa.gov'):
            pass
        #everything that is not lance modis that uses this script should still pass through here
        else:
            htmlfilelist = open(self.wget_file(fullurlpath,
                                               indexhtmlfnstr)).readlines()
            #getfiles = self.getLinksFromHTML(htmlfilelist,r'''.*a href="GAASP-MBT_v"."r"."GW"."_s[0-9]{14}".*''')
            #log.info(htmlfilelist)
            links = self.getLinksFromHTML(htmlfilelist, searchstring)

            # This is defined in Site.py - finding the files in the file list is
            # common between HTTP and FTP (getting the lists differs, but sorting
            # through the list and returning the desired files is shared)
            return self.find_files_in_range(links,
                                            start_time,
                                            end_time,
                                            urlpath=fullurlpath)
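
getLinksFromHTML receives the raw index.html lines plus the caller's searchstring; a generic sketch of what such link extraction typically reduces to (an illustration, not the actual Site.getLinksFromHTML implementation):

import re

def get_links_from_html(html_lines, searchstring):
    # Collect href targets from anchor tags, keeping only those that
    # match the caller's regular-expression search string.
    href_pattern = re.compile(r'href="([^"]+)"')
    links = []
    for line in html_lines:
        for target in href_pattern.findall(line):
            if re.search(searchstring, target):
                links.append(target)
    return links

print(get_links_from_html(['<a href="file_s20200101.h5">x</a>'], r'_s[0-9]{8}'))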
Example #14
    def read(self,
             fname,
             datavars,
             gvars,
             metadata,
             chans=None,
             sector_definition=None):

        f1 = open(fname, 'rb')

        #READ HEADER
        sw_rev = np.fromstring(f1.read(2),
                               dtype=np.dtype('short')).byteswap()[0]
        endian, fileid = np.fromstring(f1.read(2),
                                       dtype=np.dtype('int8')).byteswap()
        rev = np.fromstring(f1.read(4), dtype=np.dtype('int32')).byteswap()
        year = np.fromstring(f1.read(4), dtype=np.dtype('int32')).byteswap()
        jday = np.fromstring(f1.read(2), dtype=np.dtype('short')).byteswap()
        hour, minu = np.fromstring(f1.read(2),
                                   dtype=np.dtype('int8')).byteswap()
        satid, nsdr = np.fromstring(f1.read(4),
                                    dtype=np.dtype('short')).byteswap()
        spare1, spare2, spare3 = np.fromstring(
            f1.read(3), dtype=np.dtype('int8')).byteswap()
        proc_stat_flags = np.fromstring(f1.read(1),
                                        dtype=np.dtype('int8')).byteswap()
        spare4 = np.fromstring(f1.read(4), dtype=np.dtype('int32')).byteswap()
        #Need to set up time to be read in by the metadata (year and jday are arrays)
        time = '%04d%03d%02d%02d' % (year[0], jday[0], hour, minu)
        nbytes = 28  #bytes that have been read in
        #Read scan records at 512-byte boundaries
        nfiller = 512 - (
            nbytes % 512
        )  # skip nfiller bytes so that the scan header will start at the 513th byte of the data records,
        filler_bytes = np.fromstring(f1.read(nfiller),
                                     dtype=np.dtype('int8')).byteswap()

        # Rev 6A of the SSMIS SDR software changed the scaling of channels 12-16 to 100 (it was 10 before this change)
        #     effective with orbit rev 12216 for F-16 and thereafter for all future satellites
        rev6a = 1
        if satid == 1 and rev[0] < 12216:
            rev6a = 0

        if satid == 1:
            satid = 'f16'
        elif satid == 2:
            satid = 'f17'
        elif satid == 3:
            satid = 'f18'
        elif satid == 4:
            satid = 'f19'
        else:
            return False

        #Enter metadata
        metadata['top']['start_datetime'] = datetime.strptime(time, '%Y%j%H%M')
        metadata['top']['end_datetime'] = datetime.strptime(time, '%Y%j%H%M')
        metadata['top']['dataprovider'] = 'DMSP'
        metadata['top']['filename_datetime'] = metadata['top'][
            'start_datetime']
        metadata['top']['platform_name'] = satid
        metadata['top']['source_name'] = 'ssmis'
        si = SatSensorInfo(metadata['top']['platform_name'],
                           metadata['top']['source_name'])
        if not si:
            from ..scifileexceptions import SciFileError
            raise SciFileError(
                'Unrecognized platform and source name combination: ' +
                metadata['top']['platform_name'] + ' ' +
                metadata['top']['source_name'])

        dfn = DataFileName(os.path.basename(fname))
        if dfn:
            sdfn = dfn.create_standard()
            metadata['top']['filename_datetime'] = sdfn.datetime

        # Tells driver to NOT try to sector this data.
        metadata['top']['NON_SECTORABLE'] = True

        if chans == []:
            return

        bad_value = -999

        for nn in range(nsdr):  #loop number of sdr data records
            log.info('    Reading sdr #' + str(nn) + ' of ' + str(nsdr))
            nbytes = 0

            #SCAN HEADER
            syncword = np.fromstring(f1.read(4),
                                     dtype=np.dtype('int32')).byteswap()
            scan_year = np.fromstring(f1.read(4),
                                      dtype=np.dtype('int32')).byteswap()
            scan_jday = np.fromstring(f1.read(2),
                                      dtype=np.dtype('short')).byteswap()
            scan_hour, scan_minu = np.fromstring(
                f1.read(2), dtype=np.dtype('int8')).byteswap()
            scan = np.fromstring(f1.read(4),
                                 dtype=np.dtype('int32')).byteswap()
            nscan_imager, nscan_enviro, nscan_las, nscan_uas = np.fromstring(
                f1.read(4), dtype=np.dtype('int8')).byteswap()
            start_scantime_imager = np.fromstring(
                f1.read(112), dtype=np.dtype('int32')).byteswap()
            scenecounts_imager = np.fromstring(
                f1.read(28), dtype=np.dtype('uint8')).byteswap()
            start_scantime_enviro = np.fromstring(
                f1.read(96), dtype=np.dtype('int32')).byteswap()
            scenecounts_enviro = np.fromstring(
                f1.read(24), dtype=np.dtype('uint8')).byteswap()
            start_scantime_las = np.fromstring(
                f1.read(32), dtype=np.dtype('int32')).byteswap()
            scenecounts_las = np.fromstring(
                f1.read(8), dtype=np.dtype('uint8')).byteswap()
            start_scantime_uas = np.fromstring(
                f1.read(16), dtype=np.dtype('int32')).byteswap()
            scenecounts_uas = np.fromstring(
                f1.read(4), dtype=np.dtype('uint8')).byteswap()
            spare = np.fromstring(f1.read(20),
                                  dtype=np.dtype('int32')).byteswap()
            nbytes += 360  #total bytes of the scan header
            nscan0 = scan - 1
            #-----------------------------------------------------------------------------------------
            try:
                imager_read = np.ma.zeros((nscan_imager, 180))
                np.ma.masked_all_like(imager_read)
            except:
                print 'Shell dropped for imager_read'
            if scenecounts_imager[0] < 0:
                print "IMAGER is negative"
            lt = np.ma.masked_values(imager_read, bad_value)
            lg = np.ma.masked_values(imager_read, bad_value)
            ch08 = np.ma.masked_values(imager_read, bad_value)
            ch09 = np.ma.masked_values(imager_read, bad_value)
            ch10 = np.ma.masked_values(imager_read, bad_value)
            ch11 = np.ma.masked_values(imager_read, bad_value)
            ch17 = np.ma.masked_values(imager_read, bad_value)
            ch18 = np.ma.masked_values(imager_read, bad_value)
            surf = np.ma.masked_values(imager_read, bad_value)
            rain = np.ma.masked_values(imager_read, bad_value)

            #IMAGER READ DATA
            for ii in range(nscan_imager):
                if start_scantime_imager[ii] == -999:
                    print 'value of imager scan is %d' % ii
                    continue
                for jj in range(scenecounts_imager[ii]):
                    imager_lat, imager_lon, imager_scene = np.fromstring(
                        f1.read(6), dtype=np.dtype('short')).byteswap()
                    imager_surf, imager_rain = np.fromstring(
                        f1.read(2), dtype=np.dtype('int8')).byteswap()
                    imager_ch08, imager_ch09, imager_ch10, imager_ch11, imager_ch17, imager_ch18 = np.fromstring(
                        f1.read(12), dtype=np.dtype('short')).byteswap()
                    nbytes += 20
                    k = 180 * (nscan0 + ii) + jj
                    lat = 0.01 * imager_lat
                    lon = 0.01 * imager_lon
                    try:
                        lt[ii][jj] = lat
                        lg[ii][jj] = lon
                        ch08[ii][jj] = imager_ch08  #150    Ghz
                        ch09[ii][jj] = imager_ch09  #183+-7
                        ch10[ii][jj] = imager_ch10  #183+-3
                        ch11[ii][jj] = imager_ch11  #183+-1
                        ch17[ii][jj] = imager_ch17  #91V
                        ch18[ii][jj] = imager_ch18  #91H
                        surf[ii][jj] = imager_surf
                        rain[ii][jj] = imager_rain
                    except:
                        print 'Failed setting arrays in scan_imager'

            if 'Latitude' not in gvars['IMAGER'].keys():
                gvars['IMAGER']['Latitude'] = lt
                gvars['IMAGER']['Longitude'] = lg
                datavars['IMAGER']['ch08'] = ch08
                datavars['IMAGER']['ch09'] = ch09
                datavars['IMAGER']['ch10'] = ch10
                datavars['IMAGER']['ch11'] = ch11
                datavars['IMAGER']['ch17'] = ch17
                datavars['IMAGER']['ch18'] = ch18
                datavars['IMAGER']['surface'] = surf
                datavars['IMAGER']['rain'] = rain
            else:
                gvars['IMAGER']['Latitude'] = np.ma.vstack(
                    (gvars['IMAGER']['Latitude'], lt))
                gvars['IMAGER']['Longitude'] = np.ma.vstack(
                    (gvars['IMAGER']['Longitude'], lg))
                datavars['IMAGER']['ch08'] = np.ma.vstack(
                    (datavars['IMAGER']['ch08'], ch08))
                datavars['IMAGER']['ch09'] = np.ma.vstack(
                    (datavars['IMAGER']['ch09'], ch09))
                datavars['IMAGER']['ch10'] = np.ma.vstack(
                    (datavars['IMAGER']['ch10'], ch10))
                datavars['IMAGER']['ch11'] = np.ma.vstack(
                    (datavars['IMAGER']['ch11'], ch11))
                datavars['IMAGER']['ch17'] = np.ma.vstack(
                    (datavars['IMAGER']['ch17'], ch17))
                datavars['IMAGER']['ch18'] = np.ma.vstack(
                    (datavars['IMAGER']['ch18'], ch18))
                datavars['IMAGER']['surface'] = np.ma.vstack(
                    (datavars['IMAGER']['surface'], surf))
                datavars['IMAGER']['rain'] = np.ma.vstack(
                    (datavars['IMAGER']['rain'], rain))
                gvars['IMAGER']['Latitude'] = np.ma.masked_values(
                    gvars['IMAGER']['Latitude'], bad_value)
                gvars['IMAGER']['Longitude'] = np.ma.masked_values(
                    gvars['IMAGER']['Longitude'], bad_value)
                datavars['IMAGER']['ch08'] = np.ma.masked_values(
                    datavars['IMAGER']['ch08'], bad_value)
                datavars['IMAGER']['ch09'] = np.ma.masked_values(
                    datavars['IMAGER']['ch09'], bad_value)
                datavars['IMAGER']['ch10'] = np.ma.masked_values(
                    datavars['IMAGER']['ch10'], bad_value)
                datavars['IMAGER']['ch11'] = np.ma.masked_values(
                    datavars['IMAGER']['ch11'], bad_value)
                datavars['IMAGER']['ch17'] = np.ma.masked_values(
                    datavars['IMAGER']['ch17'], bad_value)
                datavars['IMAGER']['ch18'] = np.ma.masked_values(
                    datavars['IMAGER']['ch18'], bad_value)
                datavars['IMAGER']['surface'] = np.ma.masked_values(
                    datavars['IMAGER']['surface'], bad_value)
                datavars['IMAGER']['rain'] = np.ma.masked_values(
                    datavars['IMAGER']['rain'], bad_value)
#-----------------------------------------------------------------------------------------
            enviro_read = np.ma.zeros((nscan_enviro, 90))
            np.ma.masked_all_like(enviro_read)
            if scenecounts_enviro[0] < 0:
                print "ENVIRO is negative"
            lt = np.ma.masked_equal(enviro_read, bad_value)
            lg = np.ma.masked_equal(enviro_read, bad_value)
            ch12 = np.ma.masked_equal(enviro_read, bad_value)
            ch13 = np.ma.masked_equal(enviro_read, bad_value)
            ch14 = np.ma.masked_equal(enviro_read, bad_value)
            ch15 = np.ma.masked_equal(enviro_read, bad_value)
            ch16 = np.ma.masked_equal(enviro_read, bad_value)
            ch15_5x5 = np.ma.masked_equal(enviro_read, bad_value)
            ch16_5x5 = np.ma.masked_equal(enviro_read, bad_value)
            ch17_5x5 = np.ma.masked_equal(enviro_read, bad_value)
            ch18_5x5 = np.ma.masked_equal(enviro_read, bad_value)
            ch17_5x4 = np.ma.masked_equal(enviro_read, bad_value)
            ch18_5x4 = np.ma.masked_equal(enviro_read, bad_value)

            #ENVIRO READ DATA
            for ii in range(nscan_enviro):
                if ii % 2 == 0:  #for odd scan numbers
                    if start_scantime_enviro[ii] == -999:
                        print 'value of enviro odd scan is %d' % ii
                        continue
                    for jj in range(scenecounts_enviro[ii]):
                        enviroodd_lat, enviroodd_lon, enviroodd_scene = np.fromstring(
                            f1.read(6), dtype=np.dtype('short')).byteswap()
                        enviroodd_seaice, enviroodd_surf = np.fromstring(
                            f1.read(2), dtype=np.dtype('int8')).byteswap()
                        enviroodd_ch12, enviroodd_ch13, enviroodd_ch14, enviroodd_ch15, enviroodd_ch16, enviroodd_ch15_5x5, enviroodd_ch16_5x5, enviroodd_ch17_5x5, enviroodd_ch18_5x5, enviroodd_ch17_5x4, enviroodd_ch18_5x4 = np.fromstring(
                            f1.read(22), dtype=np.dtype('short')).byteswap()
                        enviroodd_rain1, enviroodd_rain2 = np.fromstring(
                            f1.read(2), dtype=np.dtype('int8')).byteswap()
                        edr_bitflags = np.fromstring(
                            f1.read(4), dtype=np.dtype('int32')).byteswap()
                        nbytes += 36
                        lat = 0.01 * enviroodd_lat
                        lon = 0.01 * enviroodd_lon
                        lt[ii][jj] = lat
                        lg[ii][jj] = lon
                        if rev6a == 1:
                            ch12[ii][jj] = enviroodd_ch12  #19H
                            ch13[ii][jj] = enviroodd_ch13  #19V
                            ch14[ii][jj] = enviroodd_ch14  #22V
                            ch15[ii][jj] = enviroodd_ch15  #37H
                            ch16[ii][jj] = enviroodd_ch16  #37V
                            ch15_5x5[ii][jj] = enviroodd_ch15_5x5
                            ch16_5x5[ii][jj] = enviroodd_ch16_5x5
                            ch17_5x5[ii][jj] = enviroodd_ch17_5x5
                            ch18_5x5[ii][jj] = enviroodd_ch18_5x5
                            ch17_5x4[ii][jj] = enviroodd_ch17_5x4
                            ch18_5x4[ii][jj] = enviroodd_ch18_5x4
                        else:
                            ch12[ii][jj] = 10 * enviroodd_ch12
                            ch13[ii][jj] = 10 * enviroodd_ch13
                            ch14[ii][jj] = 10 * enviroodd_ch14
                            ch15[ii][jj] = 10 * enviroodd_ch15
                            ch16[ii][jj] = 10 * enviroodd_ch16
                            ch15_5x5[ii][jj] = 10 * enviroodd_ch15_5x5
                            ch16_5x5[ii][jj] = 10 * enviroodd_ch16_5x5
                            ch17_5x5[ii][jj] = 10 * enviroodd_ch17_5x5
                            ch18_5x5[ii][jj] = 10 * enviroodd_ch18_5x5
                            ch17_5x4[ii][jj] = 10 * enviroodd_ch17_5x4
                            ch18_5x4[ii][jj] = 10 * enviroodd_ch18_5x4

                if ii % 2 == 1:  # for even scan numbers
                    if start_scantime_enviro[ii] == -999:
                        print 'value of enviro even scan is %d' % ii
                        continue
                    for jj in range(scenecounts_enviro[ii]):
                        enviroeven_lat, enviroeven_lon, enviroeven_scene = np.fromstring(
                            f1.read(6), dtype=np.dtype('short')).byteswap()
                        enviroeven_seaice, enviroeven_surf = np.fromstring(
                            f1.read(2), dtype=np.dtype('int8')).byteswap()
                        enviroeven_ch12, enviroeven_ch13, enviroeven_ch14, enviroeven_ch15, enviroeven_ch16 = np.fromstring(
                            f1.read(10), dtype=np.dtype('short')).byteswap()
                        nbytes += 18
                        lat = 0.01 * enviroeven_lat
                        lon = 0.01 * enviroeven_lon
                        lt[ii][jj] = lat
                        lg[ii][jj] = lon
                        if rev6a == 1:
                            ch12[ii][jj] = enviroeven_ch12
                            ch13[ii][jj] = enviroeven_ch13
                            ch14[ii][jj] = enviroeven_ch14
                            ch15[ii][jj] = enviroeven_ch15
                            ch16[ii][jj] = enviroeven_ch16
                        else:
                            ch12[ii][jj] = 10 * enviroeven_ch12
                            ch13[ii][jj] = 10 * enviroeven_ch13
                            ch14[ii][jj] = 10 * enviroeven_ch14
                            ch15[ii][jj] = 10 * enviroeven_ch15
                            ch16[ii][jj] = 10 * enviroeven_ch16

            if 'Latitude' not in gvars['ENVIRO'].keys():
                gvars['ENVIRO']['Latitude'] = lt
                gvars['ENVIRO']['Longitude'] = lg
                datavars['ENVIRO']['ch12'] = ch12
                datavars['ENVIRO']['ch13'] = ch13
                datavars['ENVIRO']['ch14'] = ch14
                datavars['ENVIRO']['ch15'] = ch15
                datavars['ENVIRO']['ch16'] = ch16
                datavars['ENVIRO']['ch15_5x5'] = ch15_5x5
                datavars['ENVIRO']['ch16_5x5'] = ch16_5x5
                datavars['ENVIRO']['ch17_5x5'] = ch17_5x5
                datavars['ENVIRO']['ch18_5x5'] = ch18_5x5
                datavars['ENVIRO']['ch17_5x4'] = ch17_5x4
                datavars['ENVIRO']['ch18_5x4'] = ch18_5x4
            else:
                gvars['ENVIRO']['Latitude'] = np.ma.vstack(
                    (gvars['ENVIRO']['Latitude'], lt))
                gvars['ENVIRO']['Longitude'] = np.ma.vstack(
                    (gvars['ENVIRO']['Longitude'], lg))
                datavars['ENVIRO']['ch12'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch12'], ch12))
                datavars['ENVIRO']['ch13'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch13'], ch13))
                datavars['ENVIRO']['ch14'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch14'], ch14))
                datavars['ENVIRO']['ch15'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch15'], ch15))
                datavars['ENVIRO']['ch16'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch16'], ch16))
                datavars['ENVIRO']['ch15_5x5'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch15_5x5'], ch15_5x5))
                datavars['ENVIRO']['ch16_5x5'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch16_5x5'], ch16_5x5))
                datavars['ENVIRO']['ch17_5x5'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch17_5x5'], ch17_5x5))
                datavars['ENVIRO']['ch18_5x5'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch18_5x5'], ch18_5x5))
                datavars['ENVIRO']['ch17_5x4'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch17_5x4'], ch17_5x4))
                datavars['ENVIRO']['ch18_5x4'] = np.ma.vstack(
                    (datavars['ENVIRO']['ch18_5x4'], ch18_5x4))
                gvars['ENVIRO']['Latitude'] = np.ma.masked_equal(
                    gvars['ENVIRO']['Latitude'], bad_value)
                gvars['ENVIRO']['Longitude'] = np.ma.masked_equal(
                    gvars['ENVIRO']['Longitude'], bad_value)
                datavars['ENVIRO']['ch12'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch12'], bad_value)
                datavars['ENVIRO']['ch13'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch13'], bad_value)
                datavars['ENVIRO']['ch14'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch14'], bad_value)
                datavars['ENVIRO']['ch15'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch15'], bad_value)
                datavars['ENVIRO']['ch16'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch16'], bad_value)
                datavars['ENVIRO']['ch15_5x5'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch15_5x5'], bad_value)
                datavars['ENVIRO']['ch16_5x5'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch16_5x5'], bad_value)
                datavars['ENVIRO']['ch17_5x5'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch17_5x5'], bad_value)
                datavars['ENVIRO']['ch18_5x5'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch18_5x5'], bad_value)
                datavars['ENVIRO']['ch17_5x4'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch17_5x4'], bad_value)
                datavars['ENVIRO']['ch18_5x4'] = np.ma.masked_equal(
                    datavars['ENVIRO']['ch18_5x4'], bad_value)
#-----------------------------------------------------------------------------------------
            las_read = np.ma.zeros((nscan_las, 60))
            np.ma.masked_all_like(las_read)
            if scenecounts_las[0] < 0:
                print "LAS is negative"
            lt = np.ma.masked_equal(las_read, bad_value)
            lg = np.ma.masked_equal(las_read, bad_value)
            ch01_3x3 = np.ma.masked_equal(las_read, bad_value)
            ch02_3x3 = np.ma.masked_equal(las_read, bad_value)
            ch03_3x3 = np.ma.masked_equal(las_read, bad_value)
            ch04_3x3 = np.ma.masked_equal(las_read, bad_value)
            ch05_3x3 = np.ma.masked_equal(las_read, bad_value)
            ch06_3x3 = np.ma.masked_equal(las_read, bad_value)
            ch07_3x3 = np.ma.masked_equal(las_read, bad_value)
            ch08_5x5 = np.ma.masked_equal(las_read, bad_value)
            ch09_5x5 = np.ma.masked_equal(las_read, bad_value)
            ch10_5x5 = np.ma.masked_equal(las_read, bad_value)
            ch11_5x5 = np.ma.masked_equal(las_read, bad_value)
            ch18_5x5 = np.ma.masked_equal(las_read, bad_value)
            ch24_3x3 = np.ma.masked_equal(las_read, bad_value)
            height_1000mb = np.ma.masked_equal(las_read, bad_value)
            surf = np.ma.masked_equal(las_read, bad_value)

            #LAS READ DATA
            for ii in range(nscan_las):
                if start_scantime_las[ii] == -999:
                    print 'value of las scan is %d' % ii
                    continue
                for jj in range(scenecounts_las[ii]):
                    try:
                        las_lati, las_long, las_ch01_3x3, las_ch02_3x3, las_ch03_3x3, las_ch04_3x3, las_ch05_3x3, las_ch06_3x3, las_ch07_3x3, las_ch08_5x5, las_ch09_5x5, las_ch10_5x5, las_ch11_5x5, las_ch18_5x5, las_ch24_3x3, las_height_1000mb, las_surf = np.fromstring(
                            f1.read(34), dtype=np.dtype('short')).byteswap()
                        las_tqflag, las_hqflag = np.fromstring(
                            f1.read(2), dtype=np.dtype('int8')).byteswap()
                        las_terrain, las_scene = np.fromstring(
                            f1.read(4), dtype=np.dtype('short')).byteswap()
                    except:
                        continue
                    lat = 0.01 * las_lati
                    lon = 0.01 * las_long
                    nbytes += 40
                    lt[ii][jj] = lat
                    lg[ii][jj] = lon
                    ch01_3x3[ii][jj] = las_ch01_3x3  #50.3 V
                    ch02_3x3[ii][jj] = las_ch02_3x3  #52.8 V
                    ch03_3x3[ii][jj] = las_ch03_3x3  #53.60V
                    ch04_3x3[ii][jj] = las_ch04_3x3  #54.4 V
                    ch05_3x3[ii][jj] = las_ch05_3x3  #55.5 V
                    ch06_3x3[ii][jj] = las_ch06_3x3  #57.3 RCP
                    ch07_3x3[ii][jj] = las_ch07_3x3  #59.4 RCP
                    ch08_5x5[ii][jj] = las_ch08_5x5  #150 H
                    ch09_5x5[ii][jj] = las_ch09_5x5  #183.31+-7 H
                    ch10_5x5[ii][jj] = las_ch10_5x5  #183.31+-3 H
                    ch11_5x5[ii][jj] = las_ch11_5x5  #183.31+-1 H
                    ch18_5x5[ii][jj] = las_ch18_5x5  #91 H
                    ch24_3x3[ii][jj] = las_ch24_3x3  #60.79+-36+-0.05 RCP
                    height_1000mb[ii][jj] = las_height_1000mb
                    surf[ii][jj] = las_surf

            if 'Latitude' not in gvars['LAS'].keys():
                gvars['LAS']['Latitude'] = lt
                gvars['LAS']['Longitude'] = lg
                datavars['LAS']['ch01_3x3'] = ch01_3x3
                datavars['LAS']['ch02_3x3'] = ch02_3x3
                datavars['LAS']['ch03_3x3'] = ch03_3x3
                datavars['LAS']['ch04_3x3'] = ch04_3x3
                datavars['LAS']['ch05_3x3'] = ch05_3x3
                datavars['LAS']['ch06_3x3'] = ch06_3x3
                datavars['LAS']['ch07_3x3'] = ch07_3x3
                datavars['LAS']['ch08_5x5'] = ch08_5x5
                datavars['LAS']['ch09_5x5'] = ch09_5x5
                datavars['LAS']['ch10_5x5'] = ch10_5x5
                datavars['LAS']['ch11_5x5'] = ch11_5x5
                datavars['LAS']['ch18_5x5'] = ch18_5x5
                datavars['LAS']['ch24_3x3'] = ch24_3x3
                datavars['LAS']['height_1000mb'] = height_1000mb
                datavars['LAS']['surf'] = surf
            else:
                gvars['LAS']['Latitude'] = np.ma.vstack(
                    (gvars['LAS']['Latitude'], lt))
                gvars['LAS']['Longitude'] = np.ma.vstack(
                    (gvars['LAS']['Longitude'], lg))
                datavars['LAS']['ch01_3x3'] = np.ma.vstack(
                    (datavars['LAS']['ch01_3x3'], ch01_3x3))
                datavars['LAS']['ch02_3x3'] = np.ma.vstack(
                    (datavars['LAS']['ch02_3x3'], ch02_3x3))
                datavars['LAS']['ch03_3x3'] = np.ma.vstack(
                    (datavars['LAS']['ch03_3x3'], ch03_3x3))
                datavars['LAS']['ch04_3x3'] = np.ma.vstack(
                    (datavars['LAS']['ch04_3x3'], ch04_3x3))
                datavars['LAS']['ch05_3x3'] = np.ma.vstack(
                    (datavars['LAS']['ch05_3x3'], ch05_3x3))
                datavars['LAS']['ch06_3x3'] = np.ma.vstack(
                    (datavars['LAS']['ch06_3x3'], ch06_3x3))
                datavars['LAS']['ch07_3x3'] = np.ma.vstack(
                    (datavars['LAS']['ch07_3x3'], ch07_3x3))
                datavars['LAS']['ch08_5x5'] = np.ma.vstack(
                    (datavars['LAS']['ch08_5x5'], ch08_5x5))
                datavars['LAS']['ch09_5x5'] = np.ma.vstack(
                    (datavars['LAS']['ch09_5x5'], ch09_5x5))
                datavars['LAS']['ch10_5x5'] = np.ma.vstack(
                    (datavars['LAS']['ch10_5x5'], ch10_5x5))
                datavars['LAS']['ch11_5x5'] = np.ma.vstack(
                    (datavars['LAS']['ch11_5x5'], ch11_5x5))
                datavars['LAS']['ch18_5x5'] = np.ma.vstack(
                    (datavars['LAS']['ch18_5x5'], ch18_5x5))
                datavars['LAS']['ch24_3x3'] = np.ma.vstack(
                    (datavars['LAS']['ch24_3x3'], ch24_3x3))
                datavars['LAS']['height_1000mb'] = np.ma.vstack(
                    (datavars['LAS']['height_1000mb'], height_1000mb))
                datavars['LAS']['surf'] = np.ma.vstack(
                    (datavars['LAS']['surf'], surf))
                gvars['LAS']['Latitude'] = np.ma.masked_equal(
                    gvars['LAS']['Latitude'], bad_value)
                gvars['LAS']['Longitude'] = np.ma.masked_equal(
                    gvars['LAS']['Longitude'], bad_value)
                datavars['LAS']['ch01_3x3'] = np.ma.masked_equal(
                    datavars['LAS']['ch01_3x3'], bad_value)
                datavars['LAS']['ch02_3x3'] = np.ma.masked_equal(
                    datavars['LAS']['ch02_3x3'], bad_value)
                datavars['LAS']['ch03_3x3'] = np.ma.masked_equal(
                    datavars['LAS']['ch03_3x3'], bad_value)
                datavars['LAS']['ch04_3x3'] = np.ma.masked_equal(
                    datavars['LAS']['ch04_3x3'], bad_value)
                datavars['LAS']['ch05_3x3'] = np.ma.masked_equal(
                    datavars['LAS']['ch05_3x3'], bad_value)
                datavars['LAS']['ch06_3x3'] = np.ma.masked_equal(
                    datavars['LAS']['ch06_3x3'], bad_value)
                datavars['LAS']['ch07_3x3'] = np.ma.masked_equal(
                    datavars['LAS']['ch07_3x3'], bad_value)
                datavars['LAS']['ch08_5x5'] = np.ma.masked_equal(
                    datavars['LAS']['ch08_5x5'], bad_value)
                datavars['LAS']['ch09_5x5'] = np.ma.masked_equal(
                    datavars['LAS']['ch09_5x5'], bad_value)
                datavars['LAS']['ch10_5x5'] = np.ma.masked_equal(
                    datavars['LAS']['ch10_5x5'], bad_value)
                datavars['LAS']['ch11_5x5'] = np.ma.masked_equal(
                    datavars['LAS']['ch11_5x5'], bad_value)
                datavars['LAS']['ch18_5x5'] = np.ma.masked_equal(
                    datavars['LAS']['ch18_5x5'], bad_value)
                datavars['LAS']['ch24_3x3'] = np.ma.masked_equal(
                    datavars['LAS']['ch24_3x3'], bad_value)
                datavars['LAS']['height_1000mb'] = np.ma.masked_equal(
                    datavars['LAS']['height_1000mb'], bad_value)
                datavars['LAS']['surf'] = np.ma.masked_equal(
                    datavars['LAS']['surf'], bad_value)
#---------------------------------------------------------------------------------
            uas_read = np.ma.zeros((nscan_uas, 30))
            np.ma.masked_all_like(uas_read)
            if scenecounts_uas[0] < 0:
                print "UAS is negative"
            lt = np.ma.masked_equal(uas_read, bad_value)
            lg = np.ma.masked_equal(uas_read, bad_value)
            ch19_6x6 = np.ma.masked_equal(uas_read, bad_value)
            ch20_6x6 = np.ma.masked_equal(uas_read, bad_value)
            ch21_6x6 = np.ma.masked_equal(uas_read, bad_value)
            ch22_6x6 = np.ma.masked_equal(uas_read, bad_value)
            ch23_6x6 = np.ma.masked_equal(uas_read, bad_value)
            ch24_6x6 = np.ma.masked_equal(uas_read, bad_value)
            sceneu = np.ma.masked_equal(uas_read, bad_value)
            tqflag = np.ma.masked_equal(uas_read, bad_value)

            #UAS READ DATA
            for ii in range(nscan_uas):
                if start_scantime_uas[ii] == -999:
                    print 'value of uas scan is %d' % ii
                    continue
                for jj in range(scenecounts_uas[ii]):
                    uas_lat, uas_lon, uas_ch19_6x6, uas_ch20_6x6, uas_ch21_6x6, uas_ch22_6x6, uas_ch23_6x6, uas_ch24_6x6, uas_scene, uas_tqflag = np.fromstring(
                        f1.read(20), dtype=np.dtype('short')).byteswap()
                    uas_field, uas_bdotk2 = np.fromstring(
                        f1.read(8), dtype=np.dtype('int32')).byteswap()
                    nbytes += 28
                    lat = 0.01 * uas_lat
                    lon = 0.01 * uas_lon
                    lt[ii][jj] = lat
                    lg[ii][jj] = lon
                    ch19_6x6[ii][jj] = uas_ch19_6x6  #63.28+-0.28 RCP GHz
                    ch20_6x6[ii][jj] = uas_ch20_6x6  #60.79+-0.36 RCP
                    ch21_6x6[ii][jj] = uas_ch21_6x6  #60.79+-0.36+-0.002 RCP
                    ch22_6x6[ii][jj] = uas_ch22_6x6  #60.79+-0.36+-0.0055 RCP
                    ch23_6x6[ii][jj] = uas_ch23_6x6  #60.79+-0.36+-0.0016 RCP
                    ch24_6x6[ii][jj] = uas_ch24_6x6  #60.79+-0.36+-0.050 RCP
                    sceneu[ii][jj] = uas_scene
                    tqflag[ii][jj] = uas_tqflag

            if 'Latitude' not in gvars['UAS'].keys():
                gvars['UAS']['Latitude'] = lt
                gvars['UAS']['Longitude'] = lg
                datavars['UAS']['ch19_6x6'] = ch19_6x6
                datavars['UAS']['ch20_6x6'] = ch20_6x6
                datavars['UAS']['ch21_6x6'] = ch21_6x6
                datavars['UAS']['ch22_6x6'] = ch22_6x6
                datavars['UAS']['ch23_6x6'] = ch23_6x6
                datavars['UAS']['ch24_6x6'] = ch24_6x6
                datavars['UAS']['scene'] = sceneu
                datavars['UAS']['uas_tqflag'] = tqflag
            else:
                gvars['UAS']['Latitude'] = np.ma.vstack(
                    (gvars['UAS']['Latitude'], lt))
                gvars['UAS']['Longitude'] = np.ma.vstack(
                    (gvars['UAS']['Longitude'], lg))
                datavars['UAS']['ch19_6x6'] = np.ma.vstack(
                    (datavars['UAS']['ch19_6x6'], ch19_6x6))
                datavars['UAS']['ch20_6x6'] = np.ma.vstack(
                    (datavars['UAS']['ch20_6x6'], ch20_6x6))
                datavars['UAS']['ch21_6x6'] = np.ma.vstack(
                    (datavars['UAS']['ch21_6x6'], ch21_6x6))
                datavars['UAS']['ch22_6x6'] = np.ma.vstack(
                    (datavars['UAS']['ch22_6x6'], ch22_6x6))
                datavars['UAS']['ch23_6x6'] = np.ma.vstack(
                    (datavars['UAS']['ch23_6x6'], ch23_6x6))
                datavars['UAS']['ch24_6x6'] = np.ma.vstack(
                    (datavars['UAS']['ch24_6x6'], ch24_6x6))
                datavars['UAS']['scene'] = np.ma.vstack(
                    (datavars['UAS']['scene'], sceneu))
                datavars['UAS']['uas_tqflag'] = np.ma.vstack(
                    (datavars['UAS']['uas_tqflag'], tqflag))
            # Mask bad values after every scan block (so a file with a
            # single scan block gets masked as well).
            gvars['UAS']['Latitude'] = np.ma.masked_equal(
                gvars['UAS']['Latitude'], bad_value)
            gvars['UAS']['Longitude'] = np.ma.masked_equal(
                gvars['UAS']['Longitude'], bad_value)
            datavars['UAS']['ch19_6x6'] = np.ma.masked_equal(
                datavars['UAS']['ch19_6x6'], bad_value)
            datavars['UAS']['ch20_6x6'] = np.ma.masked_equal(
                datavars['UAS']['ch20_6x6'], bad_value)
            datavars['UAS']['ch21_6x6'] = np.ma.masked_equal(
                datavars['UAS']['ch21_6x6'], bad_value)
            datavars['UAS']['ch22_6x6'] = np.ma.masked_equal(
                datavars['UAS']['ch22_6x6'], bad_value)
            datavars['UAS']['ch23_6x6'] = np.ma.masked_equal(
                datavars['UAS']['ch23_6x6'], bad_value)
            datavars['UAS']['ch24_6x6'] = np.ma.masked_equal(
                datavars['UAS']['ch24_6x6'], bad_value)
            datavars['UAS']['scene'] = np.ma.masked_equal(
                datavars['UAS']['scene'], bad_value)
            datavars['UAS']['uas_tqflag'] = np.ma.masked_equal(
                datavars['UAS']['uas_tqflag'], bad_value)

            # nfiller bytes are skipped so that the next scan header
            # starts at the beginning of the next 512-byte block.
            nfiller = 512 - (nbytes % 512)
            print 'nfiller=', nfiller
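            # Worked example of the alignment rule: if nbytes % 512 == 28,
            # then nfiller = 512 - 28 = 484, and skipping 484 bytes puts
            # the next scan header exactly on a 512-byte boundary. Note
            # that when nbytes is already a multiple of 512, nfiller comes
            # out as 512 and a full filler block is skipped.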
            try:
                filler_bytes = np.fromstring(
                    f1.read(nfiller), dtype=np.dtype('int8')).byteswap()[0]
            except (ValueError, IndexError):
                # Short read at end of file: nothing left to skip.
                continue
        f1.close()
        #-----------------------------------------------------------------------------------------------------
        # Loop through each dataset name found in the dataset_info property above.
        for dsname in self.dataset_info.keys():
            for geoipsvarname, dfvarname in self.dataset_info[dsname].items():
                log.info('    Reading ' + dsname + ' channel "' + dfvarname +
                         '" from file into SciFile channel: "' +
                         geoipsvarname + '"...')
                #shell()
                data = datavars[dsname][geoipsvarname]
                fillvalue = -999
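                # The stored values appear to be packed as hundredths of
                # a degree Celsius (assumed packing): e.g. a stored 1500
                # unpacks to 15.00 C + 273.15 = 288.15 K.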
                datavars[dsname][geoipsvarname] = (
                    np.ma.masked_equal(data, fillvalue) / 100) + 273.15

        # Loop through each dataset name found in the gvar_info property above.
        for dsname in self.gvar_info.keys():
            for geoipsvarname, dfvarname in self.gvar_info[dsname].items():
                log.info('    Reading ' + dsname + ' channel "' + dfvarname +
                         '" from file into SciFile channel: "' +
                         geoipsvarname + '"...')
                #shell()
                data = gvars[dsname][geoipsvarname]
                fillvalue = -999
                gvars[dsname][geoipsvarname] = np.ma.masked_equal(
                    data, fillvalue)
Esempio n. 15
0
def latency(
    start_datetime,
    end_datetime,
    sensor,
    satellites,
    data_providers,
    channels,
    overall=False,
    verbose=True,
):

    log.info('\n\n')
    nowtime = datetime.utcnow()
    total_hours = (end_datetime - start_datetime).days * 24 + (
        end_datetime - start_datetime).seconds // 3600
    log.info(str(total_hours) + ' hours')

    allfiles = []
    for sat in satellites:
        log.info('Trying sat ' + sat + ' sensor: ' + sensor)
        currdata_providers = data_providers
        currchannels = channels
        if not data_providers:
            currdata_providers = ['*']
        if not channels:
            currchannels = ['*']
        for data_provider in currdata_providers:
            for channel in currchannels:
                allfiles += DataFileName.list_range_of_files(
                    sat,
                    sensor,
                    start_datetime,
                    end_datetime,
                    datetime_wildcards={
                        '%H': '*%H',
                        '%M': '*',
                        '%S': '*'
                    },
                    data_provider=data_provider,
                    resolution='*',
                    channel=channel,
                    producttype='*',
                    area='*',
                    extra='*',
                    ext='*',
                    forprocess=False)

    if overall:
        totalsize, totalnum = calc_latency(
            allfiles,
            fileclass='DataFileName',
            verbose=verbose,
            classkeys={
                'overall': ['sensorname', 'satname', 'dataprovider'],
            })
    else:
        totalsize, totalnum = calc_latency(
            allfiles,
            fileclass='DataFileName',
            verbose=verbose,
            classkeys={
                'individual':
                ['sensorname', 'satname', 'channel', 'dataprovider'],
                'overall': ['sensorname', 'satname', 'dataprovider'],
            })
    log.interactive('Total size on disk for ' + str(totalnum) +
                    ' data files: ' + convert_bytes(totalsize) + ': sensor: ' +
                    sensor + ' satellites: ' + ', '.join(satellites))
    return totalsize, totalnum
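A minimal usage sketch for latency, assuming datetime and timedelta are imported as in the snippets above, and that 'ssmis' with satellites ['f16', 'f17'] is a valid sensor/satellite combination on the local install (these names are illustrative, not confirmed):

end_dt = datetime.utcnow()
start_dt = end_dt - timedelta(hours=24)
# Per-channel and overall latency stats for the last 24 hours, across
# all data providers and channels.
totalsize, totalnum = latency(start_dt,
                              end_dt,
                              sensor='ssmis',
                              satellites=['f16', 'f17'],
                              data_providers=None,
                              channels=None,
                              overall=False,
                              verbose=True)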
Esempio n. 16
0
    def read(self,
             fname,
             datavars,
             gvars,
             metadata,
             chans=None,
             sector_definition=None):
        class data_grid:
            # indir would hold the directory the file is in (unused here)
            dtg = '9999999999'
            mapvars = {}
            nz = 0
            nzp1 = 0
            nnest = 0
            delx0 = 0
            dely0 = 0
            nest = [0]

        class Nest(object):
            def __init__(self, nx, ny, ii, jj, iref, jref, tcmx, tcmy, delx,
                         dely):
                self.nx = nx
                self.ny = ny
                self.ii = ii
                self.jj = jj
                self.iref = iref
                self.jref = jref
                self.tcmx = tcmx
                self.tcmy = tcmy
                self.delx = delx
                self.dely = dely

        # define the model grid
        model_grid = {}
        model_grid['im'] = 277
        model_grid['jm'] = 229
        model_grid['lm'] = 60
        model_grid['num_bytes'] = model_grid['im'] * model_grid['jm'] * 4

        #Constant files for geolocation and header
        latitude = '/SATPROJECT/users/projects3/users/laflash/outdirs/nrtdata/realtime/longterm_files/COAMPS_metadata/latitu_sfc_000000_000000_3a0277x0229_2018092400_00000000_fcstfld'
        longitude = '/SATPROJECT/users/projects3/users/laflash/outdirs/nrtdata/realtime/longterm_files/COAMPS_metadata/longit_sfc_000000_000000_3a0277x0229_2018092400_00000000_fcstfld'
        header = '/SATPROJECT/users/projects3/users/laflash/outdirs/nrtdata/realtime/longterm_files/COAMPS_metadata/datahd_sfc_000000_000000_1a2000x0001_2018090918_00000000_infofld'

        #def main(file, lat_file, lon_file, cdtg, fld, level, model_grid, image_dir):
        #    istat = 0
        #    lat, istat = seek_field(lat_file, model_grid, 1)
        #    lon, istat = seek_field(lon_file, model_grid, 1)
        #
        #    data, stat = seek_field(file, model_grid, level)
        #
        #    title = ( "%s lvl:%.2i %s %s" % (fld.upper(), int(level), cdtg, tau) )
        #    level_name = ( "l%.2i" % int(level) )
        #    image_name = '_'.join(["ascos1", "2a", cdtg, fld, level_name, tau])
        #
        #    plot_global(data, lat, lon, title, image_name,
        #                clabel=plot_parm[fld]['units'],
        #                range=[plot_parm[fld]['min'],plot_parm[fld]['max']])

        def seek_field(filename):
            print 'Reading file...'
            #metadata
            datafilename = filename.split('/')[-1].split('_')
            wxparameter = datafilename[0]
            level_type = datafilename[1]
            lvl1 = datafilename[2]
            lvl1 = "%06.1f" % (float(lvl1))
            lvl2 = datafilename[3]
            if wxparameter == 'latitu' or wxparameter == 'longit':
                lvl2 = 1
            else:
                lvl2 = "%06.1f" % (float(lvl2))
            inest_and_gridlevels = datafilename[4]
            dtg = datafilename[5]
            filetype = datafilename[6]
            record_length = model_grid['num_bytes']
            # top to bottom (alternative ordering, unused):
            # offset = (model_grid['lm'] - int(float(lvl2))) * record_length

            # bottom to top
            offset = (int(float(lvl2)) - 1) * record_length

            #  binary file read
            if os.path.isfile(filename):
                f = open(filename, 'rb')
                f.seek(offset)
                data = np.fromstring(f.read(model_grid['num_bytes']),
                                     dtype='float32')
                f.close()
                if sys.byteorder == 'little':
                    data = data.byteswap()
                data = data.reshape(model_grid['jm'], model_grid['im'])
                data = np.ma.masked_equal(data, -990.99)
                istat = 0
            else:
                print "missing file"
                print filename
                # Return a fully masked field so downstream .fill_value
                # access still works on the result.
                data = np.ma.masked_all((model_grid['jm'], model_grid['im']))
                istat = -1
            return data, wxparameter, level_type, lvl1, lvl2, dtg, filetype
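        # Worked example of the offset arithmetic above (assumed
        # bottom-to-top ordering): record_length = 277 * 229 * 4 = 253732
        # bytes per level, so lvl2 = '0003.0' seeks to
        # (3 - 1) * 253732 = 507464 before reading one level of float32s.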

        def read_coamps_header(filename):
            #"%s/datahd_sfc_000000_000000_1a2000x0001_%s_00000000_infofld"%(indir, dtg)
            if os.path.isfile(filename):  #might not need
                #data_grid.indir = indir#might not need

                f = open(filename, 'rb')
                datahd = f.read()
                f.close()
                datahd = datahd.split()

                # separate occasional values with no space between them
                for j in range(len(datahd)):
                    val = datahd[j]
                    if len(val) > 13:
                        i1 = 0
                        k = 0
                        for i in range(len(val) - 1):
                            if val[i:i + 1] == 'E':
                                newval = val[i1:i + 4]
                                if i + 4 < 15:
                                    datahd[j] = newval
                                else:
                                    datahd.insert(j + k, newval)
                                k = k + 1
                                i1 = i + 4
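                    # e.g. a run-together pair like
                    # '0.1000000E+030.2000000E+02' splits at the exponent
                    # field into '0.1000000E+03' and '0.2000000E+02'.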

                data_grid.mapvars['nproj'] = float(datahd[2])
                data_grid.mapvars['stdlat1'] = float(datahd[3])
                data_grid.mapvars['stdlat2'] = float(datahd[4])
                data_grid.mapvars['stdlon'] = float(datahd[5])
                data_grid.mapvars['reflat'] = float(datahd[6])
                data_grid.mapvars['reflon'] = float(datahd[7])

                data_grid.nz = int(float(datahd[1]))
                data_grid.nzp1 = int(float(datahd[1])) + 1
                data_grid.nnest = int(float(datahd[10]))
                data_grid.delx0 = float(datahd[9])
                data_grid.dely0 = float(datahd[8])

                nn = 1
                while nn <= data_grid.nnest:
                    ng = 30 + (nn - 1) * 30
                    nx = int(float(datahd[ng - 1]))
                    ny = int(float(datahd[ng + 0]))
                    ii = float(datahd[ng + 1])
                    jj = float(datahd[ng + 2])
                    iref = float(datahd[ng + 3])
                    jref = float(datahd[ng + 4])
                    tcmx = float(datahd[ng + 27])
                    tcmy = float(datahd[ng + 28])
                    delx = float(datahd[ng + 6])
                    dely = float(datahd[ng + 7])
                    data_grid.nest.append(
                        Nest(nx, ny, ii, jj, iref, jref, tcmx, tcmy, delx,
                             dely))
                    nn = nn + 1

                # vertical indices
                nz = data_grid.nz
                dsigm = np.array(datahd[500:500 + nz]).astype(np.float)
                data_grid.sigw = np.append(
                    np.flipud(np.cumsum(np.flipud(dsigm))), [0.0])
                data_grid.sigm = datahd[800:800 + nz]
                data_grid.ztop = data_grid.sigw[0]
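                # Worked example (hypothetical 3-level grid): dsigm =
                # [100., 200., 300.] gives sigw = [600., 500., 300., 0.]
                # and ztop = 600.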

        data, wxparameter, level_type, lvl1, lvl2, dtg, filetype = seek_field(
            fname)
        metadata['top']['level'] = lvl2
        metadata['top']['start_datetime'] = datetime.strptime(dtg, '%Y%m%d%H')
        metadata['top']['end_datetime'] = datetime.strptime(dtg, '%Y%m%d%H')
        metadata['top']['dataprovider'] = 'NRL'
        metadata['top']['filename_datetime'] = metadata['top'][
            'start_datetime']
        metadata['top']['platform_name'] = 'model'
        metadata['top']['source_name'] = 'coampsieee'
        si = SatSensorInfo(metadata['top']['platform_name'],
                           metadata['top']['source_name'])
        if not si:
            raise SciFileError(
                'Unrecognized platform and source name combination: ' +
                metadata['top']['platform_name'] + ' ' +
                metadata['top']['source_name'])

        dfn = DataFileName(os.path.basename(fname))
        if dfn:
            sdfn = dfn.create_standard()
            metadata['top']['filename_datetime'] = sdfn.datetime

        # Tells driver to NOT try to sector this data.
        metadata['top']['NON_SECTORABLE'] = True

        if chans == []:
            return

#        def rdata (filename)#parmnm, lvltyp, lev1, lev2, inest, dtg, tau, indir='def', outtyp='fcstfld', im=-1, jm=-1):
#
#        # read coamps binary flat file
#
#            if indir == 'def': indir = data_grid.indir
#            if im == -1: im = data_grid.nest[inest].nx
#            if jm == -1: jm = data_grid.nest[inest].ny
#            if inest == 0:
#        #global
#                filename = "%s/%s_%s_%06.1f_%06.1f_glob%03dx%03d_%s_%04d0000_%s" \
#                %(indir, parmnm, lvltyp, lev1, lev2, im, jm, dtg, tau, outtyp)
#            else:
#        #COAMPS
#                filename = "%s/%s_%s_%06d_%06d_%1da%04dx%04d_%s_%04d0000_%s" \
#                %(indir, parmnm, lvltyp, lev1, lev2, inest, im, jm, dtg, tau, outtyp)
#
#        #  print "Reading %s"%filename
#            num_bytes = im*jm*4
#
#            offset = 0
#
#        #  binary file read
#            if os.path.isfile(filename):
#                f = open( filename, 'rb' )
#                f.seek( offset )
#                data = np.fromstring(f.read(num_bytes), dtype='float32')
#        # COAMPS irsat values are little_endian all others are big
#                if sys.byteorder == 'little':
#                    if parmnm != 'irrcmp':
#                        data = data.byteswap()
#                data = data.reshape(jm, im)
#                data = np.ma.masked_equal(data, -990.99)
#                f.close()
#                istat = 0
#            else:
#                print "MISSING %s"%parmnm
#                print filename
#                data = [[-999.99] * im] * jm
#                istat = -1
#            return data, istat

        # Loop through each dataset name found in the dataset_info property above.
        for dsname in self.dataset_info.keys():
            for geoipsvarname, dfvarname in self.dataset_info[dsname].items():
                log.info('    Reading ' + dsname + ' channel "' + dfvarname +
                         '" from file into SciFile channel: "' +
                         geoipsvarname + '"...')
                fillvalue = data.fill_value
                datavars[dsname][geoipsvarname] = np.ma.masked_equal(
                    data, fillvalue)
        # Loop through each dataset name found in the gvar_info property above.
        for dsname in self.gvar_info.keys():
            for geoipsvarname, dfvarname in self.gvar_info[dsname].items():
                if dfvarname == 'latitu':
                    geolog, wxparameter, level_type, lvl1, lvl2, dtg, filetype = seek_field(
                        latitude)
                if dfvarname == 'longit':
                    geolog, wxparameter, level_type, lvl1, lvl2, dtg, filetype = seek_field(
                        longitude)
                    # Wrap longitudes from [0, 360) into [-180, 180).
                    geolog = np.ma.where(geolog > 180, geolog - 360, geolog)
                fillvalue = geolog.fill_value
                log.info('    Reading ' + dsname + ' channel "' + dfvarname +
                         '" from file into SciFile channel: "' +
                         geoipsvarname + '"...')
                gvars[dsname][geoipsvarname] = np.ma.masked_equal(
                    geolog, fillvalue)
Esempio n. 17
0
def find_available_data_files(opasses,
                              start_dt,
                              satellite,
                              sensor,
                              extra_dirs,
                              prodtype=None,
                              ext='*'):
    all_files = []
    #    runfulldir = None
    overall_start_dt = start_dt
    overall_end_dt = start_dt
    #print opasses
    for opass in sorted(opasses, key=operator.attrgetter('basedt')):

        log.interactive('Trying opass: ' + str(opass))
        # The next lines were garbled in the source listing; reconstructed
        # from context: a DataFileName (dfn) is needed below, and prodtype
        # falls back to the sensor's default product type.
        dfn = DataFileName()  # assumed no-arg constructor; original call lost
        if not prodtype:
            prodtype = dfn.sensorinfo.FName['default_producttype']
        #print prodtype
#        dfn.producttype = prodtype

#        runfulldir = dfn.sensorinfo.FName['runfulldir']

        # If this is a long overpass, make sure we get the data files
        # coming before the overpass time.
        mins_per_file = dfn.sensorinfo.mins_per_file
        startdt = opass.startdt - timedelta(minutes=mins_per_file)
        enddt = opass.enddt + timedelta(minutes=mins_per_file)
        if startdt < overall_start_dt:
            overall_start_dt = startdt
        overall_end_dt = enddt
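        # e.g. with 15-minute granules, an overpass spanning 0217-0231Z
        # searches 0202 through 0246Z, so the granule that started just
        # before the pass is still picked up (times illustrative).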

        all_files += DataFileName.list_range_of_files(
            satellite,
            sensor,
            startdt,
            enddt,
            datetime_wildcards={
                '%H': '%H',
                '%M': '*',
                '%S': '*'
            },
            data_provider='*',
            resolution='*',
            channel='*',
            producttype=prodtype,
            area='*',
            extra='*',
            ext=ext,
            forprocess=True,
        )
        all_files = list(set(all_files))
        log.interactive('        Total files so far: ' + str(len(all_files)))

    if all_files:
        log.interactive('Available files:' + bigindent + bigindent.join(
            commands.getoutput('ls --full-time -d ' + str(file))
            for file in sorted(all_files)) + '\n\n')
    return all_files
Esempio n. 18
0

    if not list:
        file_num = 0
        for file in all_files:
            file_num = file_num + 1
            log.interactive('\n\n\n')
            log.interactive('Starting processing for file ' + str(file_num) +
                            ' of ' + str(len(all_files)) + ': ' + file +
                            '\n\n')
            if os.path.isdir(file):
                try:
                    dfn = DataFileName(glob(file + '/*')[0])
                except IndexError:
                    log.interactive(
                        '    There appear to be no data files in the directory! Skipping'
                    )
                    continue
            else:
                dfn = DataFileName(os.path.basename(file))
            dt = dfn.datetime
            if start_datetime and (dfn.datetime < start_datetime
                                   or dfn.datetime > end_datetime):
                log.interactive('Outside of time range, skipping')
                continue
            currsectorlist = []
            if not sectorlist:
                curropasses = pass_prediction([dfn.satname], [dfn.sensorname],
Esempio n. 19
0
    def makedirs(self, file):
        dfn = DataFileName(os.path.basename(file))
        sdfn = dfn.create_standard(downloadSiteObj=self)
        sdfn.makedirs()
Esempio n. 20
0
    def read(self,
             fname,
             datavars,
             gvars,
             metadata,
             chans=None,
             sector_definition=None):
        '''
        Reads all topo data in the region defined by the area definition.
        '''
        self.area_definition = sector_definition.area_definition
        self.shape = (43200, 21600)
        crnr_1, crnr_2 = self.corner_coords
        inds = ((crnr_1[0], crnr_2[0], 1), (crnr_1[1], crnr_2[1], 1))
        try:
            xstart, xend, xstep = inds[0]
            ystart, yend, ystep = inds[1]
            shape = (yend - ystart, xend - xstart)
            dims = 2
        #If that fails, try for one dimension
        except TypeError:
            start, end, step = inds
            shape = (end - start, )
            dims = 1

        storage_dtype = np.dtype('>i2')
        # Binary topo data; open in binary mode.
        fileobj = open(fname, mode='rb')

        #If we are only slicing in one dimension
        if dims == 1:
            data_array = self.read_slice(fileobj, start, shape, storage_dtype)
        elif dims == 2:
            lineshape = (xend - xstart, )
            data_array = np.ma.empty(shape, dtype=np.dtype(storage_dtype))
            newline = 0
            for line in range(ystart, yend, ystep):
                start = xstart + line * self.shape[0]
                linedata = self.read_slice(fileobj, start, lineshape,
                                           storage_dtype)
                data_array[newline, :] = linedata
                newline += 1

        fileobj.close()

        datavars['TOPO']['topo'] = data_array

        #Create lons and lats
        pixel_size = 30 / 3600.0  #30 arc seconds
        lonline = np.ma.arange(*inds[0]) * pixel_size
        gvars['TOPO']['Longitude'] = np.ma.vstack(
            [lonline for num in range(*inds[1])])
        latsamp = np.ma.arange(*inds[1]) * pixel_size
        gvars['TOPO']['Latitude'] = np.ma.hstack(
            [latsamp[np.newaxis].T for num in range(*inds[0])])
        #Convert lons to -180 to 180
        western = gvars['TOPO']['Longitude'] > 180
        gvars['TOPO']['Longitude'][
            western] = gvars['TOPO']['Longitude'][western] - 360

        #Convert lats to -90 to 90
        southern = gvars['TOPO']['Latitude'] > 90
        northern = gvars['TOPO']['Latitude'] <= 90
        gvars['TOPO']['Latitude'][southern] = -(
            gvars['TOPO']['Latitude'][southern] - 90)
        gvars['TOPO']['Latitude'][northern] = np.abs(
            gvars['TOPO']['Latitude'][northern] - 90)
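        # Worked example of the index-to-coordinate mapping on the
        # 30-arc-second grid: column 43199 gives lon = 43199 * 30/3600 =
        # 359.99, wrapped above to about -0.008; row 0 gives lat
        # 90 - 0 = +90, and row 21599 gives 179.99, flipped to -89.99.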

        # Grab necessary metadata that will populate the _finfo,
        # _dsinfo, and _varinfo properties on the SciFile object.
        # These get passed along when initializing the Variable
        # instance, then get propagated up to the dsinfo and finfo
        # levels.
        # The available fields for varinfo can be found in
        # scifile/containers.py at the beginning of the file.
        try:
            sdfn = DataFileName(os.path.basename(fname)).create_standard()
            metadata['top']['start_datetime'] = sdfn.datetime
            metadata['top']['dataprovider'] = sdfn.dataprovider
        except:
            # Set an arbitrary time on the data... Not used for anything anyway...
            metadata['top']['start_datetime'] = datetime.strptime(
                '20150410.000000', '%Y%m%d.%H%M%S')
            metadata['top']['dataprovider'] = None
        metadata['top']['end_datetime'] = metadata['top']['start_datetime']
        #metadata['TOPO']['platform_name'] = sdfn.satname
        metadata['top']['platform_name'] = None
        #        metadata['TOPO']['filename_datetime'] = varinfo['start_datetime']
        metadata['top']['source_name'] = None

        return
Esempio n. 21
0
    def read(self,
             fname,
             datavars,
             gvars,
             metadata,
             chans=None,
             sector_definition=None):

        # Use filename fields for the datetimes. Note sdfn is required
        # unconditionally below, so this reader depends on the standard
        # filename format.
        dfn = DataFileName(os.path.basename(glob(os.path.join(fname, '*'))[0]))
        sdfn = dfn.create_standard()
        metadata['top']['filename_datetime'] = sdfn.datetime
        metadata['top']['start_datetime'] = sdfn.datetime
        metadata['top']['end_datetime'] = sdfn.datetime
        metadata['top']['dataprovider'] = 'nesdisstar'
        metadata['top']['platform_name'] = sdfn.satname
        metadata['top']['source_name'] = 'seviri'
        # MUST be set on readers that sector at read time.
        # Affects how reading/processing is done in driver.py
        metadata['top']['sector_definition'] = sector_definition
        metadata['top']['SECTOR_ON_READ'] = True

        si = SatSensorInfo(metadata['top']['platform_name'],
                           metadata['top']['source_name'])
        if not si:
            from ..scifileexceptions import SciFileError
            raise SciFileError(
                'Unrecognized platform and source name combination: ' +
                metadata['top']['platform_name'] + ' ' +
                metadata['top']['source_name'])

        # chans == [] specifies we don't want to read ANY data, just metadata.
        # chans == None specifies that we are not specifying a channel list,
        #               and thus want ALL channels.
        if chans == []:
            # If NO CHANNELS were specifically requested, just return at this
            # point with the metadata fields populated. A dummy SciFile dataset
            # will be created with only metadata. This is for checking what
            # platform/source combination we are using, etc.
            return

        outdir = os.path.join(gpaths['LOCALSCRATCH'],
                              os.path.dirname(sdfn.name))
        self.decompress_msg(fname, outdir, chans)
        try:
            global_data = Scene(platform_name="Meteosat-8",
                                sensor="seviri",
                                reader="hrit_msg",
                                start_time=sdfn.datetime,
                                base_dir=outdir)
        except TypeError:
            global_data = Scene(
                filenames=glob(os.path.join(outdir, '*')),
                reader="hrit_msg",
                filter_parameters={'start_time': sdfn.datetime})
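        # The try/except above accommodates two satpy Scene signatures:
        # older releases took platform_name/sensor/start_time/base_dir,
        # while newer ones take an explicit filename list plus
        # filter_parameters, hence the TypeError fallback.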
        metadata['top']['start_datetime'] = global_data.start_time
        metadata['top']['end_datetime'] = global_data.end_time

        # Loop through each dataset name found in the dataset_info property above.
        for dsname in self.dataset_info.keys():
            # Loop through the variables found in the current dataset
            # The dataset_info dictionary maps the geoips varname to the
            # varname found in the original datafile
            for geoipsvarname, spvarname in self.dataset_info[dsname].items():
                # If we requested specific channels, and the current channel
                # is not in the list, skip this variable.
                if chans and geoipsvarname not in chans:
                    continue
                # Read the current channel data into the datavars dictionary
                log.info('    Initializing ' + dsname + ' channel "' +
                         spvarname + '" from file into SciFile channel: "' +
                         geoipsvarname + '"...')
                global_data.load([spvarname])
                # Read spvarname from the original datafile into datavars[dsname][geoipsvarname]
        ad = sector_definition.area_definition
        log.info('    Sectoring data to ' + ad.name + ' ...')
        sectored_data = global_data.resample(ad)
        for spvarname in sectored_data.datasets.keys():
            for dsname in self.dataset_info.keys():
                for geoipsvarname in self.dataset_info[dsname].keys():
                    if self.dataset_info[dsname][
                            geoipsvarname] == spvarname.name:
                        if 'Longitude' not in gvars[dsname].keys():
                            log.info('    Saving Longitude to gvars')
                            gvars[dsname]['Longitude'] = np.ma.array(
                                ad.get_lonlats()[0])
                        if 'Latitude' not in gvars[dsname].keys():
                            log.info('    Saving Latitude to gvars')
                            gvars[dsname]['Latitude'] = np.ma.array(
                                ad.get_lonlats()[1])
                        if 'SunZenith' not in gvars[dsname].keys():
                            from geoips.scifile.solar_angle_calc import satnav
                            log.info(
                                '        Using satnav, can only calculate Sun Zenith angles'
                            )
                            gvars[dsname]['SunZenith'] = satnav(
                                'SunZenith', metadata['top']['start_datetime'],
                                gvars[dsname]['Longitude'],
                                gvars[dsname]['Latitude'])
                        self.set_variable_metadata(metadata, dsname,
                                                   geoipsvarname)
                        try:
                            datavars[dsname][geoipsvarname] =\
                             np.ma.array(sectored_data.datasets[spvarname.name].data,
                             mask=sectored_data.datasets[spvarname.name].mask)
                            log.info('Sectored variable %s' %
                                     (spvarname.name))
                        except AttributeError:
                            log.warning(
                                'Variable %s does not contain a mask, masking invalid values! Might take longer'
                                % (spvarname.name))
                            datavars[dsname][geoipsvarname] =\
                                np.ma.masked_invalid(sectored_data.datasets[spvarname.name].data)
Esempio n. 22
0
    def read(self,fname,datavars,gvars,metadata,chans=None,sector_definition=None):

        # Use the appropriate python based reader for opening the current data 
        # type. This package must be installed on the system, and imported above
        # in the "Installed Libraries" import section.
        df = ncdf.Dataset(str(fname), 'r')

        # Uncomment this shell statement in order to interrogate the data file 
        # and determine what attributes and fields need to be read from the 
        # data file.
        #print 'Entering IPython shell in '+self.name+' for development purposes'
        #shell()

        # Grab necessary metadata that will populate the _finfo, 
        # _dsinfo, and _varinfo properties on the SciFile object.
        # The available fields for varinfo can be found in 
        # scifile/containers.py at the beginning of the file.
        # Additional options available with the metadata dictionary
        # can be found in the comments with the dataset_info property above.
        metadata['top']['start_datetime'] = datetime.strptime(df.time_coverage_start.split('.')[0],'%Y-%m-%dT%H:%M:%S')
        # Note an apparent bug in productfilename uses end_datetime as filename. 
        # For now just leave out end_datetime (it automatically gets set to start_datetime
        # in scifile if undefined)
        # Ooops, this might have messed up pass predictor
        metadata['top']['end_datetime'] = datetime.strptime(df.time_coverage_end.split('.')[0],'%Y-%m-%dT%H:%M:%S')
        metadata['top']['dataprovider'] = 'unknown'
        # DOC/NOAA/NESDIS/OSPO > Office of Satellite and Product Operations,     NESDIS, NOAA, U.S. Department of
        # Commerce
        if 'DOC/NOAA/NESDIS/OSPO' in df.institution:
            metadata['top']['dataprovider'] = 'noaa-nesdis-ospo'
        elif 'NOAA' in df.institution and 'NESDIS' in df.institution:
            metadata['top']['dataprovider'] = 'noaanesdis'
        elif 'NOAA' in df.institution:
            metadata['top']['dataprovider'] = 'noaa'
        metadata['top']['filename_datetime'] = metadata['top']['start_datetime']

        # Tells driver to NOT try to sector this data.
        metadata['top']['NON_SECTORABLE'] = True

        # platform_name and source_name MUST match values found 
        # in SensorInfo_classes and SatInfo_classes in utils/satellite_info.py. 
        # Those are the keys used throughout GeoIPS for determining what data 
        # type we are working with. If opening the SatSensorInfo object fails,
        # raise an Error and stop operation.
        
        # source_name = 'amsr2'
        # platform_name = 'gcom-w1'
        metadata['top']['platform_name'] = df.platform_name.lower()
        metadata['top']['source_name'] = df.instrument_name.lower()
        si = SatSensorInfo(metadata['top']['platform_name'],metadata['top']['source_name'])
        if not si:
            from ..scifileexceptions import SciFileError
            raise SciFileError('Unrecognized platform and source name combination: '+metadata['top']['platform_name']+' '+metadata['top']['source_name'])

        # Use filename field for filename_datetime if it is available.
        # Else, just use the start_datetime we found from the data
        # above. Note we ALWAYS want to have a default if DataFileName
        # is not defined.  We do not want to rely on having our specific 
        # internal filename format in order to process, but if we have 
        # additional information available from the data filename, we
        # can use it.
        dfn = DataFileName(os.path.basename(fname)) 
        if dfn:
            sdfn = dfn.create_standard()
            metadata['top']['filename_datetime'] = sdfn.datetime


        # chans == [] specifies we don't want to read ANY data, just metadata.
        # chans == None specifies that we are not specifying a channel list, 
        #               and thus want ALL channels.
        if chans == []:
            # If NO CHANNELS were specifically requested, just return at this 
            # point with the metadata fields populated. A dummy SciFile dataset 
            # will be created with only metadata. This is for checking what 
            # platform/source combination we are using, etc.
            return 


        # Set up the dictionaries of variables that will go in each dataset.
        #       datavars: actual channel data
        #       gvars:    geolocation variable data 
        #                 specifically named:
        #                 Latitude (REQUIRED), 
        #                 Longitude (REQUIRED), and 
        #                 SunZenith (optional, required for day/night 
        #                 discrimination) 

        # Each data variable array and geolocation variable array of a 
        #   specific dataset_name MUST be the same shape

        # datavars[dataset_name][geoips_varname] = geoips_varname_channel_data
        # gvars[dataset_name]['Latitude'] = dataset_name_lat_numpy_array
        # gvars[dataset_name]['Longitude'] = dataset_name_lon_numpy_array
        # *OPTIONAL* gvars[dataset_name]['SunZenith'] = dataset_name_sunzenith_numpy_array
        
        # geoips_varname_channel_data.shape == dataset_name_lat_numpy_array.shape 
        #   == dataset_name_lon_array.shape == dataset_name_sunzenith_numpy_array.shape

        # Only datavars and gvars with the same shape can go in the same dataset.

        # See additional datavars and gvars dictionary structure information
        #       found in the comments above, with the dataset_info property of this reader.
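        # For instance (hypothetical dataset name and shapes):
        #   datavars['DATASET']['tb89h']   -> (2030, 486) masked array
        #   gvars['DATASET']['Latitude']   -> (2030, 486) masked array
        #   gvars['DATASET']['Longitude']  -> (2030, 486) masked array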


        # Loop through each dataset name found in the dataset_info property above.
        for dsname in self.dataset_info.keys():
            # Loop through the variables found in the current dataset
            # The dataset_info dictionary maps the geoips varname to the
            # varname found in the original datafile
            for geoipsvarname,ncvarname in self.dataset_info[dsname].items():
                # If we requested specific channels, and the current channel
                # is not in the list, skip this variable.
                if chans and geoipsvarname not in chans:
                    continue
                # Read the current channel data into the datavars dictionary
                log.info('    Reading '+dsname+' channel "'+ncvarname+'" from file into SciFile channel: "'+geoipsvarname+'"...')
                # Read ncvarname from the original datafile into datavars[dsname][geoipsvarname]
                datavars[dsname][geoipsvarname] = np.ma.masked_equal(df.variables[ncvarname][...],df.variables[ncvarname]._FillValue)

        # Loop through each dataset name found in the gvar_info property above.
        for dsname in self.gvar_info.keys():
            # Loop through the variables found in the current dataset
            # The gvar_info dictionary maps the geoips varname to the
            # varname found in the original datafile
            for geoipsvarname,ncvarname in self.gvar_info[dsname].items():
                # Read the current channel data into the datavars dictionary
                log.info('    Reading '+dsname+' channel "'+ncvarname+'" from file into SciFile channel: "'+geoipsvarname+'"...')
                # Read ncvarname from the original datafile into datavars[dsname][geoipsvarname]
                gvars[dsname][geoipsvarname] = np.ma.masked_equal(df.variables[ncvarname][...],df.variables[ncvarname]._FillValue)
Esempio n. 23
0
    def read(self,
             fname,
             datavars,
             gvars,
             metadata,
             chans=None,
             sector_definition=None):
        # The variables can be spread across separate GRIB messages, so
        # loop through the candidates until a representative message is
        # found (used below for the date/time metadata).
        df = pg.open(fname)
        temp = None
        for varmap in MODEL_GRIB_Reader.dataset_info.values():
            for var in varmap.values():
                try:
                    temp = df.select(name=var)[0]  # first matching GRIB message
                except Exception:
                    if var in ('238 (instant)', '133 (instant)',
                               '222 (instant)', '218 (instant)',
                               '221 (instant)'):
                        temptemp = df.select()
                        if var in str(temptemp):
                            temp = temptemp[0]
                    continue
        #temp = df.select(name = 'Temperature')[0]

        #print 'Entering IPython shell in '+self.name+' for development purposes'
        #shell()
        if not temp:
            log.warning('Unable to read from file ' + fname +
                        ' no matching select for vars')
            return
        # validityDate is YYYYMMDD and validityTime is HHMM (e.g. 20180924
        # and 600 combine to '201809240600').
        dt = '{:08d}{:04d}'.format(temp.validityDate, temp.validityTime)
        print dt
        if temp.validityDate:
            metadata['top']['start_datetime'] = datetime.strptime(
                dt, '%Y%m%d%H%M')
            metadata['top']['end_datetime'] = datetime.strptime(
                dt, '%Y%m%d%H%M')
        else:
            metadata['top']['start_datetime'] = temp.analDate
            metadata['top']['end_datetime'] = temp.analDate
        metadata['top']['dataprovider'] = temp.centre
        metadata['top']['filename_datetime'] = metadata['top'][
            'start_datetime']
        metadata['top']['platform_name'] = 'model'
        #metadata['top']['source_name'] = 'model'
        metadata['top']['tau'] = temp.startStep
        metadata['top']['level'] = temp.level

        if 'COAMPS' in fname:
            metadata['top']['source_name'] = 'coamps'
        else:
            metadata['top']['source_name'] = 'navgem'

        si = SatSensorInfo(metadata['top']['platform_name'],
                           metadata['top']['source_name'])
        if not si:
            from ..scifileexceptions import SciFileError
            raise SciFileError(
                'Unrecognized platform and source name combination: ' +
                metadata['top']['platform_name'] + ' ' +
                metadata['top']['source_name'])

        dfn = DataFileName(os.path.basename(fname))
        if dfn:
            sdfn = dfn.create_standard()
            metadata['top']['filename_datetime'] = sdfn.datetime

        # Tells driver to NOT try to sector this data.
        metadata['top']['NON_SECTORABLE'] = True
        if chans == []:
            return

        new = None
        # Loop through each dataset name found in the dataset_info property above.
        for dsname in self.dataset_info.keys():
            for geoipsvarname, dfvarname in self.dataset_info[dsname].items():
                try:
                    new = df.select(
                        name=dfvarname)  #,typeOfLevel='isobaricInhPa')
                except:
                    continue
                for newest in new:
                    data = newest.values
                    fillvalue = newest.missingValue
                    level = newest.level
                    #shell()
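                    # GRIB winds are reported in 'm s**-1'; multiplying
                    # by 1.94384 converts m/s to knots.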
                    if newest.units == 'm s**-1':
                        data = data * 1.94384

                    log.info('    Reading ' + dsname + ' channel "' +
                             dfvarname +
                             '" from file into SciFile channel: "' +
                             geoipsvarname + str(level) + '" ...')
                    datavars[dsname][geoipsvarname +
                                     str(level)] = np.ma.masked_equal(
                                         data, fillvalue)
                    shape = datavars[dsname][geoipsvarname + str(level)].shape
        if not new:
            log.warning('Unable to read from file ' + fname +
                        ' no matching selects')
            return
        # Loop through each dataset name found in the gvar_info property above.
        for dsname in self.gvar_info.keys():
            for geoipsvarname, dfvarname in self.gvar_info[dsname].items():
                log.info('    Reading ' + dsname + ' channel "' + dfvarname +
                         '" from file into SciFile channel: "' +
                         geoipsvarname + '"...')
                # new is the list of GRIB messages from the last select;
                # read the geolocation keys from its final message.
                newest = new[-1]
                data = newest[dfvarname]
                fillvalue = newest.missingValue
                #shell()
                if data.size == newest.getNumberOfValues:
                    data = np.reshape(data, shape)
                #if data.shape == (1038240,):
                #    data= np.reshape(data,(721,1440))
                #elif data.shape == (259920,):
                #    data= np.reshape(data,(361,720))
                #elif data.shape == (76454,):
                #    data= np.reshape(data,(254, 301))
                gvars[dsname][geoipsvarname] = np.ma.masked_equal(
                    data, fillvalue)