Example #1
0
    def listVarOptions(self, package, setname, varname):
        import metrics.fileio.filetable as ft
        import metrics.fileio.findfiles as fi
        dtree = fi.dirtree_datafiles(self, pathid=0)
        filetable = ft.basic_filetable(dtree, self)

        if package[0].lower() == 'lmwg':
            import metrics.packages.lmwg.lmwg
            pinstance = metrics.packages.lmwg.lmwg.LMWG()
        elif package[0].lower() == 'amwg':
            import metrics.packages.amwg.amwg
            pinstance = metrics.packages.amwg.amwg.AMWG()

        slist = pinstance.list_diagnostic_sets()
        keys = slist.keys()
        keys.sort()
        for k in keys:
            fields = k.split()
            if setname[0] == fields[0]:
                vl = slist[k]._all_variables(filetable)
                for v in varname:
                    if v in vl.keys():
                        #                  vo = slist[k][v].varoptions()
                        vo = vl[v].varoptions()
                        print 'Variable ', v, 'in set', setname[0], 'from package', package[0], \
                            'at path', self._opts['path'][0], 'has options:'
                        print vo
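
The listVarOptions variants in this file are Python 2 code (print statements, list-returning dict.keys()). As a point of comparison, here is a minimal Python 3 sketch of the same set-matching loop; it is an illustration only, not part of the metrics package, and it assumes slist, filetable, setname, and varname have the shapes used above (a dict of diagnostic sets keyed by 'N description' strings, a filetable, and argparse-style single-element lists).

# Hypothetical helper, Python 3 style: dict.keys() is a view, so sorted() replaces
# keys.sort(), and print is a function.
def print_var_options(slist, filetable, setname, varname):
    for k in sorted(slist):
        fields = k.split()
        if setname[0] != fields[0]:
            continue
        vl = slist[k]._all_variables(filetable)
        for v in varname:
            if v in vl:
                print('Variable', v, 'in set', setname[0], 'has options:')
                print(vl[v].varoptions())
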
Example #2
0
    def listVarOptions(self, package, setname, varname):
        import metrics.fileio.filetable as ft
        import metrics.fileio.findfiles as fi

        dtree = fi.dirtree_datafiles(self, pathid=0)
        filetable = ft.basic_filetable(dtree, self)

        if package[0].lower() == "lmwg":
            import metrics.packages.lmwg.lmwg

            pinstance = metrics.packages.lmwg.lmwg.LMWG()
        elif package[0].lower() == "amwg":
            import metrics.packages.amwg.amwg

            pinstance = metrics.packages.amwg.amwg.AMWG()

        slist = pinstance.list_diagnostic_sets()
        keys = slist.keys()
        keys.sort()
        for k in keys:
            fields = k.split()
            if setname[0] == fields[0]:
                vl = slist[k]._all_variables(filetable)
                for v in varname:
                    if v in vl.keys():
                        #                  vo = slist[k][v].varoptions()
                        vo = vl[v].varoptions()
                        print "Variable ", v, "in set", setname[0], "from package", package[0], "at path", self._opts[
                            "path"
                        ][0], "has options:"
                        print vo
Example #3
0
    def listVariables(self, package, setname):
        import metrics.fileio.filetable as ft
        import metrics.fileio.findfiles as fi

        dtree = fi.dirtree_datafiles(self, pathid=0)
        filetable = ft.basic_filetable(dtree, self)

        # this needs a filetable probably, or we just define the maximum list of variables somewhere
        #      im = ".".join(['metrics', 'packages', package[0].lower()])
        if package[0].lower() == "lmwg":
            import metrics.packages.lmwg.lmwg

            pinstance = metrics.packages.lmwg.lmwg.LMWG()
        elif package[0].lower() == "amwg":
            import metrics.packages.amwg.amwg

            pinstance = metrics.packages.amwg.amwg.AMWG()

        # assume we have a path provided

        #      if pclass:
        #         slist = pclass.list_diagnostic_sets()
        #      else:
        slist = pinstance.list_diagnostic_sets()
        keys = slist.keys()
        keys.sort()
        vl = []
        for k in keys:
            fields = k.split()
            if setname[0] == fields[0]:
                vl = slist[k]._list_variables(filetable)
                print "Available variabless for set", setname[0], "in package", package[0], "at path", self._opts[
                    "path"
                ][0], ":"
                print vl
                print "NOTE: Not all variables make sense for plotting or running diagnostics. Multi-word variable names need enclosed in single quotes:'word1 word2'"
                print "ALL is a valid variable name as well"
        if vl == []:
            print "No variable list returned. Is set", setname[0], "a valid set?"
            quit()
        return
Example #4
0
    def listVariables(self, package, setname):
        import metrics.fileio.filetable as ft
        import metrics.fileio.findfiles as fi
        dtree = fi.dirtree_datafiles(self, pathid=0)
        filetable = ft.basic_filetable(dtree, self)

        # this needs a filetable probably, or we just define the maximum list of variables somewhere
        #      im = ".".join(['metrics', 'packages', package[0].lower()])
        if package[0].lower() == 'lmwg':
            import metrics.packages.lmwg.lmwg
            pinstance = metrics.packages.lmwg.lmwg.LMWG()
        elif package[0].lower() == 'amwg':
            import metrics.packages.amwg.amwg
            pinstance = metrics.packages.amwg.amwg.AMWG()

        # assume we have a path provided

#      if pclass:
#         slist = pclass.list_diagnostic_sets()
#      else:
        slist = pinstance.list_diagnostic_sets()
        keys = slist.keys()
        keys.sort()
        vl = []
        for k in keys:
            fields = k.split()
            if setname[0] == fields[0]:
                vl = slist[k]._list_variables(filetable)
                print 'Available variables for set', setname[0], 'in package', package[0], \
                    'at path', self._opts['path'][0], ':'
                print vl
                print 'NOTE: Not all variables make sense for plotting or running diagnostics. Multi-word variable names need to be enclosed in single quotes: \'word1 word2\''
                print 'ALL is a valid variable name as well'
        if vl == []:
            print 'No variable list returned. Is set', setname[0], 'a valid set?'
            quit()
        return
Example #5
0
   def listVarOptions(self, package, setname, varname):
      import metrics.fileio.filetable as ft
      import metrics.fileio.findfiles as fi
      if setname is None:
         print "ERROR, must specify plot set to list variable options"
         quit()
      if varname is None:
         print "ERROR, must specify variable to list variable options"
         quit()
      dtree = fi.dirtree_datafiles(self, modelid=0)
      filetable = ft.basic_filetable(dtree, self)

      if package is None:
         print "ERROR, must specify package to list variable options"
         quit()
      elif package.lower() == 'lmwg':
         import metrics.packages.lmwg.lmwg
         pinstance = metrics.packages.lmwg.lmwg.LMWG()
      elif package.lower() == 'amwg':
         import metrics.packages.amwg.amwg
         pinstance = metrics.packages.amwg.amwg.AMWG()

      slist = pinstance.list_diagnostic_sets()
      keys = slist.keys()
      keys.sort()
      for k in keys:
         fields = k.split()
         if setname[0] == fields[0]:
            vl = slist[k]._all_variables(filetable, filetable)
            for v in varname:
               if v in vl.keys():
                  vo = vl[v].varoptions()
                  print 'Variable ', v, 'in set', setname[0], 'from package', package, 'at path', self._opts['model'][0]['path'], 'has options:'
                  print vo
               else:
                  print 'Variable ', v, 'in set', setname[0], 'from package', package, 'at path', self._opts['model'][0]['path'], 'has no options.'
Example #6
0
   def listVariables(self, package, setname):
      import logging
      import metrics.fileio.filetable as ft
      import metrics.fileio.findfiles as fi
      if setname is None:
         print "ERROR, must specify plot set to list variables"
         quit()
      dtree = fi.dirtree_datafiles(self, modelid=0)
      filetable = ft.basic_filetable(dtree, self)

      # this needs a filetable probably, or we just define the maximum list of variables somewhere
      if package is None:
         print "ERROR, must specify package to list variables"
         quit()
      elif package.lower() == 'lmwg':
         import metrics.packages.lmwg.lmwg
         pinstance = metrics.packages.lmwg.lmwg.LMWG()
      elif package.lower() == 'amwg':
         import metrics.packages.amwg.amwg
         pinstance = metrics.packages.amwg.amwg.AMWG()

      slist = pinstance.list_diagnostic_sets()
      keys = slist.keys()
      keys.sort()
      vl = []
      for k in keys:
         fields = k.split()
         if setname[0] == fields[0]:
            vl = slist[k]._list_variables(filetable, filetable)
            print 'Available variables for set', setname[0], 'in package', package, 'at path', self._opts['model'][0]['path'], ':'
            print vl
            print 'NOTE: Not all variables make sense for plotting or running diagnostics. Multi-word variable names need to be enclosed in single quotes: \'word1 word2\''
            print 'ALL is a valid variable name as well'
      if vl == []:
         logging.critical('No variable list returned. Is set %s a valid set?',setname[0])
         quit()
      return
Example #7
0
    def processCmdLine(self):
        parser = argparse.ArgumentParser(
            description="UV-CDAT Climate Modeling Diagnostics", usage="%(prog)s --path1 [options]"
        )

        parser.add_argument(
            "--path",
            "-p",
            action="append",
            nargs=1,
            help="Path(s) to dataset(s). This is required.  If two paths need different filters, set one here and one in path2.",
        )
        parser.add_argument("--path2", "-q", action="append", nargs=1, help="Path to a second dataset.")
        parser.add_argument("--obspath", action="append", nargs=1, help="Path to an observational dataset")
        parser.add_argument("--cachepath", nargs=1, help="Path for temporary and cachced files. Defaults to /tmp")
        #      parser.add_argument('--realm', '-r', nargs=1, choices=self.realm_types,
        #         help="The realm type. Current valid options are 'land' and 'atmosphere'")
        parser.add_argument(
            "--filter",
            "-f",
            nargs=1,
            help="A filespec filter. This will be applied to the dataset path(s) (--path option) to narrow down file choices.",
        )
        parser.add_argument(
            "--filter2",
            "-g",
            nargs=1,
            help="A filespec filter. This will be applied to the second dataset path (--path2 option) to narrow down file choices.",
        )
        parser.add_argument(
            "--new_filter",
            "-F",
            action="append",
            nargs=1,
            help="A filespec filter. This will be applied to the corresponding dataset path to narrow down file choices.",
        )
        parser.add_argument(
            "--packages",
            "--package",
            "-k",
            nargs="+",
            help="The diagnostic packages to run against the dataset(s). Multiple packages can be specified.",
        )
        parser.add_argument(
            "--sets",
            "--set",
            "-s",
            nargs="+",
            help="The sets within a diagnostic package to run. Multiple sets can be specified. If multiple packages were specified, the sets specified will be searched for in each package",
        )
        parser.add_argument(
            "--vars",
            "--var",
            "-v",
            nargs="+",
            help="Specify variables of interest to process. The default is all variables which can also be specified with the keyword ALL",
        )
        parser.add_argument(
            "--list",
            "-l",
            nargs=1,
            choices=["sets", "vars", "variables", "packages", "seasons", "regions", "translations", "options"],
            help="Determine which packages, sets, regions, variables, and variable options are available",
        )
        # maybe eventually add compression level too....
        parser.add_argument(
            "--compress",
            nargs=1,
            choices=["no", "yes"],
            help="Turn off netCDF compression. This can be required for other utilities to be able to process the output files (e.g. parallel netCDF based tools",
        )  # no compression, add self state

        parser.add_argument(
            "--outputpre",
            nargs=1,
            help="Specify an output filename prefix to be prepended to all file names created internally. For example --outputpre myout might generate myout-JAN.nc, etc",
        )
        parser.add_argument(
            "--outputpost",
            nargs=1,
            help="Specify an output filename postfix to be appended to all file names created internally. For example --outputpost _OBS might generate set1-JAN_OBS.nc, etc",
        )
        parser.add_argument("--outputdir", "-O", nargs=1, help="Directory in which output files will be written.")

        parser.add_argument(
            "--seasons", nargs="+", choices=all_seasons, help="Specify which seasons to generate climatoogies for"
        )
        parser.add_argument("--years", nargs="+", help="Specify which years to include when generating climatologies")
        parser.add_argument(
            "--months", nargs="+", choices=all_months, help="Specify which months to generate climatologies for"
        )
        parser.add_argument(
            "--climatologies",
            "-c",
            nargs=1,
            choices=["no", "yes"],
            help="Specifies whether or not climatologies should be generated",
        )
        parser.add_argument(
            "--plots", "-t", nargs=1, choices=["no", "yes"], help="Specifies whether or not plots should be generated"
        )
        parser.add_argument("--plottype", nargs=1)
        parser.add_argument(
            "--precomputed",
            nargs=1,
            choices=["no", "yes"],
            help="Specifies whether standard climatologies are stored with the dataset (*-JAN.nc, *-FEB.nc, ... *-DJF.nc, *-year0.nc, etc",
        )
        parser.add_argument(
            "--json",
            "-j",
            nargs=1,
            choices=["no", "yes"],
            help="Produce JSON output files as part of climatology/diags generation",
        )  # same
        parser.add_argument(
            "--netcdf",
            "-n",
            nargs=1,
            choices=["no", "yes"],
            help="Produce NetCDF output files as part of climatology/diags generation",
        )  # same
        parser.add_argument(
            "--xml",
            "-x",
            nargs=1,
            choices=["no", "yes"],
            help="Produce XML output files as part of climatology/diags generation",
        )
        parser.add_argument(
            "--seasonally",
            action="store_true",
            help="Produce climatologies for all of the defined seasons. To get a list of seasons, run --list seasons",
        )
        parser.add_argument("--monthly", action="store_true", help="Produce climatologies for all predefined months")
        parser.add_argument(
            "--yearly", action="store_true", help="Produce annual climatogolies for all years in the dataset"
        )
        parser.add_argument(
            "--timestart", nargs=1, help="Specify the starting time for the dataset, such as 'months since Jan 2000'"
        )
        parser.add_argument(
            "--timebounds",
            nargs=1,
            choices=["daily", "monthly", "yearly"],
            help="Specify the time boudns for the dataset",
        )
        parser.add_argument(
            "--verbose",
            "-V",
            action="count",
            help="Increase the verbosity level. Each -v option increases the verbosity more.",
        )  # count
        parser.add_argument(
            "--name", action="append", nargs=1, help="Specify option names for the datasets for plot titles, etc"
        )  # optional name for the set
        # This will be the standard list of region names NCAR has
        parser.add_argument(
            "--regions",
            "--region",
            nargs="+",
            choices=all_regions.keys(),
            help="Specify a geographical region of interest. Note: Multi-word regions need quoted, e.g. 'Central Canada'",
        )
        parser.add_argument("--starttime", nargs=1, help="Specify a start time in the dataset")
        parser.add_argument("--endtime", nargs=1, help="Specify an end time in the dataset")
        parser.add_argument(
            "--translate",
            nargs="?",
            default="y",
            help="Enable translation for obs sets to datasets. Optional provide a colon separated input to output list e.g. DSVAR1:OBSVAR1",
        )
        parser.add_argument("--varopts", nargs="+", help="Variable auxillary options")

        args = parser.parse_args()

        if args.list != None:
            if args.list[0] == "translations":
                print "Default variable translations: "
                self.listTranslations()
                quit()
            if args.list[0] == "regions":
                print "Available geographical regions: ", all_regions.keys()
                quit()

            if args.list[0] == "seasons":
                print "Available seasons: ", all_seasons
                quit()

            if args.list[0] == "packages":
                print "Listing available packages:"
                print self.all_packages.keys()
                quit()

            if args.list[0] == "sets":
                if args.packages == None:
                    print "Please specify package before requesting available diags sets"
                    quit()
                for p in args.packages:
                    print "Avaialble sets for package ", p, ":"
                    sets = self.listSets(p)
                    keys = sets.keys()
                    for k in keys:
                        print "Set", k, " - ", sets[k]
                quit()

            if args.list[0] == "variables" or args.list[0] == "vars":
                if args.path != None:
                    for i in args.path:
                        self._opts["path"].append(i[0])
                else:
                    print "Must provide a dataset when requesting a variable listing"
                    quit()
                self.listVariables(args.packages, args.sets)
                quit()
            if args.list[0] == "options":
                if args.path != None:
                    for i in args.path:
                        self._opts["path"].append(i[0])
                else:
                    print "Must provide a dataset when requesting a variable listing"
                    quit()
                self.listVarOptions(args.packages, args.sets, args.vars)
                quit()

        # Generally if we've gotten this far, it means no --list was specified. If we don't have
        # at least a path, we should exit.
        if args.path != None:
            for i in args.path:
                self._opts["path"].append(i[0])
        else:
            print "Must specify a path or the --list option at a minimum."
            print 'For help, type "diags --help".'
            quit()
        if args.path2 != None:
            for i in args.path2:
                self._opts["path2"].append(i[0])

        if args.obspath != None:
            for i in args.obspath:
                self._opts["obspath"].append(i[0])

        # TODO: Should some pre-defined filters be "nameable" here?
        if args.filter != None:  # Only supports one filter argument, see filter2.
            self._opts["filter"] = args.filter[0]
            self._opts["user_filter"] = True
        #         for i in args.filter:
        #            self._opts['filter'].append(i[0])
        if args.filter2 != None:  # This is a second filter argument.
            self._opts["filter2"] = args.filter2[0]
            self._opts["user_filter"] = True
        if args.new_filter != None:  # like filter but with multiple arguments
            for i in args.new_filter:
                self._opts["new_filter"].append(i[0])

        if args.cachepath != None:
            self._opts["cachepath"] = args.cachepath[0]

        self._opts["seasonally"] = args.seasonally
        self._opts["monthly"] = args.monthly

        if args.starttime != None:
            self._opts["start"] = args.starttime[0]

        if args.endtime != None:
            self._opts["end"] = args.endtime[0]

        # I checked; these are global and it doesn't seem to matter if you import cdms2 multiple times;
        # they are still set after you set them once in the python process.
        if args.compress != None:
            if args.compress[0] == "no":
                self._opts["compress"] = False
            else:
                self._opts["compress"] = True

        if self._opts["compress"] == True:
            print "Enabling compression for output netCDF files"
            cdms2.setNetcdfShuffleFlag(1)
            cdms2.setNetcdfDeflateFlag(1)
            cdms2.setNetcdfDeflateLevelFlag(9)
        else:
            print "Disabling compression for output netCDF files"
            cdms2.setNetcdfShuffleFlag(0)
            cdms2.setNetcdfDeflateFlag(0)
            cdms2.setNetcdfDeflateLevelFlag(0)

        if args.json != None:
            if args.json[0] == "no":
                self._opts["json"] = False
            else:
                self._opts["json"] = True
        if args.xml != None:
            if args.xml[0] == "no":
                self._opts["xml"] = False
            else:
                self._opts["xml"] = True

        if args.netcdf != None:
            if args.netcdf[0] == "no":
                self._opts["netcdf"] = False
            else:
                self._opts["netcdf"] = True

        if args.plots != None:
            if args.plots[0].lower() == "no" or args.plots[0] == 0:
                self._opts["plots"] = False
            else:
                self._opts["plots"] = True

        if args.climatologies != None:
            if args.climatologies[0] == "no":
                self._opts["climatologies"] = False
            else:
                self._opts["climatologies"] = True

        self._opts["verbose"] = args.verbose

        if args.name != None:
            for i in args.name:
                self._opts["dsnames"].append(i[0])

        # Help create output file names
        if args.outputpre != None:
            self._opts["outputpre"] = args.outputpre[0]
        if args.outputpost != None:
            self._opts["outputpost"] = args.outputpost[0]

        # Output directory
        if args.outputdir != None:
            if not os.path.isdir(args.outputdir[0]):
                print "ERROR, output directory", args.outputdir[0], "does not exist!"
                quit()
            self._opts["outputdir"] = args.outputdir[0]

        if args.translate != "y":
            print args.translate
            print self._opts["translate"]
            quit()
        # Timestart assumes a string like "months since 2000". I can't find documentation on
        # toRelativeTime() so I have no idea how to check for valid input
        # This is required for some of the land model sets I've seen
        if args.timestart != None:
            self._opts["reltime"] = args.timestart

        # cdutil.setTimeBounds{bounds}(variable)
        if args.timebounds != None:
            self._opts["bounds"] = args.timebounds

        # Check if a user specified package actually exists
        # Note: This is case sensitive.....
        if args.packages != None:
            plist = []
            for x in args.packages:
                if x.upper() in self.all_packages.keys():
                    plist.append(x)
                elif x in self.all_packages.keys():
                    plist.append(x.lower())

            if plist == []:
                print "Package name(s) ", args.packages, " not valid"
                print "Valid package names: ", self.all_packages.keys()
                quit()
            else:
                self._opts["packages"] = plist

        # TODO: Requires exact case; probably make this more user friendly and look for mixed case
        if args.regions != None:
            rlist = []
            for x in args.regions:
                if x in all_regions.keys():
                    rlist.append(x)
            print "REGIONS: ", rlist
            self._opts["regions"] = rlist

        # Given user-selected packages, check for user specified sets
        # Note: If multiple packages have the same set names, then they are all added to the list.
        # This might be bad since there is no differentiation of lmwg['id==set'] and lmwg2['id==set']
        if self._opts["packages"] == None and args.sets != None:
            print "No package specified"
            self._opts["sets"] = args.sets

        if args.sets != None and self._opts["packages"] != None:
            # unfortunately, we have to go through all of this....
            # there should be a non-init of the class method to list sets/packages/etc,
            # ie a dictionary perhaps?
            sets = []
            import metrics.fileio.filetable as ft
            import metrics.fileio.findfiles as fi
            import metrics.packages.diagnostic_groups

            package = self._opts["packages"]
            if package[0].lower() == "lmwg":
                import metrics.packages.lmwg.lmwg
            elif package[0].lower() == "amwg":
                import metrics.packages.amwg.amwg
            dtree = fi.dirtree_datafiles(self, pathid=0)
            filetable = ft.basic_filetable(dtree, self)
            dm = metrics.packages.diagnostic_groups.diagnostics_menu()

            pclass = dm[package[0].upper()]()

            slist = pclass.list_diagnostic_sets()
            keys = slist.keys()
            keys.sort()
            for k in keys:
                fields = k.split()
                for user in args.sets:
                    if user == fields[0]:
                        sets.append(user)
            self._opts["sets"] = sets
            if sets != args.sets:
                print "sets requested ", args.sets
                print "sets available: ", slist
                exit(1)

        # check for some varopts first.
        if args.varopts != None:
            self._opts["varopts"] = args.varopts
        # Add some hackery here to convert pressure level vars to var+varopts
        if args.vars != None:
            self._opts["vars"] = args.vars

            vpl = ["Z3_300", "Z3_500", "U_200", "T_200", "T_850"]
            vl = list(set(args.vars) - set(vpl))
            if vl == args.vars:  # no pressure level vars made it this far.
                print "No pressure level vars found in input vars list."
            else:  # more complicated....
                print "Pressure level vars found in input vars list.... Processing...."
                vopts = []
                if (
                    self._opts["varopts"] != [] and self._opts["varopts"] != None
                ):  # hopefully the user didn't also specify varopts....
                    print "User passed in varopts but there are pressure-level variables in the vars list."
                    print "This will append the pressure levels found to the varopts array"
                    # see which pressure level vars were passed. this will be the super set of pressure levels.
                if "Z3_300" in self._opts["vars"]:
                    vopts.append("300")
                    self._opts["vars"] = [x.replace("Z3_300", "Z3") for x in self._opts["vars"]]
                if "Z3_500" in self._opts["vars"]:
                    vopts.append("500")
                    self._opts["vars"] = [x.replace("Z3_500", "Z3") for x in self._opts["vars"]]
                if "T_200" in self._opts["vars"]:
                    vopts.append("200")
                    self._opts["vars"] = [x.replace("T_200", "T") for x in self._opts["vars"]]
                if "T_850" in self._opts["vars"]:
                    vopts.append("850")
                    self._opts["vars"] = [x.replace("T_850", "T") for x in self._opts["vars"]]
                if "U_200" in self._opts["vars"]:
                    vopts.append("200")
                    self._opts["vars"] = [x.replace("U_200", "U") for x in self._opts["vars"]]
                vopts = list(set(vopts))
                if self._opts["varopts"] == [] or self._opts["varopts"] == None:
                    self._opts["varopts"] = vopts
                else:
                    self._opts["varopts"].extend(vopts)
                    self._opts["varopts"] = list(set(self._opts["varopts"]))
                print "Updated vars list: ", self._opts["vars"]

        # If --yearly is set, then we will add 'ANN' to the list of climatologies
        if args.yearly == True:
            self._opts["yearly"] = True
            self._opts["times"].append("ANN")

        # If --monthly is set, we add all months to the list of climatologies
        if args.monthly == True:
            self._opts["monthly"] = True
            self._opts["times"].extend(all_months)

        # If --seasonally is set, we add all 4 seasons to the list of climatologies
        if args.seasonally == True:
            self._opts["seasonally"] = True
            self._opts["times"].extend(all_seasons)

        # This allows specific individual months to be added to the list of climatologies
        if args.months != None:
            if args.monthly == True:
                print "Please specify just one of --monthly or --months"
                quit()
            else:
                mlist = [x for x in all_months if x in args.months]
                self._opts["times"] = self._opts["times"] + mlist

        # This allows specific individual years to be added to the list of climatologies.
        # Note: Checking for valid input is impossible until we look at the dataset
        # This has to be special cased since typically someone will be saying
        # "Generate climatologies for seasons for years X, Y, and Z of my dataset"
        if args.years != None:
            if args.yearly == True:
                print "Please specify just one of --yearly or --years"
                quit()
            else:
                self._opts["years"] = args.years

        if args.seasons != None:
            if args.seasonally == True:
                print "Please specify just one of --seasonally or --seasons"
                quit()
            else:
                slist = [x for x in all_seasons if x in args.seasons]
                self._opts["times"] = self._opts["times"] + slist
Example #8
0
    def processCmdLine(self):
        parser = argparse.ArgumentParser(
            description='UV-CDAT Climate Modeling Diagnostics',
            usage='%(prog)s --path1 [options]')

        parser.add_argument(
            '--path',
            '-p',
            action='append',
            nargs=1,
            help=
            "Path(s) to dataset(s). This is required.  If two paths need different filters, set one here and one in path2."
        )
        parser.add_argument('--path2',
                            '-q',
                            action='append',
                            nargs=1,
                            help="Path to a second dataset.")
        parser.add_argument('--obspath',
                            action='append',
                            nargs=1,
                            help="Path to an observational dataset")
        parser.add_argument(
            '--cachepath',
            nargs=1,
            help="Path for temporary and cachced files. Defaults to /tmp")
        #      parser.add_argument('--realm', '-r', nargs=1, choices=self.realm_types,
        #         help="The realm type. Current valid options are 'land' and 'atmosphere'")
        parser.add_argument(
            '--filter',
            '-f',
            nargs=1,
            help=
            "A filespec filter. This will be applied to the dataset path(s) (--path option) to narrow down file choices."
        )
        parser.add_argument(
            '--filter2',
            '-g',
            nargs=1,
            help=
            "A filespec filter. This will be applied to the second dataset path (--path2 option) to narrow down file choices."
        )
        parser.add_argument(
            '--new_filter',
            '-F',
            action='append',
            nargs=1,
            help=
            "A filespec filter. This will be applied to the corresponding dataset path to narrow down file choices."
        )
        parser.add_argument(
            '--packages',
            '--package',
            '-k',
            nargs='+',
            help=
            "The diagnostic packages to run against the dataset(s). Multiple packages can be specified."
        )
        parser.add_argument(
            '--sets',
            '--set',
            '-s',
            nargs='+',
            help=
            "The sets within a diagnostic package to run. Multiple sets can be specified. If multiple packages were specified, the sets specified will be searched for in each package"
        )
        parser.add_argument(
            '--vars',
            '--var',
            '-v',
            nargs='+',
            help=
            "Specify variables of interest to process. The default is all variables which can also be specified with the keyword ALL"
        )
        parser.add_argument(
            '--list',
            '-l',
            nargs=1,
            choices=[
                'sets', 'vars', 'variables', 'packages', 'seasons', 'regions',
                'translations', 'options'
            ],
            help=
            "Determine which packages, sets, regions, variables, and variable options are available"
        )
        # maybe eventually add compression level too....
        parser.add_argument(
            '--compress',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Turn off netCDF compression. This can be required for other utilities to be able to process the output files (e.g. parallel netCDF based tools"
        )  #no compression, add self state

        parser.add_argument(
            '--outputpre',
            nargs=1,
            help=
            "Specify an output filename prefix to be prepended to all file names created internally. For example --outputpre myout might generate myout-JAN.nc, etc"
        )
        parser.add_argument(
            '--outputpost',
            nargs=1,
            help=
            "Specify an output filename postfix to be appended to all file names created internally. For example --outputpost _OBS might generate set1-JAN_OBS.nc, etc"
        )
        parser.add_argument(
            '--outputdir',
            '-O',
            nargs=1,
            help="Directory in which output files will be written.")

        parser.add_argument(
            '--seasons',
            nargs='+',
            choices=all_seasons,
            help="Specify which seasons to generate climatoogies for")
        parser.add_argument(
            '--years',
            nargs='+',
            help="Specify which years to include when generating climatologies"
        )
        parser.add_argument(
            '--months',
            nargs='+',
            choices=all_months,
            help="Specify which months to generate climatologies for")
        parser.add_argument(
            '--climatologies',
            '-c',
            nargs=1,
            choices=['no', 'yes'],
            help="Specifies whether or not climatologies should be generated")
        parser.add_argument(
            '--plots',
            '-t',
            nargs=1,
            choices=['no', 'yes'],
            help="Specifies whether or not plots should be generated")
        parser.add_argument('--plottype', nargs=1)
        parser.add_argument(
            '--precomputed',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Specifies whether standard climatologies are stored with the dataset (*-JAN.nc, *-FEB.nc, ... *-DJF.nc, *-year0.nc, etc"
        )
        parser.add_argument(
            '--json',
            '-j',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Produce JSON output files as part of climatology/diags generation"
        )  # same
        parser.add_argument(
            '--netcdf',
            '-n',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Produce NetCDF output files as part of climatology/diags generation"
        )  # same
        parser.add_argument(
            '--xml',
            '-x',
            nargs=1,
            choices=['no', 'yes'],
            help=
            "Produce XML output files as part of climatology/diags generation")
        parser.add_argument(
            '--seasonally',
            action='store_true',
            help=
            "Produce climatologies for all of the defined seasons. To get a list of seasons, run --list seasons"
        )
        parser.add_argument(
            '--monthly',
            action='store_true',
            help="Produce climatologies for all predefined months")
        parser.add_argument(
            '--yearly',
            action='store_true',
            help="Produce annual climatogolies for all years in the dataset")
        parser.add_argument(
            '--timestart',
            nargs=1,
            help=
            "Specify the starting time for the dataset, such as 'months since Jan 2000'"
        )
        parser.add_argument('--timebounds',
                            nargs=1,
                            choices=['daily', 'monthly', 'yearly'],
                            help="Specify the time boudns for the dataset")
        parser.add_argument(
            '--verbose',
            '-V',
            action='count',
            help=
            "Increase the verbosity level. Each -v option increases the verbosity more."
        )  # count
        parser.add_argument(
            '--name',
            action='append',
            nargs=1,
            help="Specify option names for the datasets for plot titles, etc"
        )  #optional name for the set
        # This will be the standard list of region names NCAR has
        parser.add_argument(
            '--regions',
            '--region',
            nargs='+',
            choices=all_regions.keys(),
            help=
            "Specify a geographical region of interest. Note: Multi-word regions need quoted, e.g. 'Central Canada'"
        )
        parser.add_argument('--starttime',
                            nargs=1,
                            help="Specify a start time in the dataset")
        parser.add_argument('--endtime',
                            nargs=1,
                            help="Specify an end time in the dataset")
        parser.add_argument(
            '--translate',
            nargs='?',
            default='y',
            help=
            "Enable translation for obs sets to datasets. Optional provide a colon separated input to output list e.g. DSVAR1:OBSVAR1"
        )
        parser.add_argument('--varopts',
                            nargs='+',
                            help="Variable auxillary options")

        args = parser.parse_args()

        if (args.list != None):
            if args.list[0] == 'translations':
                print "Default variable translations: "
                self.listTranslations()
                quit()
            if args.list[0] == 'regions':
                print "Available geographical regions: ", all_regions.keys()
                quit()

            if args.list[0] == 'seasons':
                print "Available seasons: ", all_seasons
                quit()

            if args.list[0] == 'packages':
                print "Listing available packages:"
                print self.all_packages.keys()
                quit()

            if args.list[0] == 'sets':
                if args.packages == None:
                    print "Please specify package before requesting available diags sets"
                    quit()
                for p in args.packages:
                    print 'Available sets for package ', p, ':'
                    sets = self.listSets(p)
                    keys = sets.keys()
                    for k in keys:
                        print 'Set', k, ' - ', sets[k]
                quit()

            if args.list[0] == 'variables' or args.list[0] == 'vars':
                if args.path != None:
                    for i in args.path:
                        self._opts['path'].append(i[0])
                else:
                    print 'Must provide a dataset when requesting a variable listing'
                    quit()
                self.listVariables(args.packages, args.sets)
                quit()
            if args.list[0] == 'options':
                if args.path != None:
                    for i in args.path:
                        self._opts['path'].append(i[0])
                else:
                    print 'Must provide a dataset when requesting a variable options listing'
                    quit()
                self.listVarOptions(args.packages, args.sets, args.vars)
                quit()

        # Generally if we've gotten this far, it means no --list was specified. If we don't have
        # at least a path, we should exit.
        if (args.path != None):
            for i in args.path:
                self._opts['path'].append(i[0])
        else:
            print 'Must specify a path or the --list option at a minimum.'
            print 'For help, type "diags --help".'
            quit()
        if (args.path2 != None):
            for i in args.path2:
                self._opts['path2'].append(i[0])

        if (args.obspath != None):
            for i in args.obspath:
                self._opts['obspath'].append(i[0])

        # TODO: Should some pre-defined filters be "nameable" here?
        if (args.filter !=
                None):  # Only supports one filter argument, see filter2.
            self._opts['filter'] = args.filter[0]
            self._opts['user_filter'] = True


#         for i in args.filter:
#            self._opts['filter'].append(i[0])
        if (args.filter2 != None):  # This is a second filter argument.
            self._opts['filter2'] = args.filter2[0]
            self._opts['user_filter'] = True
        if (args.new_filter !=
                None):  # like filter but with multiple arguments
            for i in args.new_filter:
                self._opts['new_filter'].append(i[0])

        if (args.cachepath != None):
            self._opts['cachepath'] = args.cachepath[0]

        self._opts['seasonally'] = args.seasonally
        self._opts['monthly'] = args.monthly

        if (args.starttime != None):
            self._opts['start'] = args.starttime[0]

        if (args.endtime != None):
            self._opts['end'] = args.endtime[0]

        # I checked; these are global and it doesn't seem to matter if you import cdms2 multiple times;
        # they are still set after you set them once in the python process.
        if (args.compress != None):
            if (args.compress[0] == 'no'):
                self._opts['compress'] = False
            else:
                self._opts['compress'] = True

        if self._opts['compress'] == True:
            print 'Enabling compression for output netCDF files'
            cdms2.setNetcdfShuffleFlag(1)
            cdms2.setNetcdfDeflateFlag(1)
            cdms2.setNetcdfDeflateLevelFlag(9)
        else:
            print 'Disabling compression for output netCDF files'
            cdms2.setNetcdfShuffleFlag(0)
            cdms2.setNetcdfDeflateFlag(0)
            cdms2.setNetcdfDeflateLevelFlag(0)

        if (args.json != None):
            if (args.json[0] == 'no'):
                self._opts['json'] = False
            else:
                self._opts['json'] = True
        if (args.xml != None):
            if (args.xml[0] == 'no'):
                self._opts['xml'] = False
            else:
                self._opts['xml'] = True

        if (args.netcdf != None):
            if (args.netcdf[0] == 'no'):
                self._opts['netcdf'] = False
            else:
                self._opts['netcdf'] = True

        if (args.plots != None):
            if (args.plots[0].lower() == 'no' or args.plots[0] == 0):
                self._opts['plots'] = False
            else:
                self._opts['plots'] = True

        if (args.climatologies != None):
            if (args.climatologies[0] == 'no'):
                self._opts['climatologies'] = False
            else:
                self._opts['climatologies'] = True

        self._opts['verbose'] = args.verbose

        if (args.name != None):
            for i in args.name:
                self._opts['dsnames'].append(i[0])

        # Help create output file names
        if (args.outputpre != None):
            self._opts['outputpre'] = args.outputpre[0]
        if (args.outputpost != None):
            self._opts['outputpost'] = args.outputpost[0]

        # Output directory
        if (args.outputdir != None):
            if not os.path.isdir(args.outputdir[0]):
                print "ERROR, output directory", args.outputdir[
                    0], "does not exist!"
                quit()
            self._opts['outputdir'] = args.outputdir[0]

        if (args.translate != 'y'):
            print args.translate
            print self._opts['translate']
            quit()
        # Timestart assumes a string like "months since 2000". I can't find documentation on
        # toRelativeTime() so I have no idea how to check for valid input
        # This is required for some of the land model sets I've seen
        if (args.timestart != None):
            self._opts['reltime'] = args.timestart

        # cdutil.setTimeBounds{bounds}(variable)
        if (args.timebounds != None):
            self._opts['bounds'] = args.timebounds

        # Check if a user specified package actually exists
        # Note: This is case sensitive.....
        if (args.packages != None):
            plist = []
            for x in args.packages:
                if x.upper() in self.all_packages.keys():
                    plist.append(x)
                elif x in self.all_packages.keys():
                    plist.append(x.lower())

            if plist == []:
                print 'Package name(s) ', args.packages, ' not valid'
                print 'Valid package names: ', self.all_packages.keys()
                quit()
            else:
                self._opts['packages'] = plist

        # TODO: Requires exact case; probably make this more user friendly and look for mixed case
        if (args.regions != None):
            rlist = []
            for x in args.regions:
                if x in all_regions.keys():
                    rlist.append(x)
            print 'REGIONS: ', rlist
            self._opts['regions'] = rlist

        # Given user-selected packages, check for user specified sets
        # Note: If multiple packages have the same set names, then they are all added to the list.
        # This might be bad since there is no differentiation of lmwg['id==set'] and lmwg2['id==set']
        if (self._opts['packages'] == None and args.sets != None):
            print 'No package specified'
            self._opts['sets'] = args.sets

        if (args.sets != None and self._opts['packages'] != None):
            # unfortunately, we have to go through all of this....
            # there should be a non-init of the class method to list sets/packages/etc,
            # ie a dictionary perhaps?
            sets = []
            import metrics.fileio.filetable as ft
            import metrics.fileio.findfiles as fi
            import metrics.packages.diagnostic_groups
            package = self._opts['packages']
            if package[0].lower() == 'lmwg':
                import metrics.packages.lmwg.lmwg
            elif package[0].lower() == 'amwg':
                import metrics.packages.amwg.amwg
            dtree = fi.dirtree_datafiles(self, pathid=0)
            filetable = ft.basic_filetable(dtree, self)
            dm = metrics.packages.diagnostic_groups.diagnostics_menu()

            pclass = dm[package[0].upper()]()

            slist = pclass.list_diagnostic_sets()
            keys = slist.keys()
            keys.sort()
            for k in keys:
                fields = k.split()
                for user in args.sets:
                    if user == fields[0]:
                        sets.append(user)
            self._opts['sets'] = sets
            if sets != args.sets:
                print 'sets requested ', args.sets
                print 'sets available: ', slist
                exit(1)

        # check for some varopts first.
        if (args.varopts != None):
            self._opts['varopts'] = args.varopts
        # Add some hackery here to convert pressure level vars to var+varopts
        if args.vars != None:
            self._opts['vars'] = args.vars

            vpl = ['Z3_300', 'Z3_500', 'U_200', 'T_200', 'T_850']
            vl = list(set(args.vars) - set(vpl))
            if vl == args.vars:  # no pressure level vars made it this far.
                print 'No pressure level vars found in input vars list.'
            else:  # more complicated....
                print 'Pressure level vars found in input vars list.... Processing....'
                vopts = []
                # hopefully the user didn't also specify varopts....
                if self._opts['varopts'] != [] and self._opts['varopts'] != None:
                    print 'User passed in varopts but there are pressure-level variables in the vars list.'
                    print 'This will append the pressure levels found to the varopts array'
                    # see which pressure level vars were passed. this will be the super set of pressure levels.
                if 'Z3_300' in self._opts['vars']:
                    vopts.append('300')
                    self._opts['vars'] = [
                        x.replace('Z3_300', 'Z3') for x in self._opts['vars']
                    ]
                if 'Z3_500' in self._opts['vars']:
                    vopts.append('500')
                    self._opts['vars'] = [
                        x.replace('Z3_500', 'Z3') for x in self._opts['vars']
                    ]
                if 'T_200' in self._opts['vars']:
                    vopts.append('200')
                    self._opts['vars'] = [
                        x.replace('T_200', 'T') for x in self._opts['vars']
                    ]
                if 'T_850' in self._opts['vars']:
                    vopts.append('850')
                    self._opts['vars'] = [
                        x.replace('T_850', 'T') for x in self._opts['vars']
                    ]
                if 'U_200' in self._opts['vars']:
                    vopts.append('200')
                    self._opts['vars'] = [
                        x.replace('U_200', 'U') for x in self._opts['vars']
                    ]
                vopts = list(set(vopts))
                if self._opts['varopts'] == [] or self._opts['varopts'] == None:
                    self._opts['varopts'] = vopts
                else:
                    self._opts['varopts'].extend(vopts)
                    self._opts['varopts'] = list(set(self._opts['varopts']))
                print 'Updated vars list: ', self._opts['vars']

        # If --yearly is set, then we will add 'ANN' to the list of climatologies
        if (args.yearly == True):
            self._opts['yearly'] = True
            self._opts['times'].append('ANN')

        # If --monthly is set, we add all months to the list of climatologies
        if (args.monthly == True):
            self._opts['monthly'] = True
            self._opts['times'].extend(all_months)

        # If --seasonally is set, we add all 4 seasons to the list of climatologies
        if (args.seasonally == True):
            self._opts['seasonally'] = True
            self._opts['times'].extend(all_seasons)

        # This allows specific individual months to be added to the list of climatologies
        if (args.months != None):
            if (args.monthly == True):
                print "Please specify just one of --monthly or --months"
                quit()
            else:
                mlist = [x for x in all_months if x in args.months]
                self._opts['times'] = self._opts['times'] + mlist

        # This allows specific individual years to be added to the list of climatologies.
        # Note: Checking for valid input is impossible until we look at the dataset
        # This has to be special cased since typically someone will be saying
        # "Generate climatologies for seasons for years X, Y, and Z of my dataset"
        if (args.years != None):
            if (args.yearly == True):
                print "Please specify just one of --yearly or --years"
                quit()
            else:
                self._opts['years'] = args.years

        if (args.seasons != None):
            if (args.seasonally == True):
                print "Please specify just one of --seasonally or --seasons"
                quit()
            else:
                slist = [x for x in all_seasons if x in args.seasons]
                self._opts['times'] = self._opts['times'] + slist
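
Several options above use action='append' together with nargs=1, which is why the body of processCmdLine indexes args.path[i][0] rather than args.path[i]. The following small, self-contained sketch shows the nesting argparse produces; the flag subset and sample values are illustrative only.

# Sketch: why the code above copies i[0] out of args.path and friends.
import argparse

parser = argparse.ArgumentParser(prog='diags')
parser.add_argument('--path', '-p', action='append', nargs=1)
parser.add_argument('--packages', '--package', '-k', nargs='+')
parser.add_argument('--sets', '--set', '-s', nargs='+')

args = parser.parse_args(['--path', '/some/model/run', '--package', 'lmwg', '--set', '3'])
# action='append' with nargs=1 wraps each occurrence in its own list:
#   args.path     == [['/some/model/run']]
#   args.packages == ['lmwg']
#   args.sets     == ['3']
# so the loops above append i[0] for each i in args.path.
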
Example #9
0
   def processCmdLine(self):
      parser = argparse.ArgumentParser(
         description='UV-CDAT Climate Modeling Diagnostics', 
         usage='%(prog)s --path1 [options]')

      parser.add_argument('--path', '-p', action='append', nargs=1, 
         help="Path(s) to dataset(s). This is required.  If two paths need different filters, set one here and one in path2.")
      parser.add_argument('--path2', '-q', action='append', nargs=1, 
         help="Path to a second dataset.")
      parser.add_argument('--obspath', action='append', nargs=1,
                          help="Path to an observational dataset")
      parser.add_argument('--cachepath', nargs=1,
         help="Path for temporary and cachced files. Defaults to /tmp")
#      parser.add_argument('--realm', '-r', nargs=1, choices=self.realm_types,
#         help="The realm type. Current valid options are 'land' and 'atmosphere'")
      parser.add_argument('--filter', '-f', nargs=1, 
         help="A filespec filter. This will be applied to the dataset path(s) (--path option) to narrow down file choices.")
      parser.add_argument('--filter2', '-g', nargs=1, 
         help="A filespec filter. This will be applied to the second dataset path (--path2 option) to narrow down file choices.")
      parser.add_argument('--new_filter', '-F', action='append', nargs=1, 
         help="A filespec filter. This will be applied to the corresponding dataset path to narrow down file choices.")
      parser.add_argument('--packages', '--package', '-k', nargs='+', 
         help="The diagnostic packages to run against the dataset(s). Multiple packages can be specified.")
      parser.add_argument('--sets', '--set', '-s', nargs='+', 
         help="The sets within a diagnostic package to run. Multiple sets can be specified. If multiple packages were specified, the sets specified will be searched for in each package") 
      parser.add_argument('--vars', '--var', '-v', nargs='+', 
         help="Specify variables of interest to process. The default is all variables which can also be specified with the keyword ALL") 
      parser.add_argument('--list', '-l', nargs=1, choices=['sets', 'vars', 'variables', 'packages', 'seasons', 'regions', 'translations', 'options'], 
         help="Determine which packages, sets, regions, variables, and variable options are available")
         # maybe eventually add compression level too....
      parser.add_argument('--compress', nargs=1, choices=['no', 'yes'],
         help="Turn off netCDF compression. This can be required for other utilities to be able to process the output files (e.g. parallel netCDF based tools") #no compression, add self state

      parser.add_argument('--outputpre', nargs=1,
         help="Specify an output filename prefix to be prepended to all file names created internally. For example --outputpre myout might generate myout-JAN.nc, etc")
      parser.add_argument('--outputpost', nargs=1,
         help="Specify an output filename postfix to be appended to all file names created internally. For example --outputpost _OBS might generate set1-JAN_OBS.nc, etc")
      parser.add_argument('--outputdir', '-O', nargs=1,
         help="Directory in which output files will be written." )

      parser.add_argument('--seasons', nargs='+', choices=all_seasons,
         help="Specify which seasons to generate climatoogies for")
      parser.add_argument('--years', nargs='+',
         help="Specify which years to include when generating climatologies") 
      parser.add_argument('--months', nargs='+', choices=all_months,
         help="Specify which months to generate climatologies for")
      parser.add_argument('--climatologies', '-c', nargs=1, choices=['no','yes'],
         help="Specifies whether or not climatologies should be generated")
      parser.add_argument('--plots', '-t', nargs=1, choices=['no','yes'],
         help="Specifies whether or not plots should be generated")
      parser.add_argument('--plottype', nargs=1)
      parser.add_argument('--precomputed', nargs=1, choices=['no','yes'], 
         help="Specifies whether standard climatologies are stored with the dataset (*-JAN.nc, *-FEB.nc, ... *-DJF.nc, *-year0.nc, etc")
      parser.add_argument('--json', '-j', nargs=1, choices=['no', 'yes'],
         help="Produce JSON output files as part of climatology/diags generation") # same
      parser.add_argument('--netcdf', '-n', nargs=1, choices=['no', 'yes'],
         help="Produce NetCDF output files as part of climatology/diags generation") # same
      parser.add_argument('--xml', '-x', nargs=1, choices=['no', 'yes'],
         help="Produce XML output files as part of climatology/diags generation")
      parser.add_argument('--seasonally', action='store_true',
         help="Produce climatologies for all of the defined seasons. To get a list of seasons, run --list seasons")
      parser.add_argument('--monthly', action='store_true',
         help="Produce climatologies for all predefined months")
      parser.add_argument('--yearly', action='store_true',
         help="Produce annual climatogolies for all years in the dataset")
      parser.add_argument('--timestart', nargs=1,
         help="Specify the starting time for the dataset, such as 'months since Jan 2000'")
      parser.add_argument('--timebounds', nargs=1, choices=['daily', 'monthly', 'yearly'],
         help="Specify the time boudns for the dataset")
      parser.add_argument('--verbose', '-V', action='count',
         help="Increase the verbosity level. Each -v option increases the verbosity more.") # count
      parser.add_argument('--name', action='append', nargs=1,
         help="Specify option names for the datasets for plot titles, etc") #optional name for the set
      # This will be the standard list of region names NCAR has
      parser.add_argument('--regions', '--region', nargs='+', choices=all_regions.keys(),
         help="Specify a geographical region of interest. Note: Multi-word regions need quoted, e.g. 'Central Canada'")
      parser.add_argument('--starttime', nargs=1,
         help="Specify a start time in the dataset")
      parser.add_argument('--endtime', nargs=1, 
         help="Specify an end time in the dataset")
      parser.add_argument('--translate', nargs='?', default='y',
         help="Enable translation for obs sets to datasets. Optional provide a colon separated input to output list e.g. DSVAR1:OBSVAR1")
      parser.add_argument('--varopts', nargs='+',
         help="Variable auxillary options")



      args = parser.parse_args()
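      # Note on the [0] indexing used throughout the code below: options declared with
      # nargs=1 come back from argparse as one-element lists, and action='append' options
      # come back as a list of those lists (hence loops such as "for i in args.path: ... i[0]").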

      if(args.list != None):
         if args.list[0] == 'translations':
            print "Default variable translations: "
            self.listTranslations()
            quit()
         if args.list[0] == 'regions':
            print "Available geographical regions: ", all_regions.keys()
            quit()

         if args.list[0] == 'seasons':
            print "Available seasons: ", all_seasons
            quit()

         if args.list[0] == 'packages':
            print "Listing available packages:"
            print self.all_packages.keys()
            quit()

         
         if args.list[0] == 'sets':
            if args.packages == None:
               print "Please specify package before requesting available diags sets"
               quit()
            for p in args.packages:
               print 'Available sets for package ', p, ':'
               sets = self.listSets(p)
               keys = sets.keys()
               for k in keys:
                  print 'Set', k, ' - ', sets[k]
            quit()
               
         if args.list[0] == 'variables' or args.list[0] == 'vars':
            if args.path != None:
               for i in args.path:
                  self._opts['path'].append(i[0])
            else:
               print 'Must provide a dataset when requesting a variable listing'
               quit()
            self.listVariables(args.packages, args.sets)
            quit()
         if args.list[0] == 'options':
            if args.path != None:
               for i in args.path:
                  self._opts['path'].append(i[0])
            else:
               print 'Must provide a dataset when requesting a variable options listing'
               quit()
            self.listVarOptions(args.packages, args.sets, args.vars)
            quit()

      # Generally if we've gotten this far, it means no --list was specified. If we don't have
      # at least a path, we should exit.
      if(args.path != None):
         for i in args.path:
            self._opts['path'].append(i[0])
      else:
         print 'Must specify a path or the --list option at a minimum.'
         print 'For help, type "diags --help".'
         quit()
      if(args.path2 != None):
         for i in args.path2:
            self._opts['path2'].append(i[0])

      if(args.obspath != None):
         for i in args.obspath:
            self._opts['obspath'].append(i[0])

      # TODO: Should some pre-defined filters be "nameable" here?
      if(args.filter != None): # Only supports one filter argument, see filter2.
         self._opts['filter'] = args.filter[0]
         self._opts['user_filter'] = True
#         for i in args.filter:
#            self._opts['filter'].append(i[0])
      if(args.filter2 != None): # This is a second filter argument.
         self._opts['filter2'] = args.filter2[0]
         self._opts['user_filter'] = True
      if(args.new_filter != None):  # like filter but with multiple arguments
         for i in args.new_filter:
            self._opts['new_filter'].append(i[0])

      if(args.cachepath != None):
         self._opts['cachepath'] = args.cachepath[0]

      self._opts['seasonally'] = args.seasonally
      self._opts['monthly'] = args.monthly

      if(args.varopts != None):
         self._opts['varopts'] = args.varopts

      if(args.starttime != None):
         self._opts['start'] = args.starttime[0]

      if(args.endtime != None):
         self._opts['end'] = args.endtime[0]

      # I checked; these flags are global and it doesn't seem to matter if you import cdms2 multiple times;
      # they are still set after you set them once in the Python process.
      if(args.compress != None):
         if(args.compress[0] == 'no'):
            self._opts['compress'] = False
         else:
            self._opts['compress'] = True


      if self._opts['compress'] == True:
         print 'Enabling compression for output netCDF files'
         cdms2.setNetcdfShuffleFlag(1)
         cdms2.setNetcdfDeflateFlag(1)
         cdms2.setNetcdfDeflateLevelFlag(9)
      else:
         print 'Disabling compression for output netCDF files'
         cdms2.setNetcdfShuffleFlag(0)
         cdms2.setNetcdfDeflateFlag(0)
         cdms2.setNetcdfDeflateLevelFlag(0)
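      # For reference: the shuffle and deflate flags above enable zlib compression inside the
      # netCDF output, and deflate level 9 is the highest (slowest) compression level; setting
      # all three flags to 0 writes uncompressed files.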
         

      if(args.json != None):
         if(args.json[0] == 'no'):
            self._opts['json'] = False
         else:
            self._opts['json'] = True
      if(args.xml != None):
         if(args.xml[0] == 'no'):
            self._opts['xml'] = False
         else:
            self._opts['xml'] = True

      if(args.netcdf != None):
         if(args.netcdf[0] == 'no'):
            self._opts['netcdf'] = False
         else:
            self._opts['netcdf'] = True

      if(args.plots != None):
         if(args.plots[0].lower() == 'no' or args.plots[0] == 0):
            self._opts['plots'] = False
         else:
            self._opts['plots'] = True

      if(args.climatologies != None):
         if(args.climatologies[0] == 'no'):
            self._opts['climatologies'] = False
         else:
            self._opts['climatologies'] = True

      self._opts['verbose'] = args.verbose

      if(args.name != None):
         for i in args.name:
            self._opts['dsnames'].append(i[0])

      # Help create output file names
      if(args.outputpre != None):
         self._opts['outputpre'] = args.outputpre[0]
      if(args.outputpost != None):
         self._opts['outputpost'] = args.outputpost[0]

      # Output directory
      if(args.outputdir != None):
         if not os.path.isdir(args.outputdir[0]):
            print "ERROR, output directory",args.outputdir[0],"does not exist!"
            quit()
         self._opts['outputdir'] = args.outputdir[0]

      if(args.translate != 'y'):
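         # NOTE: a non-default --translate value is currently just echoed (along with the
         # existing self._opts['translate'] setting) and then the program exits here;
         # custom translation pairs are not processed any further.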
         print args.translate
         print self._opts['translate']
         quit()
      # Timestart assumes a string like "months since 2000". I can't find documentation on
      # toRelativeTime() so I have no idea how to check for valid input
      # This is required for some of the land model sets I've seen
      if(args.timestart != None):
         self._opts['reltime'] = args.timestart
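      # Illustrative value, following the comment above (assumption: any relative-time
      # string cdtime can parse should work):
      #   --timestart 'months since 2000'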
         
      # Corresponding call: cdutil.setTimeBounds<Daily|Monthly|Yearly>(variable), chosen by this option
      if(args.timebounds != None):
         self._opts['bounds'] = args.timebounds

      # Check if a user specified package actually exists
      # Note: This is case sensitive.....
      if(args.packages != None):
         plist = []
         for x in args.packages:
            if x.upper() in self.all_packages.keys():
               plist.append(x)
            elif x in self.all_packages.keys():
               plist.append(x.lower())

         if plist == []:
            print 'Package name(s) ', args.packages, ' not valid'
            print 'Valid package names: ', self.all_packages.keys()
            quit()
         else:
            self._opts['packages'] = plist


      # TODO: Requires exact case; probably make this more user friendly and look for mixed case
      if(args.regions != None):
         rlist = []
         for x in args.regions:
            if x in all_regions.keys():
               rlist.append(x)
         print 'REGIONS: ', rlist
         self._opts['regions'] = rlist

      # Given user-selected packages, check for user specified sets
      # Note: If multiple packages have the same set names, then they are all added to the list.
      # This might be bad since there is no differentiation of lmwg['id==set'] and lmwg2['id==set']
      if(self._opts['packages'] == None and args.sets != None):
         print 'No package specified'
         self._opts['sets'] = args.sets

      if(args.sets != None and self._opts['packages'] != None):
         # unfortunately, we have to go through all of this...
         # there should be a way to list sets/packages/etc. without initializing the class,
         # i.e. a dictionary perhaps?
         sets = []
         import metrics.fileio.filetable as ft
         import metrics.fileio.findfiles as fi
         import metrics.packages.diagnostic_groups 
         package = self._opts['packages']
         if package[0].lower() == 'lmwg':
            import metrics.packages.lmwg.lmwg
         elif package[0].lower()=='amwg':
            import metrics.packages.amwg.amwg
         dtree = fi.dirtree_datafiles(self, pathid=0)
         filetable = ft.basic_filetable(dtree, self)
         dm = metrics.packages.diagnostic_groups.diagnostics_menu()

         pclass = dm[package[0].upper()]()
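         # diagnostics_menu() gives a mapping from upper-case package names to their
         # diagnostics classes; instantiating the entry for the requested package lets us
         # ask it which diagnostic sets it provides.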

         slist = pclass.list_diagnostic_sets()
         keys = slist.keys()
         keys.sort()
         for k in keys:
            fields = k.split()
            for user in args.sets:
               if user == fields[0]:
                  sets.append(user)
         self._opts['sets'] = sets
         if sets != args.sets:
            print 'sets requested ', args.sets
            print 'sets available: ', slist
            exit(1)

      # TODO: Check against an actual list of variables from the set
      if args.vars != None:
         self._opts['vars'] = args.vars

      # If --yearly is set, then we will add 'ANN' to the list of climatologies
      if(args.yearly == True):
         self._opts['yearly'] = True
         self._opts['times'].append('ANN')

      # If --monthly is set, we add all months to the list of climatologies
      if(args.monthly == True):
         self._opts['monthly'] = True
         self._opts['times'].extend(all_months)

      # If --seasonally is set, we add all 4 seasons to the list of climatologies
      if(args.seasonally == True):
         self._opts['seasonally'] = True
         self._opts['times'].extend(all_seasons)

      # This allows specific individual months to be added to the list of climatologies
      if(args.months != None):
         if(args.monthly == True):
            print "Please specify just one of --monthly or --months"
            quit()
         else:
            mlist = [x for x in all_months if x in args.months]
            self._opts['times'] = self._opts['times']+mlist

      # This allows specific individual years to be added to the list of climatologies.
      # Note: Checking for valid input is impossible until we look at the dataset
      # This has to be special cased since typically someone will be saying
      # "Generate climatologies for seasons for years X, Y, and Z of my dataset"
      if(args.years != None):
         if(args.yearly == True):
            print "Please specify just one of --yearly or --years"
            quit()
         else:
            self._opts['years'] = args.years

      if(args.seasons != None):
         if(args.seasonally == True):
            print "Please specify just one of --seasonally or --seasons"
            quit()
         else:
            slist = [x for x in all_seasons if x in args.seasons]
            self._opts['times'] = self._opts['times']+slist
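
      # Illustrative invocation exercising the options parsed above (paths, set numbers, and
      # variable names are placeholders, not taken from a real run):
      #   diags --path /data/model_run --packages amwg --sets 5 --vars T --seasonally --plots yes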
Example #10
0
   def verifyOptions(self):

   # TODO Determine if path is a single file, e.g. a cdscan generated XML file, or a directory,
   # and if it is a directory, whether an already-present .xml file should be ignored or not. Might
   # need an option to control that.
   # Other things to (eventually) verify:
   #    1) Start/end years are valid and within the range specified
      import metrics.fileio.filetable as ft
      import metrics.fileio.findfiles as fi
      import metrics.packages.diagnostic_groups
      import os
      if len(self._opts['model']) == 0 and len(self._opts['obs']) == 0:
         logging.critical('At least one model or obs set needs to be described')
         quit()
      if len(self._opts['model']) != 0:
         for i in range(len(self._opts['model'])):
            if self._opts['model'][i]['path'] == None or self._opts['model'][i]['path'] == '':
               logging.critical('Each dataset must have a path provided')
               quit()
            # check if the path exists
            if not os.path.exists(self._opts['model'][i]['path']):
               logging.critical('Path - %s - does not exist', self._opts['model'][i]['path'])
               quit()
      if len(self._opts['obs']) != 0:
         for i in range(len(self._opts['obs'])):
            if self._opts['obs'][i]['path'] == None or self._opts['obs'][i]['path'] == '':
               logging.critical('Each dataset must have a path provided')
               quit()
            if not os.path.exists(self._opts['obs'][i]['path']):
               logging.critical('Obs Path - %s - does not exist', self._opts['obs'][i]['path'])
               quit()
#      if(self._opts['package'] == None):
#         print 'Please specify a package e.g. AMWG, LMWG, etc'
#         quit()

      # A path is guaranteed, even if it is just /tmp. So check for it
      # We shouldn't get here anyway. This is primarily in case something gets appended to the user-specified outputdir
      # in options(). Currently that happens elsewhere, but seems like a reasonable check to keep here anyway.
      if not os.path.exists(self._opts['output']['outputdir']):
         logging.critical('output directory %s does not exist', self._opts['output']['outputdir'])
         quit()

      if(self._opts['package'] != None):
         keys = self.all_packages.keys()
         ukeys = []
         for k in keys:
            ukeys.append(k.upper())

         if self._opts['package'].upper() not in ukeys:
            logging.critical('Package %s not found in the list of package names - %s', self._opts['package'], self.all_packages.keys())
            quit()

      # Should we check for mixed case too? I suppose.
      if(self._opts['regions'] != []):
         rlist = []
         for x in self._opts['regions']:
            if x in all_regions.keys():
               rlist.append(x)
         rlist.sort()
         self._opts['regions'].sort()
         if rlist != self._opts['regions']:
            logging.critical('Unknown region[s] specified: %s', list(set(self._opts['regions']) - set(rlist)))
            quit()
      if(self._opts['sets'] != None and self._opts['package'] != None):
         sets = []
         package = self._opts['package']
         if package.lower() == 'lmwg':
            import metrics.packages.lmwg.lmwg
         elif package.lower()=='amwg':
            import metrics.packages.amwg.amwg
         dtree = fi.dirtree_datafiles(self, modelid=0)
         filetable = ft.basic_filetable(dtree, self)