Example 1
    def execute_model(workspace, source_parameter_set):
        """Helper function to run a model from its parameter set file.

        Args:
            workspace (str): The path to the workspace to use for the test run.
                All files will be written here.
            source_parameter_set (str): The path to the parameter set from
                which the args dict and model name should be loaded.

        Returns:
            ``None``
        """
        from natcap.invest import datastack

        source_args = datastack.extract_parameter_set(source_parameter_set)
        model_name = source_args.model_name

        datastack_archive_path = os.path.join(workspace,
                                              'datastack.invs.tar.gz')
        datastack.build_datastack_archive(source_args.args, model_name,
                                          datastack_archive_path)

        extraction_dir = os.path.join(workspace, 'archived_data')
        args = datastack.extract_datastack_archive(datastack_archive_path,
                                                   extraction_dir)
        args['workspace_dir'] = os.path.join(workspace, 'workspace')

        # validate the args for good measure
        module = importlib.import_module(name=model_name)
        errors = module.validate(args)
        if errors != []:
            raise AssertionError(f"Errors found: {pprint.pformat(errors)}")

        module.execute(args)
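
For reference, a minimal invocation of this helper might look like the sketch below. The parameter set path is a hypothetical placeholder; substitute any InVEST sample datastack (*.invs.json) you have on disk.

import os
import tempfile

# Hypothetical usage of execute_model(); the .invs.json path is a placeholder.
workspace = tempfile.mkdtemp()
source_parameter_set = os.path.join(
    'invest-sample-data', 'some_model.invs.json')
execute_model(workspace, source_parameter_set)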
def lobster_jess():
    paramset = datastack.extract_parameter_set(
        '../../invest/data/invest-sample-data/spiny_lobster_belize.invs.json')
    args = paramset.args.copy()

    pprint.pprint(args)

    # lobster is age-based.
    model_type = 'age'
    #alpha = numpy.float64(args['alpha'])
    #beta = numpy.float64(args['beta'])
    # n_init_recruits = numpy.int64(args['total_init_recruits'])

    # These are the parameters that Jess and Jodie want to use, but their
    # migrations spreadsheet isn't referencing their modified sheet.
    n_init_recruits = 4686959
    alpha = numpy.float64(1000)
    beta = numpy.float64(0.00000016069)
    recruitment = beverton_holt_1
    args['alpha'] = alpha
    args['beta'] = beta
    args['total_init_recruits'] = n_init_recruits

    # These are the values in the Model_Lobster sheet
    #n_init_recruits = 4686959
    #alpha = numpy.float64(5770000)
    #beta = numpy.float64(2885000)
    #recruitment = beverton_holt_2

    LOGGER.info('Spiny Lobster - Jess')
    model(args, recruitment=recruitment)
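
The beverton_holt_1 and beverton_holt_2 callables are defined elsewhere in this script; the two alpha/beta pairs above suggest the two common Beverton-Holt parameterizations. As hedged sketches (not necessarily the script's exact definitions):

import numpy

def beverton_holt_form_1(alpha, beta, spawners):
    """One common Beverton-Holt form: R = (alpha * S) / (1 + beta * S)."""
    spawners = numpy.float64(spawners)
    return (alpha * spawners) / (1 + beta * spawners)

def beverton_holt_form_2(alpha, beta, spawners):
    """The other common form: R = (alpha * S) / (beta + S)."""
    spawners = numpy.float64(spawners)
    return (alpha * spawners) / (beta + spawners)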
Example 3
    def test_relative_parameter_set(self):
        """Datastack: test relative parameter set."""
        from natcap.invest import __version__
        from natcap.invest import datastack

        params = {
            'a': 1,
            'b': 'hello there',
            'c': 'plain bytestring',
            'nested': {
                'level1': 123,
            },
            'foo': os.path.join(self.workspace, 'foo.txt'),
            'bar': os.path.join(self.workspace, 'foo.txt'),
            'file_list': [
                os.path.join(self.workspace, 'file1.txt'),
                os.path.join(self.workspace, 'file2.txt'),
            ],
            'data_dir': os.path.join(self.workspace, 'data_dir'),
            'temp_workspace': self.workspace,
        }
        modelname = 'natcap.invest.foo'
        paramset_filename = os.path.join(self.workspace, 'paramset.json')

        # make the sample data so filepaths are interpreted correctly
        for file_base in ('foo', 'bar', 'file1', 'file2'):
            test_filepath = os.path.join(self.workspace, file_base + '.txt')
            with open(test_filepath, 'w') as test_file:
                test_file.write('hello!')
        os.makedirs(params['data_dir'])

        # Write the parameter set
        datastack.build_parameter_set(params,
                                      modelname,
                                      paramset_filename,
                                      relative=True)

        # Check that the written parameter set file contains relative paths
        with open(paramset_filename) as paramset_file:
            raw_args = json.load(paramset_file)['args']
        self.assertEqual(raw_args['foo'], 'foo.txt')
        self.assertEqual(raw_args['bar'], 'foo.txt')
        self.assertEqual(raw_args['file_list'], ['file1.txt', 'file2.txt'])
        self.assertEqual(raw_args['data_dir'], 'data_dir')
        self.assertEqual(raw_args['temp_workspace'], '.')

        # Read back the parameter set and verify the returned paths are
        # absolute
        args, callable_name, invest_version = datastack.extract_parameter_set(
            paramset_filename)

        self.assertEqual(args, params)
        self.assertEqual(invest_version, __version__)
        self.assertEqual(callable_name, modelname)
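
The relative/absolute round trip this test exercises can be sketched with the standard library alone. This illustrates the mechanism being verified, not the datastack module's actual implementation:

import os

def store_relative(path, paramset_dir):
    # Paths are written relative to the parameter set file's directory;
    # relpath of the workspace against itself yields '.', as asserted above.
    return os.path.relpath(path, start=paramset_dir)

def load_absolute(stored_path, paramset_dir):
    # On extraction, stored paths are resolved back against that directory.
    return os.path.normpath(os.path.join(paramset_dir, stored_path))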
def lobster():
    paramset = datastack.extract_parameter_set(
        '../../invest/data/invest-sample-data/spiny_lobster_belize.invs.json')
    args = paramset.args.copy()
    args['total_init_recruits'] = 4686959  # to match spreadsheet

    LOGGER.info('Spiny Lobster - Sample Data')
    spawners, harvest = model(args, recruitment=beverton_holt_2)

    check_expected(spawners, 2847870)
    check_expected(harvest, 963451)
Example 5
    def test_datastack_parameter_set(self):
        """Datastack: test datastack parameter set."""
        from natcap.invest import __version__
        from natcap.invest import datastack

        params = {
            'a': 1,
            'b': 'hello there',
            'c': 'plain bytestring',
            'd': 'true',
            'nested': {
                'level1': 123,
            },
            'foo': os.path.join(self.workspace, 'foo.txt'),
            'bar': os.path.join(self.workspace, 'foo.txt'),
            'file_list': [
                os.path.join(self.workspace, 'file1.txt'),
                os.path.join(self.workspace, 'file2.txt'),
            ],
            'data_dir': os.path.join(self.workspace, 'data_dir'),
            'raster': os.path.join(DATA_DIR, 'dem'),
            'vector': os.path.join(DATA_DIR, 'watersheds.shp'),
            'table': os.path.join(
                DATA_DIR, 'carbon', 'carbon_pools_samp.csv'),
        }
        modelname = 'natcap.invest.foo'
        paramset_filename = os.path.join(self.workspace, 'paramset.json')

        # Write the parameter set
        datastack.build_parameter_set(params, modelname, paramset_filename)

        # Read back the parameter set
        args, callable_name, invest_version = datastack.extract_parameter_set(
            paramset_filename)

        # Parameter set extraction normalizes all paths.
        # These are relative paths and must be patched.
        normalized_params = params.copy()
        normalized_params['d'] = True  # should be read in as a bool
        for key in ('raster', 'vector', 'table'):
            normalized_params[key] = os.path.normpath(normalized_params[key])

        self.assertEqual(args, normalized_params)
        self.assertEqual(invest_version, __version__)
        self.assertEqual(callable_name, modelname)
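
The 'd': 'true' entry shows that extraction coerces certain strings to richer types. A minimal sketch of the coercion the assertion implies (an illustration only, not the library's actual code):

def coerce_bool_string(value):
    # The test expects the string 'true' to come back as the boolean True.
    if isinstance(value, str) and value.lower() in ('true', 'false'):
        return value.lower() == 'true'
    return value

assert coerce_bool_string('true') is True
assert coerce_bool_string('plain bytestring') == 'plain bytestring'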
def shrimp():
    # Uses fixed recruitment
    # Stage-based model
    # No migration
    LOGGER.info('White Shrimp')
    paramset = datastack.extract_parameter_set(
        '../../invest/data/invest-sample-data/white_shrimp_galveston_bay.invs.json'
    )
    args = paramset.args.copy()

    spawners, harvest = model(args, recruitment=fixed)

    check_expected(spawners, 2.16e11)  # fixed recruitment
    check_expected(harvest, 3096000)
def dungeness_crab():
    # Ricker recruitment
    # spawners are individuals
    # age-based population model
    # no migration
    # Sex-specific population
    LOGGER.info('Dungeness Crab')
    paramset = datastack.extract_parameter_set(
        '../../invest/data/invest-sample-data/dungeness_crab_hood_canal.invs.json'
    )
    args = paramset.args.copy()

    spawners, harvest = model(args, recruitment=ricker)
    check_expected(spawners, 4051538)
    check_expected(harvest, 526987)
def blue_crab():
    # Ricker recruitment
    # spawners are individuals
    # age-based population model
    # non-sex-specific population
    # harvest by individuals
    LOGGER.info('Blue Crab')
    paramset = datastack.extract_parameter_set(
        '../../invest/data/invest-sample-data/blue_crab_galveston_bay.invs.json'
    )
    args = paramset.args.copy()

    spawners, harvest = model(args, recruitment=ricker)
    check_expected(spawners, 42644460)
    check_expected(harvest, 24798419)
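
The ricker and fixed recruitment callables used above are defined elsewhere in the script. As hedged sketches of the textbook forms (the script's actual signatures may differ):

import numpy

def ricker_sketch(alpha, beta, spawners):
    """Textbook Ricker stock-recruitment: R = alpha * S * exp(-beta * S)."""
    spawners = numpy.float64(spawners)
    return alpha * spawners * numpy.exp(-beta * spawners)

def fixed_sketch(n_recruits):
    """Fixed recruitment: a constant number of recruits per time step."""
    return numpy.float64(n_recruits)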
Example 9
    def test_relative_parameter_set_windows(self):
        """Datastack: test relative parameter set paths saved linux style."""
        from natcap.invest import __version__
        from natcap.invest import datastack

        params = {
            'foo': os.path.join(self.workspace, 'foo.txt'),
            'bar': os.path.join(self.workspace, 'inter_dir', 'bar.txt'),
            'doh': os.path.join(self.workspace, 'inter_dir',
                                'inter_inter_dir', 'doh.txt'),
            'data_dir': os.path.join(self.workspace, 'data_dir'),
        }
        os.makedirs(
            os.path.join(self.workspace, 'inter_dir', 'inter_inter_dir'))
        modelname = 'natcap.invest.foo'
        paramset_filename = os.path.join(self.workspace, 'paramset.json')

        # make the sample data so filepaths are interpreted correctly
        for base_name in ('foo', 'bar', 'doh'):
            with open(params[base_name], 'w') as sample_file:
                sample_file.write('hello!')
        os.makedirs(params['data_dir'])

        # Write the parameter set
        datastack.build_parameter_set(params,
                                      modelname,
                                      paramset_filename,
                                      relative=True)

        # Check that the written parameter set file contains relative paths
        with open(paramset_filename) as paramset_file:
            raw_args = json.load(paramset_file)['args']
        self.assertEqual(raw_args['foo'], 'foo.txt')
        # Expecting linux style path separators for Windows
        self.assertEqual(raw_args['bar'], 'inter_dir/bar.txt')
        self.assertEqual(raw_args['doh'], 'inter_dir/inter_inter_dir/doh.txt')
        self.assertEqual(raw_args['data_dir'], 'data_dir')

        # Read back the parameter set and verify the returned paths are
        # absolute
        args, callable_name, invest_version = datastack.extract_parameter_set(
            paramset_filename)

        self.assertEqual(args, params)
        self.assertEqual(invest_version, __version__)
        self.assertEqual(callable_name, modelname)
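
The linux-style separators asserted above can be produced portably with pathlib. A sketch of the conversion this test expects (an illustration, not necessarily how build_parameter_set does it):

import pathlib

def to_posix(relative_path):
    # PureWindowsPath accepts both '\\' and '/' separators, so this
    # normalizes e.g. 'inter_dir\\bar.txt' to 'inter_dir/bar.txt'.
    return pathlib.PureWindowsPath(relative_path).as_posix()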
Example 10
def main(sampledatadir):
    """Do validation for each datastack and store error messages.

    Parameters:
        sampledatadir (string): path to the invest-sample-data repository,
            where '*invs.json' datastack files are expected to be in the root.

    Returns:
        None

    Raises:
        ValueError if any module's `validate` function issues warnings.
    """
    validation_messages = ''
    datastacks = glob.glob(os.path.join(sampledatadir, '**/*.json'))
    if not datastacks:
        raise ValueError(f'no json files found in {sampledatadir}')

    for datastack_path in datastacks:
        paramset = datastack.extract_parameter_set(datastack_path)
        if 'workspace_dir' in paramset.args and \
                paramset.args['workspace_dir'] != '':
            msg = ('%s : workspace_dir should not be defined '
                   'for sample datastacks' % datastack_path)
            validation_messages += os.linesep + msg
            LOGGER.error(msg)
        else:
            paramset.args['workspace_dir'] = tempfile.mkdtemp()
        model_module = importlib.import_module(name=paramset.model_name)

        model_warnings = []  # define here in case of uncaught exception.
        try:
            LOGGER.info('validating %s ', datastack_path)
            model_warnings = getattr(model_module, 'validate')(paramset.args)
        except AttributeError as err:
            # If there was no validate function, don't crash but raise it later.
            model_warnings = err
        finally:
            if model_warnings:
                LOGGER.error(model_warnings)
                validation_messages += (os.linesep + datastack_path + ': ' +
                                        pprint.pformat(model_warnings))
            if os.path.exists(paramset.args['workspace_dir']):
                os.rmdir(paramset.args['workspace_dir'])

    if validation_messages:
        raise ValueError(validation_messages)
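
If this script is run directly, a minimal entry point (hypothetical; the original file may wire this up differently) would be:

import sys

if __name__ == '__main__':
    # Expects one argument: the path to the invest-sample-data checkout.
    main(sys.argv[1])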
Example 11
    def test_mixed_path_separators_in_paramset_windows(self):
        """Datastacks: parameter sets must handle windows and linux paths."""
        from natcap.invest import datastack

        args = {
            'windows_path': os.path.join(self.workspace,
                                         'dir1\\filepath1.txt'),
            'linux_path': os.path.join(self.workspace, 'dir2/filepath2.txt'),
        }
        for filepath in args.values():
            normalized_path = os.path.normpath(filepath.replace('\\', os.sep))
            try:
                os.makedirs(os.path.dirname(normalized_path))
            except OSError:
                pass

            with open(normalized_path, 'w') as open_file:
                open_file.write('the contents of this file do not matter.')

        paramset_path = os.path.join(self.workspace, 'paramset.invest.json')
        # Windows paths should be saved with linux-style separators
        datastack.build_parameter_set(args,
                                      'sample_model',
                                      paramset_path,
                                      relative=True)

        with open(paramset_path) as saved_parameters:
            args = json.loads(saved_parameters.read())['args']
            # Expecting windows_path to have linux-style path separators
            expected_args = {
                'windows_path': 'dir1/filepath1.txt',
                'linux_path': 'dir2/filepath2.txt',
            }
            self.assertEqual(expected_args, args)

        expected_args = {
            'windows_path': os.path.join(self.workspace, 'dir1',
                                         'filepath1.txt'),
            'linux_path': os.path.join(self.workspace, 'dir2',
                                       'filepath2.txt'),
        }

        extracted_paramset = datastack.extract_parameter_set(paramset_path)
        self.assertEqual(extracted_paramset.args, expected_args)
Example 12
    def test_mixed_path_separators_in_paramset_mac(self):
        """Datastacks: parameter sets must handle mac and linux paths."""
        from natcap.invest import datastack

        args = {
            'mac_path': os.path.join(self.workspace, 'dir1/filepath1.txt'),
            'linux_path': os.path.join(self.workspace, 'dir2/filepath2.txt'),
        }
        for filepath in args.values():
            try:
                os.makedirs(os.path.dirname(filepath))
            except OSError:
                pass

            with open(filepath, 'w') as open_file:
                open_file.write('the contents of this file do not matter.')

        paramset_path = os.path.join(self.workspace, 'paramset.invest.json')
        datastack.build_parameter_set(args,
                                      'sample_model',
                                      paramset_path,
                                      relative=True)

        with open(paramset_path) as saved_parameters:
            args = json.loads(saved_parameters.read())['args']
            expected_args = {
                'mac_path': 'dir1/filepath1.txt',
                'linux_path': 'dir2/filepath2.txt',
            }
            self.assertEqual(expected_args, args)

        expected_args = {
            'mac_path': os.path.join(self.workspace, 'dir1', 'filepath1.txt'),
            'linux_path': os.path.join(self.workspace, 'dir2',
                                       'filepath2.txt'),
        }

        extracted_paramset = datastack.extract_parameter_set(paramset_path)
        self.assertEqual(extracted_paramset.args, expected_args)
Example 13
def main(user_args=None):
    """CLI entry point for launching InVEST runs.

    This command-line interface supports two methods of launching InVEST models
    from the command-line:

        * through its GUI
        * in headless mode, without its GUI.

    Running in headless mode allows us to bypass all GUI functionality,
    so models may be run in this way without having GUI packages
    installed.
    """
    parser = argparse.ArgumentParser(description=(
        'Integrated Valuation of Ecosystem Services and Tradeoffs. '
        'InVEST (Integrated Valuation of Ecosystem Services and '
        'Tradeoffs) is a family of tools for quantifying the values of '
        'natural capital in clear, credible, and practical ways. In '
        'promising a return (of societal benefits) on investments in '
        'nature, the scientific community needs to deliver knowledge and '
        'tools to quantify and forecast this return. InVEST enables '
        'decision-makers to quantify the importance of natural capital, '
        'to assess the tradeoffs associated with alternative choices, '
        'and to integrate conservation and human development.  \n\n'
        'Older versions of InVEST ran as script tools in the ArcGIS '
        'ArcToolBox environment, but have almost all been ported over to '
        'a purely open-source python environment.'),
                                     prog='invest')
    parser.add_argument('--version', action='version', version=__version__)
    verbosity_group = parser.add_mutually_exclusive_group()
    verbosity_group.add_argument(
        '-v',
        '--verbose',
        dest='verbosity',
        default=0,
        action='count',
        help=('Increase verbosity.  Affects how much logging is printed to '
              'the console and (if running in headless mode) how much is '
              'written to the logfile.'))
    verbosity_group.add_argument('--debug',
                                 dest='log_level',
                                 default=logging.CRITICAL,
                                 action='store_const',
                                 const=logging.DEBUG,
                                 help='Enable debug logging. Alias for -vvvvv')

    subparsers = parser.add_subparsers(dest='subcommand')

    listmodels_subparser = subparsers.add_parser(
        'list', help='List the available InVEST models')
    listmodels_subparser.add_argument('--json',
                                      action='store_true',
                                      help='Write output as a JSON object')

    subparsers.add_parser('launch', help='Start the InVEST launcher window')

    run_subparser = subparsers.add_parser('run', help='Run an InVEST model')
    run_subparser.add_argument('-l',
                               '--headless',
                               action='store_true',
                               help=('Run an InVEST model without its GUI. '
                                     'Requires a datastack and a workspace.'))
    run_subparser.add_argument(
        '-d',
        '--datastack',
        default=None,
        nargs='?',
        help=('Run the specified model with this JSON datastack. '
              'Required if using --headless'))
    run_subparser.add_argument(
        '-w',
        '--workspace',
        default=None,
        nargs='?',
        help=('The workspace in which outputs will be saved. '
              'Required if using --headless'))
    run_subparser.add_argument(
        'model',
        action=SelectModelAction,  # Assert valid model name
        help=('The model to run.  Use "invest list" to list the available '
              'models.'))

    quickrun_subparser = subparsers.add_parser(
        'quickrun',
        help=('Run through a model with a specific datastack, exiting '
              'immediately upon completion. This subcommand is only intended '
              'to be used by automated testing scripts.'))
    quickrun_subparser.add_argument(
        'model',
        action=SelectModelAction,  # Assert valid model name
        help=('The model to run.  Use "invest list" to list the available '
              'models.'))
    quickrun_subparser.add_argument(
        'datastack', help=('Run the model with this JSON datastack.'))
    quickrun_subparser.add_argument(
        '-w',
        '--workspace',
        default=None,
        nargs='?',
        help=('The workspace in which outputs will be saved.'))

    validate_subparser = subparsers.add_parser(
        'validate', help=('Validate the parameters of a datastack'))
    validate_subparser.add_argument('--json',
                                    action='store_true',
                                    help='Write output as a JSON object')
    validate_subparser.add_argument(
        'datastack', help=('Run the model with this JSON datastack.'))

    getspec_subparser = subparsers.add_parser(
        'getspec', help=('Get the specification of a model.'))
    getspec_subparser.add_argument('--json',
                                   action='store_true',
                                   help='Write output as a JSON object')
    getspec_subparser.add_argument(
        'model',
        action=SelectModelAction,  # Assert valid model name
        help=('The model for which the spec should be fetched.  Use "invest '
              'list" to list the available models.'))

    args = parser.parse_args(user_args)

    root_logger = logging.getLogger()
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
        fmt='%(asctime)s %(name)-18s %(levelname)-8s %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S ')
    handler.setFormatter(formatter)

    # Set the log level based on what the user provides in the available
    # arguments.  Verbosity: the more v's the lower the logging threshold.
    # If --debug is used, the logging threshold is 10.
    # If the user goes lower than logging.DEBUG, default to logging.DEBUG.
    log_level = min(args.log_level, logging.CRITICAL - (args.verbosity * 10))
    handler.setLevel(max(log_level, logging.DEBUG))  # don't go below DEBUG
    root_logger.addHandler(handler)
    LOGGER.info('Setting handler log level to %s', log_level)

    # FYI: Root logger by default has a level of logging.WARNING.
    # To capture ALL logging produced in this system at runtime, use this:
    # logging.getLogger().setLevel(logging.DEBUG)
    # Also FYI: a level of logging.NOTSET (not DEBUG) makes a logger defer
    # to its parent logger's effective level.
    logging.getLogger('natcap').setLevel(logging.DEBUG)

    if args.subcommand == 'list':
        if args.json:
            message = build_model_list_json()
        else:
            message = build_model_list_table()

        sys.stdout.write(message)
        parser.exit()

    if args.subcommand == 'launch':
        from natcap.invest.ui import launcher
        parser.exit(launcher.main())

    if args.subcommand == 'validate':
        try:
            parsed_datastack = datastack.extract_parameter_set(args.datastack)
        except Exception as error:
            parser.exit(
                1, "Error when parsing JSON datastack:\n    " + str(error))

        model_module = importlib.import_module(
            name=parsed_datastack.model_name)

        try:
            validation_result = getattr(model_module,
                                        'validate')(parsed_datastack.args)
        except KeyError as missing_keys_error:
            if args.json:
                message = json.dumps({
                    'validation_results': {
                        str(list(missing_keys_error.args)): 'Key is missing'
                    }
                })
            else:
                message = ('Datastack is missing keys:\n    ' +
                           str(missing_keys_error.args))

            # Missing keys exit with code 1 because they most likely
            # indicate a programmer error.
            sys.stdout.write(message)
            parser.exit(1)
        except Exception as error:
            parser.exit(
                1, ('Datastack could not be validated:\n    ' + str(error)))

        # Even validation errors will have an exit code of 0
        if args.json:
            message = json.dumps({'validation_results': validation_result})
        else:
            message = pprint.pformat(validation_result)

        sys.stdout.write(message)
        parser.exit(0)

    if args.subcommand == 'getspec':
        target_model = _MODEL_UIS[args.model].pyname
        model_module = importlib.import_module(name=target_model)
        spec = model_module.ARGS_SPEC

        if args.json:
            message = json.dumps(spec)
        else:
            message = pprint.pformat(spec)
        sys.stdout.write(message)
        parser.exit(0)

    if args.subcommand == 'run' and args.headless:
        if not args.datastack:
            parser.exit(1, 'Datastack required for headless execution.')

        try:
            parsed_datastack = datastack.extract_parameter_set(args.datastack)
        except Exception as error:
            parser.exit(
                1, "Error when parsing JSON datastack:\n    " + str(error))

        if not args.workspace:
            if ('workspace_dir' not in parsed_datastack.args
                    or parsed_datastack.args['workspace_dir'] in ['', None]):
                parser.exit(1,
                            ('Workspace must be defined at the command line '
                             'or in the datastack file'))
        else:
            parsed_datastack.args['workspace_dir'] = args.workspace

        target_model = _MODEL_UIS[args.model].pyname
        model_module = importlib.import_module(name=target_model)
        LOGGER.info('Imported target %s from %s', model_module.__name__,
                    model_module)

        with utils.prepare_workspace(parsed_datastack.args['workspace_dir'],
                                     name=parsed_datastack.model_name,
                                     logging_level=log_level):
            LOGGER.log(
                datastack.ARGS_LOG_LEVEL,
                'Starting model with parameters: \n%s',
                datastack.format_args_dict(parsed_datastack.args,
                                           parsed_datastack.model_name))

            # We're deliberately not validating here because the user
            # can just call ``invest validate <datastack>`` to validate.
            getattr(model_module, 'execute')(parsed_datastack.args)

    # If we're running in a GUI (either through ``invest run`` or
    # ``invest quickrun``), we'll need to load the Model's GUI class,
    # populate parameters and then (if in a quickrun) exit when the model
    # completes.  Quickrun functionality is primarily useful for automated
    # testing of the model interfaces.
    if (args.subcommand == 'run' and not args.headless
            or args.subcommand == 'quickrun'):

        # This warning alerts future maintainers to potential issues
        # if/when we forget to define QT_MAC_WANTS_LAYER at runtime.
        if (platform.system() == "Darwin"
                and "QT_MAC_WANTS_LAYER" not in os.environ):
            warnings.warn(
                "Mac OS X Big Sur may require the 'QT_MAC_WANTS_LAYER' "
                "environment variable to be defined in order to run.  If "
                "the application hangs on startup, set 'QT_MAC_WANTS_LAYER=1' "
                "in the shell running this CLI.", RuntimeWarning)

        from natcap.invest.ui import inputs

        gui_class = _MODEL_UIS[args.model].gui
        module_name, classname = gui_class.split('.')
        module = importlib.import_module(name='.ui.%s' % module_name,
                                         package='natcap.invest')

        # Instantiate the form
        model_form = getattr(module, classname)()

        # load the datastack if one was provided
        try:
            if args.datastack:
                model_form.load_datastack(args.datastack)
        except Exception as error:
            # If we encounter an exception while loading the datastack, log the
            # exception (so it can be seen if we're running with appropriate
            # verbosity) and exit the argparse application with exit code 1 and
            # a helpful error message.
            LOGGER.exception('Could not load datastack')
            parser.exit(DEFAULT_EXIT_CODE,
                        'Could not load datastack: %s\n' % str(error))

        if args.workspace:
            model_form.workspace.set_value(args.workspace)

        # Run the UI's event loop
        quickrun = False
        if args.subcommand == 'quickrun':
            quickrun = True
        model_form.run(quickrun=quickrun)
        app_exitcode = inputs.QT_APP.exec_()

        # Handle a graceful exit
        if model_form.form.run_dialog.messageArea.error:
            parser.exit(DEFAULT_EXIT_CODE,
                        'Model %s: run failed\n' % args.model)

        if app_exitcode != 0:
            parser.exit(app_exitcode,
                        'App terminated with exit code %s\n' % app_exitcode)
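
The verbosity arithmetic above maps counted -v flags onto the standard logging levels. A worked example using only the stdlib constants:

import logging

# logging.CRITICAL == 50; each -v subtracts 10 from the threshold.
for verbosity in range(6):
    log_level = min(logging.CRITICAL, logging.CRITICAL - verbosity * 10)
    print(verbosity, max(log_level, logging.DEBUG))
# 0 -> 50 (CRITICAL), 1 -> 40 (ERROR), 2 -> 30 (WARNING),
# 3 -> 20 (INFO), 4 -> 10 (DEBUG), 5 -> clamped at 10 (DEBUG)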
Example 14
def main():
    """CLI entry point for launching InVEST runs.

    This command-line interface supports two methods of launching InVEST models
    from the command-line:

        * through its GUI
        * in headless mode, without its GUI.

    Running in headless mode allows us to bypass all GUI functionality,
    so models may be run in this way without having GUI packages
    installed.
    """

    parser = argparse.ArgumentParser(description=(
        'Integrated Valuation of Ecosystem Services and Tradeoffs.  '
        'InVEST (Integrated Valuation of Ecosystem Services and Tradeoffs) is '
        'a family of tools for quantifying the values of natural capital in '
        'clear, credible, and practical ways. In promising a return (of '
        'societal benefits) on investments in nature, the scientific community '
        'needs to deliver knowledge and tools to quantify and forecast this '
        'return. InVEST enables decision-makers to quantify the importance of '
        'natural capital, to assess the tradeoffs associated with alternative '
        'choices, and to integrate conservation and human development.  \n\n'
        'Older versions of InVEST ran as script tools in the ArcGIS ArcToolBox '
        'environment, but have almost all been ported over to a purely '
        'open-source python environment.'),
                                     prog='invest')
    list_group = parser.add_mutually_exclusive_group()
    verbosity_group = parser.add_mutually_exclusive_group()
    import natcap.invest

    parser.add_argument('--version',
                        action='version',
                        version=natcap.invest.__version__)
    verbosity_group.add_argument(
        '-v',
        '--verbose',
        dest='verbosity',
        default=0,
        action='count',
        help=('Increase verbosity. Affects how much is '
              'printed to the console and (if running '
              'in headless mode) how much is written '
              'to the logfile.'))
    verbosity_group.add_argument('--debug',
                                 dest='log_level',
                                 default=logging.CRITICAL,
                                 action='store_const',
                                 const=logging.DEBUG,
                                 help='Enable debug logging. Alias for -vvvvv')
    list_group.add_argument('--list',
                            action=ListModelsAction,
                            nargs=0,
                            const=True,
                            help='List available models')
    parser.add_argument('-l',
                        '--headless',
                        action='store_true',
                        dest='headless',
                        help=('Attempt to run InVEST without its GUI.'))
    parser.add_argument('-d',
                        '--datastack',
                        default=None,
                        nargs='?',
                        help='Run the specified model with this datastack')
    parser.add_argument('-w',
                        '--workspace',
                        default=None,
                        nargs='?',
                        help='The workspace in which outputs will be saved')

    gui_options_group = parser.add_argument_group(
        'gui options', 'These options are ignored if running in headless mode')
    gui_options_group.add_argument('-q',
                                   '--quickrun',
                                   action='store_true',
                                   help=('Run the target model without '
                                         'validating and quit with a nonzero '
                                         'exit status if an exception is '
                                         'encountered'))

    cli_options_group = parser.add_argument_group('headless options')
    cli_options_group.add_argument('-y',
                                   '--overwrite',
                                   action='store_true',
                                   default=False,
                                   help=('Overwrite the workspace without '
                                         'prompting for confirmation'))
    cli_options_group.add_argument('-n',
                                   '--no-validate',
                                   action='store_false',
                                   dest='validate',
                                   default=True,
                                   help=('Do not validate inputs before '
                                         'running the model.'))

    list_group.add_argument('model',
                            action=SelectModelAction,
                            nargs='?',
                            help=('The model/tool to run. Use --list to show '
                                  'available models/tools. Identifiable model '
                                  'prefixes may also be used. Alternatively, '
                                  'specify "launcher" to reveal a model '
                                  'launcher window.'))

    args = parser.parse_args()

    root_logger = logging.getLogger()
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
        fmt='%(asctime)s %(name)-18s %(levelname)-8s %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S ')
    handler.setFormatter(formatter)

    # Set the log level based on what the user provides in the available
    # arguments.  Verbosity: the more v's the lower the logging threshold.
    # If --debug is used, the logging threshold is 10.
    # If the user goes lower than logging.DEBUG, default to logging.DEBUG.
    log_level = min(args.log_level, logging.CRITICAL - (args.verbosity * 10))
    handler.setLevel(max(log_level,
                         logging.DEBUG))  # don't go lower than DEBUG
    root_logger.addHandler(handler)
    LOGGER.info('Setting handler log level to %s', log_level)

    # FYI: Root logger by default has a level of logging.WARNING.
    # To capture ALL logging produced in this system at runtime, use this:
    # logging.getLogger().setLevel(logging.DEBUG)
    # Also FYI: a level of logging.NOTSET (not DEBUG) makes a logger defer
    # to its parent logger's effective level.
    logging.getLogger('natcap').setLevel(logging.DEBUG)

    # Now that we've set up logging based on args, we can start logging.
    LOGGER.debug(args)

    try:
        # Importing model UI files here will usually import qtpy before we can
        # set the sip API in natcap.invest.ui.inputs.
        # Set it here, before we can do the actual importing.
        import sip
        # 2 indicates SIP/Qt API version 2
        sip.setapi('QString', 2)

        from natcap.invest.ui import inputs
    except ImportError as error:
        # Can't import UI, exit with nonzero exit code
        LOGGER.exception('Unable to import the UI')
        parser.error(('Unable to import the UI (failed with "%s")\n'
                      'Is the UI installed?\n'
                      '    pip install natcap.invest[ui]') % error)

    if args.model == 'launcher':
        from natcap.invest.ui import launcher
        launcher.main()

    elif args.headless:
        from natcap.invest import datastack
        target_mod = _MODEL_UIS[args.model].pyname
        model_module = importlib.import_module(name=target_mod)
        LOGGER.info('imported target %s from %s', model_module.__name__,
                    model_module)

        paramset = datastack.extract_parameter_set(args.datastack)

        # prefer CLI option for workspace dir, but use paramset workspace if
        # the CLI options do not define a workspace.
        if args.workspace:
            workspace = os.path.abspath(args.workspace)
            paramset.args['workspace_dir'] = workspace
        else:
            if 'workspace_dir' in paramset.args:
                workspace = paramset.args['workspace_dir']
            else:
                parser.exit(DEFAULT_EXIT_CODE,
                            ('Workspace not defined. \n'
                             'Use --workspace to specify or add a '
                             '"workspace_dir" parameter to your datastack.'))

        with utils.prepare_workspace(workspace,
                                     name=paramset.model_name,
                                     logging_level=log_level):
            LOGGER.log(
                datastack.ARGS_LOG_LEVEL,
                datastack.format_args_dict(paramset.args, paramset.model_name))
            if not args.validate:
                LOGGER.info('Skipping validation by user request')
            else:
                model_warnings = []
                try:
                    model_warnings = getattr(model_module,
                                             'validate')(paramset.args)
                except AttributeError:
                    LOGGER.warning(
                        '%s does not have a defined validation function.',
                        paramset.model_name)
                finally:
                    if model_warnings:
                        LOGGER.warning('Warnings found: \n%s',
                                       pprint.pformat(model_warnings))

            if not args.workspace:
                args.workspace = os.getcwd()

            # If the workspace exists and we don't have up-front permission to
            # overwrite the workspace, prompt for permission.
            if (os.path.exists(args.workspace)
                    and len(os.listdir(args.workspace)) > 0
                    and not args.overwrite):
                overwrite_denied = False
                if not sys.stdout.isatty():
                    overwrite_denied = True
                else:
                    user_response = input(
                        'Workspace exists: %s\n    Overwrite? (y/n) ' %
                        (os.path.abspath(args.workspace)))
                    while user_response not in ('y', 'n'):
                        user_response = input(
                            "Response must be either 'y' or 'n': ")
                    if user_response == 'n':
                        overwrite_denied = True

                if overwrite_denied:
                    # Exit the parser with an error message.
                    parser.exit(DEFAULT_EXIT_CODE,
                                ('Use --workspace to define an '
                                 'alternate workspace.  Aborting.'))
                else:
                    LOGGER.warning(
                        'Overwriting the workspace per user input %s',
                        os.path.abspath(args.workspace))

            if 'workspace_dir' not in paramset.args:
                paramset.args['workspace_dir'] = args.workspace

            # execute the model's execute function with the loaded args
            getattr(model_module, 'execute')(paramset.args)
    else:
        # import the GUI from the known class
        gui_class = _MODEL_UIS[args.model].gui
        module_name, classname = gui_class.split('.')
        module = importlib.import_module(name='.ui.%s' % module_name,
                                         package='natcap.invest')

        # Instantiate the form
        model_form = getattr(module, classname)()

        # load the datastack if one was provided
        try:
            if args.datastack:
                model_form.load_datastack(args.datastack)
        except Exception as error:
            # If we encounter an exception while loading the datastack, log the
            # exception (so it can be seen if we're running with appropriate
            # verbosity) and exit the argparse application with exit code 1 and
            # a helpful error message.
            LOGGER.exception('Could not load datastack')
            parser.exit(DEFAULT_EXIT_CODE,
                        'Could not load datastack: %s\n' % str(error))

        if args.workspace:
            model_form.workspace.set_value(args.workspace)

        # Run the UI's event loop
        model_form.run(quickrun=args.quickrun)
        app_exitcode = inputs.QT_APP.exec_()

        # Handle a graceful exit
        if model_form.form.run_dialog.messageArea.error:
            parser.exit(DEFAULT_EXIT_CODE,
                        'Model %s: run failed\n' % args.model)

        if app_exitcode != 0:
            parser.exit(app_exitcode,
                        'App terminated with exit code %s\n' % app_exitcode)