Example #1
def get_config_options():
    result = []

    def add_option(*args, **kwargs):
        opt = config.Option(*args, **kwargs)
        result.append(opt)
        return opt

    main_address = add_option(
        config_key="mainAddress",
        default="192.168.1.100:27017",
        type=str)

    # -m is for the main address, which is a host:port pair, ideally of the
    # mongos. For non-sharded clusters, it can be the primary.
    main_address.add_cli(
        "-m", "--main", dest="main_address", help=
        "Specify the main address, which is a"
        " host:port pair. For sharded clusters, this"
        " should be the mongos address. For individual"
        " replica sets, supply the address of the"
        " primary. For example, `-m localhost:27217`"
        " would be a valid argument to `-m`. Don't use"
        " quotes around the address.")

    oplog_file = add_option(
        config_key="oplogFile",
        default="oplog.timestamp",
        type=str)

    # -o is to specify the oplog-config file. This file is used by the system
    # to store the last timestamp read on a specific oplog. This allows for
    # quick recovery from failure.
    oplog_file.add_cli(
        "-o", "--oplog-ts", dest="oplog_file", help=
        "Specify the name of the file that stores the "
        "oplog progress timestamps. "
        "This file is used by the system to store the last "
        "timestamp read on a specific oplog. This allows "
        "for quick recovery from failure. By default this "
        "is `config.txt`, which starts off empty. An empty "
        "file causes the system to go through all the mongo "
        "oplog and sync all the documents. Whenever the "
        "cluster is restarted, it is essential that the "
        "oplog-timestamp config file be emptied - otherwise "
        "the connector will miss some documents and behave "
        "incorrectly.")

    no_dump = add_option(
        config_key="noDump",
        default=True,
        type=bool)

    # --no-dump specifies whether we should read an entire collection from
    # scratch if no timestamp is found in the oplog_config.
    no_dump.add_cli(
        "--no-dump", action="store_true", dest="no_dump", help=
        "If specified, this flag will ensure that "
        "mongo_connector won't read the entire contents of a "
        "namespace iff --oplog-ts points to an empty file.")

    batch_size = add_option(
        config_key="batchSize",
        default=constants.DEFAULT_BATCH_SIZE,
        type=int)

    # --batch-size specifies the number of documents to read from the oplog
    # before updating the --oplog-ts config file with the current position.
    batch_size.add_cli(
        "--batch-size", type="int", dest="batch_size", help=
        "Specify an int to update the --oplog-ts "
        "config file with latest position of oplog every "
        "N documents. By default, the oplog config isn't "
        "updated until we've read through the entire oplog. "
        "You may want more frequent updates if you are at risk "
        "of falling behind the earliest timestamp in the oplog")

    def apply_verbosity(option, cli_values):
        if cli_values['verbose']:
            option.value = 3
        if option.value < 0 or option.value > 3:
            raise errors.InvalidConfiguration(
                "verbosity must be in the range [0, 3].")

    # Default is warnings and above.
    verbosity = add_option(
        config_key="verbosity",
        default=1,
        type=int,
        apply_function=apply_verbosity)

    # -v enables verbose logging
    verbosity.add_cli(
        "-v", "--verbose", action="store_true",
        dest="verbose", help="Enables verbose logging.")

    def apply_logging(option, cli_values):
        log_mechs_enabled = [cli_values[m]
                             for m in ('logfile', 'enable_syslog', 'stdout')
                             if cli_values[m]]
        if len(log_mechs_enabled) > 1:
            raise errors.InvalidConfiguration(
                "You cannot specify more than one logging method "
                "simultaneously. Please choose the logging method you "
                "prefer. ")
        if cli_values['log_format']:
            option.value['format'] = cli_values['log_format']

        if cli_values['logfile']:
            when = cli_values['logfile_when']
            interval = cli_values['logfile_interval']
            if (when and when.startswith('W') and
                    interval != constants.DEFAULT_LOGFILE_INTERVAL):
                raise errors.InvalidConfiguration(
                    "You cannot specify a log rotation interval when rotating "
                    "based on a weekday (W0 - W6).")

            option.value['type'] = 'file'
            option.value['filename'] = cli_values['logfile']
            if when:
                option.value['rotationWhen'] = when
            if interval:
                option.value['rotationInterval'] = interval
            if cli_values['logfile_backups']:
                option.value['rotationBackups'] = cli_values['logfile_backups']

        if cli_values['enable_syslog']:
            option.value['type'] = 'syslog'

        if cli_values['syslog_host']:
            option.value['host'] = cli_values['syslog_host']

        if cli_values['syslog_facility']:
            option.value['facility'] = cli_values['syslog_facility']

        if cli_values['stdout']:
            option.value['type'] = 'stream'

        # Expand the full path to log file
        option.value['filename'] = os.path.abspath(option.value['filename'])

    default_logging = {
        'type': 'file',
        'filename': 'mongo-connector.log',
        'format': constants.DEFAULT_LOG_FORMAT,
        'rotationInterval': constants.DEFAULT_LOGFILE_INTERVAL,
        'rotationBackups': constants.DEFAULT_LOGFILE_BACKUPCOUNT,
        'rotationWhen': constants.DEFAULT_LOGFILE_WHEN,
        'host': constants.DEFAULT_SYSLOG_HOST,
        'facility': constants.DEFAULT_SYSLOG_FACILITY
    }

    logging = add_option(
        config_key="logging",
        default=default_logging,
        type=dict,
        apply_function=apply_logging)

    # -w enables logging to a file
    logging.add_cli(
        "-w", "--logfile", dest="logfile", help=
        "Log all output to the specified file.")

    logging.add_cli(
        '--stdout', dest='stdout', action='store_true', help=
        'Log all output to STDOUT rather than a logfile.')

    logging.add_cli(
        "--log-format", dest="log_format", help=
        "Define a specific format for the log file. "
        "This is based on the python logging lib. "
        "Available parameters can be found at "
        "https://docs.python.org/2/library/logging.html#logrecord-attributes")

    # -s is to enable syslog logging.
    logging.add_cli(
        "-s", "--enable-syslog", action="store_true",
        dest="enable_syslog", help=
        "The syslog host, which may be an address like 'localhost:514' or, "
        "on Unix/Linux, the path to a Unix domain socket such as '/dev/log'.")

    # --syslog-host is to specify the syslog host.
    logging.add_cli(
        "--syslog-host", dest="syslog_host", help=
        "Used to specify the syslog host."
        " The default is 'localhost:514'")

    # --syslog-facility is to specify the syslog facility.
    logging.add_cli(
        "--syslog-facility", dest="syslog_facility", help=
        "Used to specify the syslog facility."
        " The default is 'user'")

    # --logfile-when specifies the type of interval of the rotating file
    # (seconds, minutes, hours)
    logging.add_cli("--logfile-when", action="store", dest="logfile_when",
                    type="string",
                    help="The type of interval for rotating the log file. "
                    "Should be one of "
                    "'S' (seconds), 'M' (minutes), 'H' (hours), "
                    "'D' (days), 'W0' - 'W6' (days of the week 0 - 6), "
                    "or 'midnight' (the default). See the Python documentation "
                    "for 'logging.handlers.TimedRotatingFileHandler' for more "
                    "details.")

    # --logfile-interval specifies when to create a new log file
    logging.add_cli("--logfile-interval", action="store",
                    dest="logfile_interval", type="int",
                    help="How frequently to rotate the log file, "
                    "specifically, how many units of the rotation interval "
                    "should pass before the rotation occurs. For example, "
                    "to create a new file each hour: "
                    " '--logfile-when=H --logfile-interval=1'. "
                    "Defaults to 1. You may not use this option if "
                    "--logfile-when is set to a weekday (W0 - W6). "
                    "See the Python documentation for "
                    "'logging.handlers.TimedRotatingFileHandler' for more "
                    "details. ")

    # --logfile-backups specifies how many log files will be kept.
    logging.add_cli("--logfile-backups", action="store",
                    dest="logfile_backups", type="int",
                    help="How many log files will be kept after rotation. "
                    "If set to zero, then no log files will be deleted. "
                    "Defaults to 7.")

    def apply_authentication(option, cli_values):
        if cli_values['admin_username']:
            option.value['adminUsername'] = cli_values['admin_username']

        if cli_values['password']:
            option.value['password'] = cli_values['password']

        if cli_values['password_file']:
            option.value['passwordFile'] = cli_values['password_file']

        if option.value.get("adminUsername"):
            password = option.value.get("password")
            passwordFile = option.value.get("passwordFile")
            if not password and not passwordFile:
                raise errors.InvalidConfiguration(
                    "Admin username specified without password.")
            if password and passwordFile:
                raise errors.InvalidConfiguration(
                    "Can't specify both password and password file.")

    default_authentication = {
        'adminUsername': None,
        'password': None,
        'passwordFile': None
    }

    authentication = add_option(
        config_key="authentication",
        default=default_authentication,
        type=dict,
        apply_function=apply_authentication)

    # -a is to specify the username for authentication.
    authentication.add_cli(
        "-a", "--admin-username", dest="admin_username", help=
        "Used to specify the username of an admin user to "
        "authenticate with. To use authentication, the user "
        "must specify both an admin username and a keyFile.")

    # -p is to specify the password used for authentication.
    authentication.add_cli(
        "-p", "--password", dest="password", help=
        "Used to specify the password."
        " This is used by mongos to authenticate"
        " connections to the shards, and in the"
        " oplog threads. If authentication is not used, then"
        " this field can be left empty as the default ")

    # -f is to specify the password file. This file is used by mongos
    # to authenticate connections to the shards, and we'll use it in the
    # oplog threads.
    authentication.add_cli(
        "-f", "--password-file", dest="password_file", help=
        "Used to store the password for authentication."
        " Use this option if you wish to specify a"
        " username and password but don't want to"
        " type in the password. The contents of this"
        " file should be the password for the admin user.")

    def apply_fields(option, cli_values):
        if cli_values['fields']:
            option.value = cli_values['fields'].split(",")

    fields = add_option(
        config_key="fields",
        default=[],
        type=list,
        apply_function=apply_fields)

    # -i to specify the list of fields to export
    fields.add_cli(
        "-i", "--fields", dest="fields", help=
        "Use a comma separated list of fields to specify multiple fields. "
        "Will copy over the fields specified into a new document."
        "The '_id', 'ns' and '_ts' fields are always "
        "exported. Supports dot notation for document fields but cannot span "
        "arrays. Cannot use both 'fields' and 'exclude_fields'.")

    def apply_exclude_fields(option, cli_values):
        if cli_values['exclude_fields']:
            option.value = cli_values['exclude_fields'].split(",")

    exclude_fields = add_option(
        config_key="exclude_fields",
        default=[],
        type=list,
        apply_function=apply_exclude_fields)

    # -e to specify the list of fields to exclude
    exclude_fields.add_cli(
        "-e", "--exclude_fields", dest="exclude_fields", help=
        "Use a comma separated list of fields to specify multiple "
        "fields to exclude. Will delete the fields specified from the "
        "existing document. The '_id', 'ns' and '_ts' fields are always "
        "exported. Supports dot notation for document fields but cannot span "
        "arrays. Cannot use both 'fields' and 'exclude_fields'.")

    def apply_namespaces(option, cli_values):
        if cli_values['ns_set']:
            option.value['include'] = cli_values['ns_set'].split(',')

        if cli_values['ex_ns_set']:
            option.value['exclude'] = cli_values['ex_ns_set'].split(',')

        if cli_values['gridfs_set']:
            option.value['gridfs'] = cli_values['gridfs_set'].split(',')

        if cli_values['dest_ns_set']:
            ns_set = option.value['include']
            dest_ns_set = cli_values['dest_ns_set'].split(',')
            if len(ns_set) != len(dest_ns_set):
                raise errors.InvalidConfiguration(
                    "Destination namespace set should be the"
                    " same length as the origin namespace set.")
            option.value['mapping'] = dict(zip(ns_set, dest_ns_set))

        ns_set = option.value['include']
        if len(ns_set) != len(set(ns_set)):
            raise errors.InvalidConfiguration(
                "Namespace set should not contain any duplicates.")

        ex_ns_set = option.value['exclude']
        if len(ex_ns_set) != len(set(ex_ns_set)):
            raise errors.InvalidConfiguration(
                "Exclude namespace set should not contain any duplicates.")

        # 'include' and 'exclude' may not both be specified
        if ns_set and ex_ns_set:
            raise errors.InvalidConfiguration(
                "Cannot use both namespace 'include' "
                "(--namespace-set) and 'exclude' "
                "(--exclude-namespace-set).")

        # validate 'include' format
        for ns in ns_set:
            if ns.count("*") > 1:
                raise errors.InvalidConfiguration(
                    "Namespace set should be plain text "
                    "e.g. foo.bar or only contains one wildcard, e.g. foo.* .")

        # validate 'exclude' format
        for ens in ex_ns_set:
            if ens.count("*") > 1:
                raise errors.InvalidConfiguration(
                    "Exclude namespace set should be plain text "
                    "e.g. foo.bar or only contains one wildcard, e.g. foo.* .")

        dest_mapping = option.value['mapping']
        if len(dest_mapping) != len(set(dest_mapping.values())):
            raise errors.InvalidConfiguration(
                "Destination namespaces set should not"
                " contain any duplicates.")

        for key, value in dest_mapping.items():
            if key.count("*") > 1 or value.count("*") > 1:
                raise errors.InvalidConfiguration(
                    "The namespace mapping source and destination "
                    "cannot contain more than one '*' character.")
            if key.count("*") != value.count("*"):
                raise errors.InvalidConfiguration(
                    "The namespace mapping source and destination "
                    "must contain the same number of '*' characters.")

        gridfs_set = option.value['gridfs']
        if len(gridfs_set) != len(set(gridfs_set)):
            raise errors.InvalidConfiguration(
                "GridFS set should not contain any duplicates.")

    default_namespaces = {
        "include": ["kxlist_list.quotelist"],
        "exclude": [],
        "mapping": {},
        "gridfs": []
    }

    namespaces = add_option(
        config_key="namespaces",
        default=default_namespaces,
        type=dict,
        apply_function=apply_namespaces)

    # -n is to specify the namespaces we want to consider. The default
    # considers all the namespaces
    namespaces.add_cli(
        "-n", "--namespace-set", dest="ns_set", help=
        "Used to specify the namespaces we want to "
        "consider. For example, if we wished to store all "
        "documents from the test.test and alpha.foo "
        "namespaces, we could use `-n test.test,alpha.foo`. "
        "You can also use, for example, `-n test.*` to store "
        "documents from all the collections of db test. "
        "The default is to consider all the namespaces, "
        "excluding the system and config databases, and "
        "also ignoring the \"system.indexes\" collection in "
        "any database. This cannot be used together with "
        "'--exclude-namespace-set'!")

    # -x is to specify the namespaces we dont want to consider. The default
    # is empty
    namespaces.add_cli(
        "-x", "--exclude-namespace-set", dest="ex_ns_set", help=
        "Used to specify the namespaces we do not want to "
        "consider. For example, if we wished to ignore all "
        "documents from the test.test and alpha.foo "
        "namespaces, we could use `-x test.test,alpha.foo`. "
        "You can also use, for example, `-x test.*` to ignore "
        "documents from all the collections of db test. "
        "The default is not to exclude any namespace. "
        "This cannot be used together with '--namespace-set'!")

    # -g is the destination namespace
    namespaces.add_cli(
        "-g", "--dest-namespace-set", dest="dest_ns_set", help=
        "Specify a destination namespace mapping. Each "
        "namespace provided in the --namespace-set option "
        "will be mapped respectively according to this "
        "comma-separated list. These lists must have "
        "equal length. "
        "It also supports mapping using wildcard, for example, "
        "map foo.* to bar_*.someting, means that if we have two "
        "collections foo.a and foo.b, they will map to "
        "bar_a.something and bar_b.something. "
        "The default is to use the identity "
        "mapping. This works for mongo-to-mongo as well as"
        "mongo-to-elasticsearch connections.")

    # --gridfs-set is the set of GridFS namespaces to consider
    namespaces.add_cli(
        "--gridfs-set", dest="gridfs_set", help=
        "Used to specify the GridFS namespaces we want to "
        "consider. For example, if your metadata is stored in "
        "test.fs.files and chunks are stored in test.fs.chunks, "
        "you can use `--gridfs-set test.fs`.")

    def apply_doc_managers(option, cli_values):
        if not option.value:
            if not cli_values['doc_manager'] and not cli_values['target_url']:
                return
            option.value = [{}]

        # Command line options should override the first DocManager config.
        cli_to_config = dict(doc_manager='docManager',
                             target_url='targetURL',
                             auto_commit_interval='autoCommitInterval',
                             unique_key='uniqueKey')
        first_dm = option.value[0]
        for cli_name, value in cli_values.items():
            if value is not None:
                first_dm[cli_to_config[cli_name]] = value

        # validate doc managers and fill in default values
        for dm in option.value:
            if not isinstance(dm, dict):
                raise errors.InvalidConfiguration(
                    "Elements of docManagers must be a dict.")
            if 'docManager' not in dm and 'docManagerClassPath' not in dm:
                raise errors.InvalidConfiguration(
                    "Every element of docManagers must contain either a"
                    " 'docManager' or a 'docManagerClassPath' property.")
            if not dm.get('targetURL'):
                dm['targetURL'] = None
            if not dm.get('uniqueKey'):
                dm['uniqueKey'] = constants.DEFAULT_UNIQUE_KEY
            if dm.get('autoCommitInterval') is None:
                dm['autoCommitInterval'] = constants.DEFAULT_COMMIT_INTERVAL
            if not dm.get('args'):
                dm['args'] = {}
            if not dm.get('bulkSize'):
                dm['bulkSize'] = constants.DEFAULT_MAX_BULK

            aci = dm['autoCommitInterval']
            if aci is not None and aci < 0:
                raise errors.InvalidConfiguration(
                    "autoCommitInterval must be non-negative.")

        def import_dm_by_name(name):
            full_name = "mongo_connector.doc_managers.%s.DocManager" % name
            return import_dm_by_path(full_name)

        def import_dm_by_path(path):
            try:
                # importlib doesn't exist in 2.6, but __import__ is everywhere
                package, klass = path.rsplit('.', 1)
                module = __import__(package, fromlist=(package,))
                dm_impl = getattr(module, klass)
                if not issubclass(dm_impl, DocManagerBase):
                    raise TypeError("DocManager must inherit DocManagerBase.")
                return dm_impl
            except ImportError:
                raise errors.InvalidConfiguration(
                    "Could not import %s. It could be that this doc manager"
                    " has been moved out of this project and is maintained"
                    " elsewhere. Make sure that you have the doc manager"
                    " installed alongside mongo-connector. Check the README"
                    " for a list of available doc managers." % package)
            except (AttributeError, TypeError):
                raise errors.InvalidConfiguration(
                    "No definition for DocManager found in %s." % package)

        # instantiate the doc manager objects
        dm_instances = []
        for dm in option.value:
            if 'docManagerClassPath' in dm:
                DocManager = import_dm_by_path(dm['docManagerClassPath'])
            else:
                DocManager = import_dm_by_name(dm['docManager'])
            kwargs = {
                'unique_key': dm['uniqueKey'],
                'auto_commit_interval': dm['autoCommitInterval'],
                'chunk_size': dm['bulkSize']
            }
            for k in dm['args']:
                if k not in kwargs:
                    kwargs[k] = dm['args'][k]

            target_url = dm['targetURL']
            if target_url:
                dm_instances.append(DocManager(target_url, **kwargs))
            else:
                dm_instances.append(DocManager(**kwargs))

        option.value = dm_instances

    doc_managers = add_option(
        config_key="docManagers",
        default=None,
        type=list,
        apply_function=apply_doc_managers)

    # -d is to specify the doc manager file.
    doc_managers.add_cli(
        "-d", "--doc-manager", dest="doc_manager", help=
        "Used to specify the path to each doc manager "
        "file that will be used. DocManagers should be "
        "specified in the same order as their respective "
        "target addresses in the --target-urls option. "
        "URLs are assigned to doc managers "
        "respectively. Additional doc managers are "
        "implied to have no target URL. Additional URLs "
        "are implied to have the same doc manager type as "
        "the last doc manager for which a URL was "
        "specified. By default, Mongo Connector will use "
        "'doc_manager_simulator.py'.  It is recommended "
        "that all doc manager files be kept in the "
        "doc_managers folder in mongo-connector. For "
        "more information about making your own doc "
        "manager, see 'Writing Your Own DocManager' "
        "section of the wiki")

    # -t is to specify the URL of each target system.
    doc_managers.add_cli(
        "-t", "--target-url",
        dest="target_url", help=
        "Specify the URL to each target system being "
        "used. For example, if you were using Solr out of "
        "the box, you could use '-t "
        "http://localhost:8080/solr' with the "
        "SolrDocManager to establish a proper connection. "
        "URLs should be specified in the same order as "
        "their respective doc managers in the "
        "--doc-managers option.  URLs are assigned to doc "
        "managers respectively. Additional doc managers "
        "are implied to have no target URL. Additional "
        "URLs are implied to have the same doc manager "
        "type as the last doc manager for which a URL was "
        "specified. "
        "Don't use quotes around addresses. ")

    # -u is to specify the mongoDB field that will serve as the unique key
    # for the target system,
    doc_managers.add_cli(
        "-u", "--unique-key", dest="unique_key", help=
        "The name of the MongoDB field that will serve "
        "as the unique key for the target system. "
        "Note that this option does not apply "
        "when targeting another MongoDB cluster. "
        "Defaults to \"_id\".")

    # --auto-commit-interval to specify auto commit time interval
    doc_managers.add_cli(
        "--auto-commit-interval", type="int",
        dest="auto_commit_interval", help=
        "Seconds in-between calls for the Doc Manager"
        " to commit changes to the target system. A value of"
        " 0 means to commit after every write operation."
        " When left unset, Mongo Connector will not make"
        " explicit commits. Some systems have"
        " their own mechanism for adjusting a commit"
        " interval, which should be preferred to this"
        " option.")

    continue_on_error = add_option(
        config_key="continueOnError",
        default=False,
        type=bool)

    def apply_ssl(option, cli_values):
        option.value = option.value or {}
        ssl_certfile = cli_values.pop('ssl_certfile')
        ssl_keyfile = cli_values.pop('ssl_keyfile')
        ssl_cert_reqs = cli_values.pop('ssl_cert_reqs')
        ssl_ca_certs = (
            cli_values.pop('ssl_ca_certs') or option.value.get('sslCACerts'))

        if ssl_cert_reqs and ssl_cert_reqs != 'ignored' and not ssl_ca_certs:
            raise errors.InvalidConfiguration(
                '--ssl-ca-certs must be provided if the '
                '--ssl-certificate-policy is not "ignored".')
        option.value.setdefault('sslCertfile', ssl_certfile)
        option.value.setdefault('sslCACerts', ssl_ca_certs)
        option.value.setdefault('sslKeyfile', ssl_keyfile)
        option.value['sslCertificatePolicy'] = _SSL_POLICY_MAP.get(
            ssl_cert_reqs)
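    # _SSL_POLICY_MAP is defined elsewhere in this module; presumably it maps
    # these policy names to the ssl module's CERT_* constants (e.g.
    # 'required' -> ssl.CERT_REQUIRED).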
    ssl = add_option(
        config_key="ssl",
        default={},
        type=dict,
        apply_function=apply_ssl)
    ssl.add_cli(
        '--ssl-certfile', dest='ssl_certfile',
        help=('Path to a certificate identifying the local connection '
              'to MongoDB.')
    )
    ssl.add_cli(
        '--ssl-keyfile', dest='ssl_keyfile',
        help=('Path to the private key for --ssl-certfile. '
              'Not necessary if already included in --ssl-certfile.')
    )
    ssl.add_cli(
        '--ssl-certificate-policy', dest='ssl_cert_reqs',
        choices=('required', 'optional', 'ignored'),
        help=('Policy for validating SSL certificates provided from the other '
              'end of the connection. There are three possible values: '
              'required = Require and validate the remote certificate. '
              'optional = Validate the remote certificate only if one '
              'is provided. '
              'ignored = Remote SSL certificates are ignored completely.')
    )
    ssl.add_cli(
        '--ssl-ca-certs', dest='ssl_ca_certs',
        help=('Path to a concatenated set of certificate authority '
              'certificates to validate the other side of the connection.')
    )

    # --continue-on-error to continue to upsert documents during a collection
    # dump, even if the documents cannot be inserted for some reason
    continue_on_error.add_cli(
        "--continue-on-error", action="store_true",
        dest="continue_on_error", help=
        "By default, if any document fails to upsert"
        " during a collection dump, the entire operation fails."
        " When this flag is enabled, normally fatal errors"
        " will be caught and logged, allowing the collection"
        " dump to continue.\n"
        "Note: Applying oplog operations to an incomplete"
        " set of documents due to errors may cause undefined"
        " behavior. Use this flag to dump only.")

    config_file = add_option()
    config_file.add_cli(
        "-c", "--config-file", dest="config_file", help=
        "Specify a JSON file to load configurations from. You can find"
        " an example config file at mongo-connector/config.json")

    tz_aware = add_option(
        config_key="timezoneAware", default=False, type=bool)
    tz_aware.add_cli(
        "--tz-aware", dest="tz_aware", action="store_true",
        help="Make all dates and times timezone-aware.")

    return result
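
A minimal sketch of how the option list above might be consumed, assuming (from the calls in get_config_options) that config.Option exposes config_key, value, apply_function, and the (args, kwargs) pairs recorded by add_cli. This is an illustration of the flow, not the project's actual driver code:

# Hypothetical driver: wire the options into optparse, then layer a JSON
# config file and CLI values on top of the defaults. The config.Option
# attributes used here (cli_options, config_key, value, apply_function)
# are assumptions inferred from the usage above.
import json
import optparse

def load_config(argv):
    options = get_config_options()
    parser = optparse.OptionParser()
    # Remember which CLI dests belong to which option so each apply_function
    # sees only its own cli_values, matching the lookups in the code above.
    dests = {}
    for option in options:
        dests[option] = []
        for args, kwargs in option.cli_options:
            cli_option = parser.add_option(*args, **kwargs)
            dests[option].append(cli_option.dest)
    parsed, _ = parser.parse_args(argv)

    # Values from -c/--config-file replace option defaults first. The getattr
    # guards cover Options created without a config_key or apply_function.
    if parsed.config_file:
        with open(parsed.config_file) as f:
            file_config = json.load(f)
        for option in options:
            key = getattr(option, 'config_key', None)
            if key in file_config:
                option.value = file_config[key]

    # CLI values are then merged in and validated by each apply_function.
    for option in options:
        cli_values = dict((d, getattr(parsed, d)) for d in dests[option])
        apply_fn = getattr(option, 'apply_function', None)
        if apply_fn is not None:
            apply_fn(option, cli_values)
    return dict((getattr(o, 'config_key', None), o.value)
                for o in options if getattr(o, 'config_key', None))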
Example #2
def get_config_options():
    result = []

    def add_option(*args, **kwargs):
        opt = config.Option(*args, **kwargs)
        result.append(opt)
        return opt

    main_address = add_option(
        config_key="mainAddress",
        default="localhost:27017",
        type=str)

    # -m is for the main address, which is a host:port pair, ideally of the
    # mongos. For non-sharded clusters, it can be the primary.
    main_address.add_cli(
        "-m", "--main", dest="main_address", help=
        "Specify the main address, which is a"
        " host:port pair. For sharded clusters, this"
        " should be the mongos address. For individual"
        " replica sets, supply the address of the"
        " primary. For example, `-m localhost:27217`"
        " would be a valid argument to `-m`. Don't use"
        " quotes around the address.")

    oplog_file = add_option(
        config_key="oplogFile",
        default="oplog.timestamp",
        type=str)

    # -o is to specify the oplog-config file. This file is used by the system
    # to store the last timestamp read on a specific oplog. This allows for
    # quick recovery from failure.
    oplog_file.add_cli(
        "-o", "--oplog-ts", dest="oplog_file", help=
        "Specify the name of the file that stores the "
        "oplog progress timestamps. "
        "This file is used by the system to store the last "
        "timestamp read on a specific oplog. This allows "
        "for quick recovery from failure. By default this "
        "is `config.txt`, which starts off empty. An empty "
        "file causes the system to go through all the mongo "
        "oplog and sync all the documents. Whenever the "
        "cluster is restarted, it is essential that the "
        "oplog-timestamp config file be emptied - otherwise "
        "the connector will miss some documents and behave "
        "incorrectly.")

    no_dump = add_option(
        config_key="noDump",
        default=False,
        type=bool)

    # --no-dump specifies whether we should read an entire collection from
    # scratch if no timestamp is found in the oplog_config.
    no_dump.add_cli(
        "--no-dump", action="store_true", dest="no_dump", help=
        "If specified, this flag will ensure that "
        "mongo_connector won't read the entire contents of a "
        "namespace iff --oplog-ts points to an empty file.")

    batch_size = add_option(
        config_key="batchSize",
        default=constants.DEFAULT_BATCH_SIZE,
        type=int)

    # --batch-size specifies the number of documents to read from the oplog
    # before updating the --oplog-ts config file with the current position.
    batch_size.add_cli(
        "--batch-size", type="int", dest="batch_size", help=
        "Specify an int to update the --oplog-ts "
        "config file with latest position of oplog every "
        "N documents. By default, the oplog config isn't "
        "updated until we've read through the entire oplog. "
        "You may want more frequent updates if you are at risk "
        "of falling behind the earliest timestamp in the oplog")

    def apply_verbosity(option, cli_values):
        if cli_values['verbose']:
            option.value = 3
        if option.value < 0 or option.value > 3:
            raise errors.InvalidConfiguration(
                "verbosity must be in the range [0, 3].")

    verbosity = add_option(
        config_key="verbosity",
        default=0,
        type=int,
        apply_function=apply_verbosity)

    # -v enables verbose logging
    verbosity.add_cli(
        "-v", "--verbose", action="store_true",
        dest="verbose", help="Enables verbose logging.")

    def apply_logging(option, cli_values):
        log_mechs_enabled = [cli_values[m]
                             for m in ('logfile', 'enable_syslog', 'stdout')
                             if cli_values[m]]
        if len(log_mechs_enabled) > 1:
            raise errors.InvalidConfiguration(
                "You cannot specify more than one logging method "
                "simultaneously. Please choose the logging method you "
                "prefer. ")
        if cli_values['log_format']:
            option.value['format'] = cli_values['log_format']

        if cli_values['logfile']:
            when = cli_values['logfile_when']
            interval = cli_values['logfile_interval']
            if (when and when.startswith('W') and
                    interval != constants.DEFAULT_LOGFILE_INTERVAL):
                raise errors.InvalidConfiguration(
                    "You cannot specify a log rotation interval when rotating "
                    "based on a weekday (W0 - W6).")

            option.value['type'] = 'file'
            option.value['filename'] = cli_values['logfile']
            if when:
                option.value['rotationWhen'] = when
            if interval:
                option.value['rotationInterval'] = interval
            if cli_values['logfile_backups']:
                option.value['rotationBackups'] = cli_values['logfile_backups']

        if cli_values['enable_syslog']:
            option.value['type'] = 'syslog'

        if cli_values['syslog_host']:
            option.value['host'] = cli_values['syslog_host']

        if cli_values['syslog_facility']:
            option.value['facility'] = cli_values['syslog_facility']

        if cli_values['stdout']:
            option.value['type'] = 'stream'

    default_logging = {
        'type': 'file',
        'filename': 'mongo-connector.log',
        'format': constants.DEFAULT_LOG_FORMAT,
        'rotationInterval': constants.DEFAULT_LOGFILE_INTERVAL,
        'rotationBackups': constants.DEFAULT_LOGFILE_BACKUPCOUNT,
        'rotationWhen': constants.DEFAULT_LOGFILE_WHEN,
        'host': constants.DEFAULT_SYSLOG_HOST,
        'facility': constants.DEFAULT_SYSLOG_FACILITY
    }

    logging = add_option(
        config_key="logging",
        default=default_logging,
        type=dict,
        apply_function=apply_logging)

    # -w enables logging to a file
    logging.add_cli(
        "-w", "--logfile", dest="logfile", help=
        "Log all output to the specified file.")

    logging.add_cli(
        '--stdout', dest='stdout', action='store_true', help=
        'Log all output to STDOUT rather than a logfile.')

    logging.add_cli(
        "--log-format", dest="log_format", help=
        "Define a specific format for the log file. "
        "This is based on the python logging lib. "
        "Available parameters can be found at "
        "https://docs.python.org/2/library/logging.html#logrecord-attributes")

    # -s is to enable syslog logging.
    logging.add_cli(
        "-s", "--enable-syslog", action="store_true",
        dest="enable_syslog", help=
        "The syslog host, which may be an address like 'localhost:514' or, "
        "on Unix/Linux, the path to a Unix domain socket such as '/dev/log'.")

    # --syslog-host is to specify the syslog host.
    logging.add_cli(
        "--syslog-host", dest="syslog_host", help=
        "Used to specify the syslog host."
        " The default is 'localhost:514'")

    # --syslog-facility is to specify the syslog facility.
    logging.add_cli(
        "--syslog-facility", dest="syslog_facility", help=
        "Used to specify the syslog facility."
        " The default is 'user'")

    # --logfile-when specifies the type of interval of the rotating file
    # (seconds, minutes, hours)
    logging.add_cli("--logfile-when", action="store", dest="logfile_when",
                    type="string",
                    help="The type of interval for rotating the log file. "
                    "Should be one of "
                    "'S' (seconds), 'M' (minutes), 'H' (hours), "
                    "'D' (days), 'W0' - 'W6' (days of the week 0 - 6), "
                    "or 'midnight' (the default). See the Python documentation "
                    "for 'logging.handlers.TimedRotatingFileHandler' for more "
                    "details.")

    # --logfile-interval specifies when to create a new log file
    logging.add_cli("--logfile-interval", action="store",
                    dest="logfile_interval", type="int",
                    help="How frequently to rotate the log file, "
                    "specifically, how many units of the rotation interval "
                    "should pass before the rotation occurs. For example, "
                    "to create a new file each hour: "
                    " '--logfile-when=H --logfile-interval=1'. "
                    "Defaults to 1. You may not use this option if "
                    "--logfile-when is set to a weekday (W0 - W6). "
                    "See the Python documentation for "
                    "'logging.handlers.TimedRotatingFileHandler' for more "
                    "details. ")

    # --logfile-backups specifies how many log files will be kept.
    logging.add_cli("--logfile-backups", action="store",
                    dest="logfile_backups", type="int",
                    help="How many log files will be kept after rotation. "
                    "If set to zero, then no log files will be deleted. "
                    "Defaults to 7.")

    def apply_authentication(option, cli_values):
        if cli_values['admin_username']:
            option.value['adminUsername'] = cli_values['admin_username']

        if cli_values['password']:
            option.value['password'] = cli_values['password']

        if cli_values['password_file']:
            option.value['passwordFile'] = cli_values['password_file']

        if option.value.get("adminUsername"):
            password = option.value.get("password")
            passwordFile = option.value.get("passwordFile")
            if not password and not passwordFile:
                raise errors.InvalidConfiguration(
                    "Admin username specified without password.")
            if password and passwordFile:
                raise errors.InvalidConfiguration(
                    "Can't specify both password and password file.")

    default_authentication = {
        'adminUsername': None,
        'password': None,
        'passwordFile': None
    }

    authentication = add_option(
        config_key="authentication",
        default=default_authentication,
        type=dict,
        apply_function=apply_authentication)

    # -a is to specify the username for authentication.
    authentication.add_cli(
        "-a", "--admin-username", dest="admin_username", help=
        "Used to specify the username of an admin user to "
        "authenticate with. To use authentication, the user "
        "must specify both an admin username and a keyFile.")

    # -p is to specify the password used for authentication.
    authentication.add_cli(
        "-p", "--password", dest="password", help=
        "Used to specify the password."
        " This is used by mongos to authenticate"
        " connections to the shards, and in the"
        " oplog threads. If authentication is not used, then"
        " this field can be left empty as the default ")

    # -f is to specify the password file. This file is used by mongos
    # to authenticate connections to the shards, and we'll use it in the
    # oplog threads.
    authentication.add_cli(
        "-f", "--password-file", dest="password_file", help=
        "Used to store the password for authentication."
        " Use this option if you wish to specify a"
        " username and password but don't want to"
        " type in the password. The contents of this"
        " file should be the password for the admin user.")

    def apply_fields(option, cli_values):
        if cli_values['fields']:
            option.value = cli_values['fields'].split(",")
        for field in option.value:
            if '.' in field:
                print(
                    "WARNING: mongo-connector can only successfully filter "
                    "sub-document fields for inserts and updates, "
                    "not replacements. To catch all changes on "
                    "a sub-document field, specify the name of the "
                    "sub-document instead. You are seeing this "
                    "message because you passed the name of a nested field "
                    "to the 'fields' option: %s" % field)
                break

    fields = add_option(
        config_key="fields",
        default=[],
        type=list,
        apply_function=apply_fields)

    # -i to specify the list of fields to export
    fields.add_cli(
        "-i", "--fields", dest="fields", help=
        "Used to specify the list of fields to export. "
        "Specify a field or fields to include in the export. "
        "Use a comma separated list of fields to specify multiple "
        "fields. The '_id', 'ns' and '_ts' fields are always "
        "exported.")

    def apply_namespaces(option, cli_values):
        if cli_values['ns_set']:
            option.value['include'] = cli_values['ns_set'].split(',')

        if cli_values['gridfs_set']:
            option.value['gridfs'] = cli_values['gridfs_set'].split(',')

        if cli_values['dest_ns_set']:
            ns_set = option.value['include']
            dest_ns_set = cli_values['dest_ns_set'].split(',')
            if len(ns_set) != len(dest_ns_set):
                raise errors.InvalidConfiguration(
                    "Destination namespace set should be the"
                    " same length as the origin namespace set.")
            option.value['mapping'] = dict(zip(ns_set, dest_ns_set))

        ns_set = option.value['include']
        if len(ns_set) != len(set(ns_set)):
            raise errors.InvalidConfiguration(
                "Namespace set should not contain any duplicates.")

        dest_mapping = option.value['mapping']
        if len(dest_mapping) != len(set(dest_mapping.values())):
            raise errors.InvalidConfiguration(
                "Destination namespaces set should not"
                " contain any duplicates.")

        gridfs_set = option.value['gridfs']
        if len(gridfs_set) != len(set(gridfs_set)):
            raise errors.InvalidConfiguration(
                "GridFS set should not contain any duplicates.")

    default_namespaces = {
        "include": [],
        "mapping": {},
        "gridfs": []
    }

    namespaces = add_option(
        config_key="namespaces",
        default=default_namespaces,
        type=dict,
        apply_function=apply_namespaces)

    # -n is to specify the namespaces we want to consider. The default
    # considers all the namespaces
    namespaces.add_cli(
        "-n", "--namespace-set", dest="ns_set", help=
        "Used to specify the namespaces we want to "
        "consider. For example, if we wished to store all "
        "documents from the test.test and alpha.foo "
        "namespaces, we could use `-n test.test,alpha.foo`. "
        "The default is to consider all the namespaces, "
        "excluding the system and config databases, and "
        "also ignoring the \"system.indexes\" collection in "
        "any database.")

    # -g is the destination namespace
    namespaces.add_cli(
        "-g", "--dest-namespace-set", dest="dest_ns_set", help=
        "Specify a destination namespace mapping. Each "
        "namespace provided in the --namespace-set option "
        "will be mapped respectively according to this "
        "comma-separated list. These lists must have "
        "equal length. The default is to use the identity "
        "mapping. This is currently only implemented "
        "for mongo-to-mongo connections.")

    # --gridfs-set is the set of GridFS namespaces to consider
    namespaces.add_cli(
        "--gridfs-set", dest="gridfs_set", help=
        "Used to specify the GridFS namespaces we want to "
        "consider. For example, if your metadata is stored in "
        "test.fs.files and chunks are stored in test.fs.chunks, "
        "you can use `--gridfs-set test.fs`.")

    def apply_doc_managers(option, cli_values):
        if cli_values['doc_manager'] is None:
            if cli_values['target_url']:
                raise errors.InvalidConfiguration(
                    "Cannot create a Connector with a target URL"
                    " but no doc manager.")
        else:
            if option.value is not None:
                bulk_size = option.value[0].get(
                    'bulkSize', constants.DEFAULT_MAX_BULK)
            else:
                bulk_size = constants.DEFAULT_MAX_BULK
            option.value = [{
                'docManager': cli_values['doc_manager'],
                'targetURL': cli_values['target_url'],
                'uniqueKey': cli_values['unique_key'],
                'autoCommitInterval': cli_values['auto_commit_interval'],
                'bulkSize': bulk_size
            }]

        if not option.value:
            return

        # validate doc managers and fill in default values
        for dm in option.value:
            if not isinstance(dm, dict):
                raise errors.InvalidConfiguration(
                    "Elements of docManagers must be a dict.")
            if 'docManager' not in dm:
                raise errors.InvalidConfiguration(
                    "Every element of docManagers"
                    " must contain 'docManager' property.")
            if not dm.get('targetURL'):
                dm['targetURL'] = None
            if not dm.get('uniqueKey'):
                dm['uniqueKey'] = constants.DEFAULT_UNIQUE_KEY
            if dm.get('autoCommitInterval') is None:
                dm['autoCommitInterval'] = constants.DEFAULT_COMMIT_INTERVAL
            if not dm.get('args'):
                dm['args'] = {}
            if not dm.get('bulkSize'):
                dm['bulkSize'] = constants.DEFAULT_MAX_BULK

            aci = dm['autoCommitInterval']
            if aci is not None and aci < 0:
                raise errors.InvalidConfiguration(
                    "autoCommitInterval must be non-negative.")

        def import_dm_by_name(name):
            try:
                full_name = "mongo_connector.doc_managers.%s" % name
                # importlib doesn't exist in 2.6, but __import__ is everywhere
                module = __import__(full_name, fromlist=(name,))
                dm_impl = module.DocManager
                if not issubclass(dm_impl, DocManagerBase):
                    raise TypeError("DocManager must inherit DocManagerBase.")
                return module
            except ImportError:
                raise errors.InvalidConfiguration(
                    "Could not import %s. It could be that this doc manager"
                    " has been moved out of this project and is maintained"
                    " elsewhere. Make sure that you have the doc manager"
                    " installed alongside mongo-connector. Check the README"
                    " for a list of available doc managers." % full_name)
            except (AttributeError, TypeError):
                raise errors.InvalidConfiguration(
                    "No definition for DocManager found in %s." % full_name)

        # instantiate the doc manager objects
        dm_instances = []
        for dm in option.value:
            module = import_dm_by_name(dm['docManager'])
            kwargs = {
                'unique_key': dm['uniqueKey'],
                'auto_commit_interval': dm['autoCommitInterval'],
                'chunk_size': dm['bulkSize']
            }
            for k in dm['args']:
                if k not in kwargs:
                    kwargs[k] = dm['args'][k]

            target_url = dm['targetURL']
            if target_url:
                dm_instances.append(module.DocManager(target_url, **kwargs))
            else:
                dm_instances.append(module.DocManager(**kwargs))

        option.value = dm_instances

    doc_managers = add_option(
        config_key="docManagers",
        default=None,
        type=list,
        apply_function=apply_doc_managers)

    # -d is to specify the doc manager file.
    doc_managers.add_cli(
        "-d", "--doc-manager", dest="doc_manager", help=
        "Used to specify the path to each doc manager "
        "file that will be used. DocManagers should be "
        "specified in the same order as their respective "
        "target addresses in the --target-urls option. "
        "URLs are assigned to doc managers "
        "respectively. Additional doc managers are "
        "implied to have no target URL. Additional URLs "
        "are implied to have the same doc manager type as "
        "the last doc manager for which a URL was "
        "specified. By default, Mongo Connector will use "
        "'doc_manager_simulator.py'.  It is recommended "
        "that all doc manager files be kept in the "
        "doc_managers folder in mongo-connector. For "
        "more information about making your own doc "
        "manager, see 'Writing Your Own DocManager' "
        "section of the wiki")

    # -t is to specify the URL of each target system.
    doc_managers.add_cli(
        "-t", "--target-url",
        dest="target_url", help=
        "Specify the URL to each target system being "
        "used. For example, if you were using Solr out of "
        "the box, you could use '-t "
        "http://localhost:8080/solr' with the "
        "SolrDocManager to establish a proper connection. "
        "URLs should be specified in the same order as "
        "their respective doc managers in the "
        "--doc-managers option.  URLs are assigned to doc "
        "managers respectively. Additional doc managers "
        "are implied to have no target URL. Additional "
        "URLs are implied to have the same doc manager "
        "type as the last doc manager for which a URL was "
        "specified. "
        "Don't use quotes around addresses. ")

    # -u is to specify the mongoDB field that will serve as the unique key
    # for the target system,
    doc_managers.add_cli(
        "-u", "--unique-key", dest="unique_key", help=
        "The name of the MongoDB field that will serve "
        "as the unique key for the target system. "
        "Note that this option does not apply "
        "when targeting another MongoDB cluster. "
        "Defaults to \"_id\".")

    # --auto-commit-interval to specify auto commit time interval
    doc_managers.add_cli(
        "--auto-commit-interval", type="int",
        dest="auto_commit_interval", help=
        "Seconds in-between calls for the Doc Manager"
        " to commit changes to the target system. A value of"
        " 0 means to commit after every write operation."
        " When left unset, Mongo Connector will not make"
        " explicit commits. Some systems have"
        " their own mechanism for adjusting a commit"
        " interval, which should be preferred to this"
        " option.")

    continue_on_error = add_option(
        config_key="continueOnError",
        default=False,
        type=bool)

    def apply_ssl(option, cli_values):
        option.value = option.value or {}
        ssl_certfile = cli_values.pop('ssl_certfile')
        ssl_keyfile = cli_values.pop('ssl_keyfile')
        ssl_cert_reqs = cli_values.pop('ssl_cert_reqs')
        ssl_ca_certs = (
            cli_values.pop('ssl_ca_certs') or option.value.get('sslCACerts'))

        if ssl_cert_reqs and ssl_cert_reqs != 'ignored' and not ssl_ca_certs:
            raise errors.InvalidConfiguration(
                '--ssl-ca-certs must be provided if the '
                '--ssl-certificate-policy is not "ignored".')
        option.value.setdefault('sslCertfile', ssl_certfile)
        option.value.setdefault('sslCACerts', ssl_ca_certs)
        option.value.setdefault('sslKeyfile', ssl_keyfile)
        option.value['sslCertificatePolicy'] = _SSL_POLICY_MAP.get(
            ssl_cert_reqs)
    ssl = add_option(
        config_key="ssl",
        default={},
        type=dict,
        apply_function=apply_ssl)
    ssl.add_cli(
        '--ssl-certfile', dest='ssl_certfile',
        help=('Path to a certificate identifying the local connection '
              'to MongoDB.')
    )
    ssl.add_cli(
        '--ssl-keyfile', dest='ssl_keyfile',
        help=('Path to the private key for --ssl-certfile. '
              'Not necessary if already included in --ssl-certfile.')
    )
    ssl.add_cli(
        '--ssl-certificate-policy', dest='ssl_cert_reqs',
        choices=('required', 'optional', 'ignored'),
        help=('Policy for validating SSL certificates provided from the other '
              'end of the connection. There are three possible values: '
              'required = Require and validate the remote certificate. '
              'optional = Validate the remote certificate only if one '
              'is provided. '
              'ignored = Remote SSL certificates are ignored completely.')
    )
    ssl.add_cli(
        '--ssl-ca-certs', dest='ssl_ca_certs',
        help=('Path to a concatenated set of certificate authority '
              'certificates to validate the other side of the connection.')
    )

    # --continue-on-error to continue to upsert documents during a collection
    # dump, even if the documents cannot be inserted for some reason
    continue_on_error.add_cli(
        "--continue-on-error", action="store_true",
        dest="continue_on_error", help=
        "By default, if any document fails to upsert"
        " during a collection dump, the entire operation fails."
        " When this flag is enabled, normally fatal errors"
        " will be caught and logged, allowing the collection"
        " dump to continue.\n"
        "Note: Applying oplog operations to an incomplete"
        " set of documents due to errors may cause undefined"
        " behavior. Use this flag to dump only.")

    config_file = add_option()
    config_file.add_cli(
        "-c", "--config-file", dest="config_file", help=
        "Specify a JSON file to load configurations from. You can find"
        " an example config file at mongo-connector/config.json")

    tz_aware = add_option(
        config_key="timezoneAware", default=False, type=bool)
    tz_aware.add_cli(
        "--tz-aware", dest="tz_aware", action="store_true",
        help="Make all dates and times timezone-aware.")

    return result
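
A standalone illustration of the dynamic-import pattern that import_dm_by_name relies on: __import__('a.b') returns the top-level package 'a', while a non-empty fromlist makes it return the submodule 'a.b' itself. Here 'logging.handlers' stands in for a real mongo_connector.doc_managers submodule:

# Demonstration of __import__ with fromlist; any importable dotted module
# name works, 'logging.handlers' is just a stand-in for a doc manager.
def import_submodule(dotted_name):
    tail = dotted_name.rsplit('.', 1)[-1]
    # Without the fromlist this would return the 'logging' package instead.
    return __import__(dotted_name, fromlist=[tail])

handlers = import_submodule('logging.handlers')
assert hasattr(handlers, 'TimedRotatingFileHandler')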
Example #3
def get_config_options():
    result = []

    def add_option(*args, **kwargs):
        opt = config.Option(*args, **kwargs)
        result.append(opt)
        return opt

    main_address = add_option(config_key="mainAddress",
                              default="localhost:27017",
                              type=str)

    # -m is for the main address, which is a host:port pair, ideally of the
    # mongos. For non-sharded clusters, it can be the primary.
    main_address.add_cli("-m",
                         "--main",
                         dest="main_address",
                         help="Specify the main address, which is a"
                         " host:port pair. For sharded clusters, this"
                         " should be the mongos address. For individual"
                         " replica sets, supply the address of the"
                         " primary. For example, `-m localhost:27217`"
                         " would be a valid argument to `-m`. Don't use"
                         " quotes around the address.")

    oplog_file = add_option(config_key="oplogFile",
                            default="oplog.timestamp",
                            type=str)

    # -o is to specify the oplog-config file. This file is used by the system
    # to store the last timestamp read on a specific oplog. This allows for
    # quick recovery from failure.
    oplog_file.add_cli("-o",
                       "--oplog-ts",
                       dest="oplog_file",
                       help="Specify the name of the file that stores the "
                       "oplog progress timestamps. "
                       "This file is used by the system to store the last "
                       "timestamp read on a specific oplog. This allows "
                       "for quick recovery from failure. By default this "
                       "is `oplog.timestamp`, which starts off empty. An "
                       "empty file causes the system to read through the "
                       "entire mongo oplog and sync all the documents. "
                       "Whenever the cluster is restarted, it is essential "
                       "that the oplog-timestamp config file be emptied - "
                       "otherwise the connector will miss some documents "
                       "and behave incorrectly.")

    no_dump = add_option(config_key="noDump", default=False, type=bool)

    # --no-dump specifies whether we should read an entire collection from
    # scratch if no timestamp is found in the oplog_config.
    no_dump.add_cli("--no-dump",
                    action="store_true",
                    dest="no_dump",
                    help="If specified, this flag will ensure that "
                    "mongo_connector won't read the entire contents of a "
                    "namespace, even if --oplog-ts points to an empty file.")

    def apply_verbosity(option, cli_values):
        if cli_values['verbose']:
            option.value = 3
        if option.value < 0 or option.value > 3:
            raise errors.InvalidConfiguration(
                "verbosity must be in the range [0, 3].")

    # Default is warnings and above.
    verbosity = add_option(config_key="verbosity",
                           default=1,
                           type=int,
                           apply_function=apply_verbosity)
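
    # For illustration: passing -v forces the level to 3 (most verbose),
    # and any configured value outside 0..3 is rejected by apply_verbosity.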

    # -v enables verbose logging
    verbosity.add_cli("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      help="Enables verbose logging.")

    def apply_logging(option, cli_values):
        log_mechs_enabled = [
            cli_values[m] for m in ('logfile', 'enable_syslog', 'stdout')
            if cli_values[m]
        ]
        if len(log_mechs_enabled) > 1:
            raise errors.InvalidConfiguration(
                "You cannot specify more than one logging method "
                "simultaneously. Please choose the logging method you "
                "prefer.")
        if cli_values['log_format']:
            option.value['format'] = cli_values['log_format']

        if cli_values['logfile']:
            when = cli_values['logfile_when']
            interval = cli_values['logfile_interval']
            if (when and when.startswith('W')
                    and interval != constants.DEFAULT_LOGFILE_INTERVAL):
                raise errors.InvalidConfiguration(
                    "You cannot specify a log rotation interval when rotating "
                    "based on a weekday (W0 - W6).")

            option.value['type'] = 'file'
            option.value['filename'] = cli_values['logfile']
            if when:
                option.value['rotationWhen'] = when
            if interval:
                option.value['rotationInterval'] = interval
            if cli_values['logfile_backups']:
                option.value['rotationBackups'] = cli_values['logfile_backups']

        if cli_values['enable_syslog']:
            option.value['type'] = 'syslog'

        if cli_values['syslog_host']:
            option.value['host'] = cli_values['syslog_host']

        if cli_values['syslog_facility']:
            option.value['facility'] = cli_values['syslog_facility']

        if cli_values['stdout']:
            option.value['type'] = 'stream'

        # Expand the full path to log file
        option.value['filename'] = os.path.abspath(option.value['filename'])
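
    # For illustration (hypothetical invocation): "-w out.log
    # --logfile-when H --logfile-interval 6" leaves option.value with
    # type='file', an absolute path for out.log, rotationWhen='H', and
    # rotationInterval=6; every other key keeps the defaults below.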

    default_logging = {
        'type': 'file',
        'filename': 'mongo-connector.log',
        'format': constants.DEFAULT_LOG_FORMAT,
        'rotationInterval': constants.DEFAULT_LOGFILE_INTERVAL,
        'rotationBackups': constants.DEFAULT_LOGFILE_BACKUPCOUNT,
        'rotationWhen': constants.DEFAULT_LOGFILE_WHEN,
        'host': constants.DEFAULT_SYSLOG_HOST,
        'facility': constants.DEFAULT_SYSLOG_FACILITY
    }

    logging = add_option(config_key="logging",
                         default=default_logging,
                         type=dict,
                         apply_function=apply_logging)

    # -w enables logging to a file
    logging.add_cli("-w",
                    "--logfile",
                    dest="logfile",
                    help="Log all output to the specified file.")

    logging.add_cli('--stdout',
                    dest='stdout',
                    action='store_true',
                    help='Log all output to STDOUT rather than a logfile.')

    logging.add_cli(
        "--log-format",
        dest="log_format",
        help="Define a specific format for the log file. "
        "This is based on the Python logging library. "
        "Available parameters can be found at "
        "https://docs.python.org/2/library/logging.html#logrecord-attributes")

    # -s is to enable syslog logging.
    logging.add_cli(
        "-s",
        "--enable-syslog",
        action="store_true",
        dest="enable_syslog",
        help="Used to enable logging to syslog. See --syslog-host for "
        "setting the syslog host.")

    # --syslog-host is to specify the syslog host.
    logging.add_cli("--syslog-host",
                    dest="syslog_host",
                    help="The syslog host, which may be an address like "
                    "'localhost:514' or, on Unix/Linux, the path to a Unix "
                    "domain socket such as '/dev/log'. "
                    "The default is 'localhost:514'.")

    # --syslog-facility is to specify the syslog facility.
    logging.add_cli("--syslog-facility",
                    dest="syslog_facility",
                    help="Used to specify the syslog facility."
                    " The default is 'user'.")

    # --logfile-when specifies the type of interval of the rotating file
    # (seconds, minutes, hours)
    logging.add_cli(
        "--logfile-when",
        action="store",
        dest="logfile_when",
        type="string",
        help="The type of interval for rotating the log file. "
        "Should be one of "
        "'S' (seconds), 'M' (minutes), 'H' (hours), "
        "'D' (days), 'W0' - 'W6' (days of the week 0 - 6), "
        "or 'midnight' (the default). See the Python documentation "
        "for 'logging.handlers.TimedRotatingFileHandler' for more "
        "details.")

    # --logfile-interval specifies when to create a new log file
    logging.add_cli("--logfile-interval",
                    action="store",
                    dest="logfile_interval",
                    type="int",
                    help="How frequently to rotate the log file, "
                    "specifically, how many units of the rotation interval "
                    "should pass before the rotation occurs. For example, "
                    "to create a new file each hour: "
                    "'--logfile-when=H --logfile-interval=1'. "
                    "Defaults to 1. You may not use this option if "
                    "--logfile-when is set to a weekday (W0 - W6). "
                    "See the Python documentation for "
                    "'logging.handlers.TimedRotatingFileHandler' for more "
                    "details.")

    # --logfile-backups specifies how many log files will be kept.
    logging.add_cli("--logfile-backups",
                    action="store",
                    dest="logfile_backups",
                    type="int",
                    help="How many log files will be kept after rotation. "
                    "If set to zero, then no log files will be deleted. "
                    "Defaults to 7.")

    def apply_authentication(option, cli_values):
        if cli_values['admin_username']:
            option.value['adminUsername'] = cli_values['admin_username']

        if cli_values['password']:
            option.value['password'] = cli_values['password']

        if cli_values['password_file']:
            option.value['passwordFile'] = cli_values['password_file']

        if option.value.get("adminUsername"):
            password = option.value.get("password")
            passwordFile = option.value.get("passwordFile")
            if not password and not passwordFile:
                raise errors.InvalidConfiguration(
                    "Admin username specified without password.")
            if password and passwordFile:
                raise errors.InvalidConfiguration(
                    "Can't specify both password and password file.")

    default_authentication = {
        'adminUsername': None,
        'password': None,
        'passwordFile': None
    }

    authentication = add_option(config_key="authentication",
                                default=default_authentication,
                                type=dict,
                                apply_function=apply_authentication)
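
    # For illustration: "-a admin -p secret" passes validation, while
    # "-a admin" alone (no password or password file) and supplying both
    # -p and -f at once each raise InvalidConfiguration.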

    # -a is to specify the username for authentication.
    authentication.add_cli(
        "-a",
        "--admin-username",
        dest="admin_username",
        help="Used to specify the username of an admin user to "
        "authenticate with. To use authentication, the user "
        "must specify both an admin username and a password "
        "(or a password file).")

    # -p is to specify the password used for authentication.
    authentication.add_cli(
        "-p",
        "--password",
        dest="password",
        help="Used to specify the password."
        " This is used by mongos to authenticate"
        " connections to the shards, and in the"
        " oplog threads. If authentication is not used,"
        " this option can be left unset.")

    # -f is to specify the password file. Its contents are used as the admin
    # password when authenticating connections to the shards and in the oplog
    # threads.
    authentication.add_cli(
        "-f",
        "--password-file",
        dest="password_file",
        help="Used to specify a file containing the password."
        " Use this option if you wish to specify a"
        " username and password but don't want to"
        " type in the password. The contents of this"
        " file should be the password for the admin user.")

    continue_on_error = add_option(config_key="continueOnError",
                                   default=False,
                                   type=bool)

    def apply_ssl(option, cli_values):
        option.value = option.value or {}
        ssl_certfile = cli_values.get('ssl_certfile')
        if ssl_certfile is None:
            ssl_certfile = option.value.get('sslCertfile')
        ssl_keyfile = cli_values.get('ssl_keyfile')
        if ssl_keyfile is None:
            ssl_keyfile = option.value.get('sslKeyfile')
        ssl_ca_certs = cli_values.get('ssl_ca_certs')
        if ssl_ca_certs is None:
            ssl_ca_certs = option.value.get('sslCACerts')
        ssl_cert_reqs = cli_values.get('ssl_cert_reqs')
        if ssl_cert_reqs is None:
            ssl_cert_reqs = option.value.get('sslCertificatePolicy')

        if ssl_cert_reqs is not None:
            if ssl_cert_reqs not in _SSL_POLICY_MAP:
                raise errors.InvalidConfiguration(
                    'sslCertificatePolicy (--ssl-certificate-policy) must be '
                    'one of %s, got "%s"' %
                    (list(_SSL_POLICY_MAP.keys()), ssl_cert_reqs))
            if (pymongo.version_tuple < (3, 0)
                    and ssl_cert_reqs != 'ignored' and not ssl_ca_certs):
                raise errors.InvalidConfiguration(
                    '--ssl-certificate-policy is not "ignored" and '
                    '--ssl-ca-certs was not provided. Either upgrade '
                    'PyMongo to >= 3.0 to load system provided CA '
                    'certificates or specify a CA file with --ssl-ca-certs.')

        option.value['sslCertfile'] = ssl_certfile
        option.value['sslCACerts'] = ssl_ca_certs
        option.value['sslKeyfile'] = ssl_keyfile
        option.value['sslCertificatePolicy'] = _SSL_POLICY_MAP.get(
            ssl_cert_reqs)
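
    # Note the precedence implemented above: each CLI flag overrides the
    # corresponding config-file key, and on PyMongo < 3.0 a CA file is
    # required whenever certificate validation is requested.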

    ssl = add_option(config_key="ssl",
                     default={},
                     type=dict,
                     apply_function=apply_ssl)
    ssl.add_cli('--ssl-certfile',
                dest='ssl_certfile',
                help=('Path to a certificate identifying the local connection '
                      'to MongoDB.'))
    ssl.add_cli('--ssl-keyfile',
                dest='ssl_keyfile',
                help=('Path to the private key for --ssl-certfile. '
                      'Not necessary if already included in --ssl-certfile.'))
    ssl.add_cli(
        '--ssl-certificate-policy',
        dest='ssl_cert_reqs',
        choices=('required', 'optional', 'ignored'),
        help=('Policy for validating SSL certificates provided from the other '
              'end of the connection. There are three possible values: '
              'required = Require and validate the remote certificate. '
              'optional = The same as "required", unless the server was '
              'configured to use anonymous ciphers. '
              'ignored = Remote SSL certificates are ignored completely.'))
    ssl.add_cli(
        '--ssl-ca-certs',
        dest='ssl_ca_certs',
        help=('Path to a concatenated set of certificate authority '
              'certificates to validate the other side of the connection.'))

    # --continue-on-error to continue to upsert documents during a collection
    # dump, even if the documents cannot be inserted for some reason
    continue_on_error.add_cli(
        "--continue-on-error",
        action="store_true",
        dest="continue_on_error",
        help="By default, if any document fails to upsert"
        " during a collection dump, the entire operation fails."
        " When this flag is enabled, normally fatal errors"
        " will be caught and logged, allowing the collection"
        " dump to continue.\n"
        "Note: Applying oplog operations to an incomplete"
        " set of documents due to errors may cause undefined"
        " behavior. Use this flag only when performing a dump.")

    config_file = add_option()
    config_file.add_cli(
        "-c",
        "--config-file",
        dest="config_file",
        help="Specify a JSON file to load configurations from. You can find"
        " an example config file at mongo-connector/config.json")

    tz_aware = add_option(config_key="timezoneAware", default=False, type=bool)
    tz_aware.add_cli("--tz-aware",
                     dest="tz_aware",
                     action="store_true",
                     help="Make all dates and times timezone-aware.")

    return result
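
The apply_* callbacks above share one precedence rule: an explicit CLI
value wins, otherwise a value already loaded from the JSON config file is
kept, otherwise the hard-coded default stands. A minimal sketch of that
rule in isolation (resolve() is illustrative, not mongo-connector API):

def resolve(cli_value, config_value, default):
    """Return the effective setting under CLI > config file > default."""
    if cli_value is not None:
        return cli_value
    if config_value is not None:
        return config_value
    return default


# Mirroring apply_ssl's handling of sslCertfile:
assert resolve(None, 'conf.pem', 'default.pem') == 'conf.pem'
assert resolve('cli.pem', 'conf.pem', 'default.pem') == 'cli.pem'
assert resolve(None, None, 'default.pem') == 'default.pem'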