Esempio n. 1
0
 def test_negative(self):
     """Expect is_master_node to be False when the local node id does not
     match the cluster's elected master node id."""
     es = Mock()
     es.nodes.info.return_value = {'nodes': {"bad": "mojo"}}
     es.cluster.state.return_value = {"master_node": "foo"}
     self.assertFalse(curator.is_master_node(es))
Esempio n. 2
0
 def test_positive(self):
     """Expect is_master_node to be True when the local node id matches
     the cluster's elected master node id."""
     es = Mock()
     es.nodes.info.return_value = {'nodes': {"foo": "bar"}}
     es.cluster.state.return_value = {"master_node": "foo"}
     self.assertTrue(curator.is_master_node(es))
Esempio n. 3
0
 def test_negative(self):
     """is_master_node should report False for a non-master node."""
     mock_client = Mock()
     node_info = {'nodes': {"bad": "mojo"}}
     cluster_state = {"master_node": "foo"}
     mock_client.nodes.info.return_value = node_info
     mock_client.cluster.state.return_value = cluster_state
     self.assertFalse(curator.is_master_node(mock_client))
Esempio n. 4
0
 def test_positive(self):
     """is_master_node should report True for the elected master node."""
     mock_client = Mock()
     node_info = {'nodes': {"foo": "bar"}}
     cluster_state = {"master_node": "foo"}
     mock_client.nodes.info.return_value = node_info
     mock_client.cluster.state.return_value = cluster_state
     self.assertTrue(curator.is_master_node(mock_client))
Esempio n. 5
0
def main():
    """Command-line entry point for curator.

    Parses CLI arguments, validates option combinations that argparse
    cannot express, configures logging, builds the Elasticsearch client,
    and dispatches to the handler bound to the chosen sub-command via
    ``arguments.func``.

    Exits:
        1 -- when a required mutually-exclusive option is missing.
        0 -- when --master-only is set but this node is not the master.
    """
    start = time.time()

    parser = make_parser()
    arguments = parser.parse_args()

    # Initialize timeout_override
    # 'optimize' is long-running, so it gets the extended timeout below.
    timeout_override = True if arguments.command == 'optimize' else False

    # Argparse nearly gets all conditions covered.
    # These remain because mutually exclusive arguments must be optional.
    if arguments.command == 'alias':
        if not arguments.alias_older_than and not arguments.unalias_older_than:
            print(
                '{0} delete: error: expect one of --alias-older-than or --unalias-older-than'
                .format(sys.argv[0]))
            sys.exit(1)

    if arguments.command == 'delete':
        if not arguments.older_than and not arguments.disk_space:
            print(
                '{0} delete: error: expect one of --older-than or --disk-space'
                .format(sys.argv[0]))
            sys.exit(1)

    if arguments.command == 'show':
        # Do not log and force dry-run if we opt to show indices or snapshots.
        arguments.log_file = os.devnull
        arguments.dry_run = True
        if not arguments.show_indices and not arguments.show_snapshots:
            print(
                '{0} show: error: expect one of --show-indices or --show-snapshots'
                .format(sys.argv[0]))
            sys.exit(1)
        if arguments.show_snapshots and not arguments.repository:
            print(
                '{0} show: error: --repository required with --show-snapshots'.
                format(sys.argv[0]))
            sys.exit(1)

    if arguments.command == 'snapshot':
        if arguments.older_than is None and arguments.most_recent is None and arguments.delete_older_than is None and not arguments.all_indices:
            print(
                '{0} snapshot: error: expect one of --all-indices, --older-than, --most-recent, or --delete-older-than'
                .format(sys.argv[0]))
            sys.exit(1)
        # Snapshot creation can be slow; extend the timeout for these modes.
        if arguments.older_than or arguments.most_recent or arguments.all_indices:
            timeout_override = True

    # Setup logging
    if arguments.debug:
        numeric_log_level = logging.DEBUG
        format_string = '%(asctime)s %(levelname)-9s %(name)22s %(funcName)22s:%(lineno)-4d %(message)s'
    else:
        # getattr returns None for unknown level names; reject those below.
        numeric_log_level = getattr(logging, arguments.log_level.upper(), None)
        format_string = '%(asctime)s %(levelname)-9s %(message)s'
        if not isinstance(numeric_log_level, int):
            raise ValueError('Invalid log level: %s' % arguments.log_level)

    # Log to the requested file (append mode) or stderr by default.
    # NOTE(review): the file handle is deliberately left open for the
    # lifetime of the process.
    handler = logging.StreamHandler(
        open(arguments.log_file, 'a') if arguments.log_file else sys.stderr)
    if arguments.logformat == 'logstash':
        handler.setFormatter(LogstashFormatter())
    else:
        handler.setFormatter(logging.Formatter(format_string))
    logging.root.addHandler(handler)
    logging.root.setLevel(numeric_log_level)

    # Filter out logging from Elasticsearch and associated modules by default
    if not arguments.debug:
        for handler in logging.root.handlers:
            handler.addFilter(
                Whitelist('root', '__main__', 'curator', 'curator.curator'))

    # Setting up NullHandler to handle nested elasticsearch.trace Logger instance in elasticsearch python client
    logging.getLogger('elasticsearch.trace').addHandler(NullHandler())

    logging.info("Job starting...")

    if arguments.dry_run:
        logging.info("DRY RUN MODE.  No changes will be made.")

    # Override the timestamp in case the end-user doesn't.
    # Only bump the timeout when the user left it at the default (30s).
    if timeout_override and arguments.timeout == 30:
        logger.info(
            'Default timeout of 30 seconds is too low for command {0}.  Overriding to 21,600 seconds (6 hours).'
            .format(arguments.command.upper()))
        arguments.timeout = 21600

    client = elasticsearch.Elasticsearch(host=arguments.host,
                                         http_auth=arguments.auth,
                                         port=arguments.port,
                                         url_prefix=arguments.url_prefix,
                                         timeout=arguments.timeout,
                                         use_ssl=arguments.ssl)

    # Verify the version is acceptable.
    check_version(client)

    # Bail out early when restricted to running on the elected master only.
    if arguments.master_only and not curator.is_master_node(client):
        logger.info(
            'Master-only flag detected. Connected to non-master node. Aborting.'
        )
        sys.exit(0)

    if arguments.command != "show":
        if arguments.timestring:
            validate_timestring(arguments.timestring, arguments.time_unit)
        else:  # Set default timestrings
            arguments.timestring = DATEMAP[arguments.time_unit]
            logging.debug("Setting default timestring for {0} to {1}".format(
                arguments.time_unit, arguments.timestring))
        logging.debug("Matching indices with pattern: {0}{1}".format(
            arguments.prefix, arguments.timestring))

    # Execute the command specified in the arguments
    # NOTE(review): arguments.func is presumably bound by make_parser()
    # via set_defaults(func=...) — verify against the parser definition.
    argdict = arguments.__dict__
    logging.debug("argdict = {0}".format(argdict))
    arguments.func(client, **argdict)

    logger.info('Done in {0}.'.format(timedelta(seconds=time.time() - start)))
Esempio n. 6
0
def main():
    """Command-line entry point for curator (basicConfig-based variant).

    Parses CLI arguments, validates option combinations that argparse
    cannot express, configures logging via logging.basicConfig, builds
    the Elasticsearch client, and dispatches to the handler bound to
    the chosen sub-command via ``arguments.func``.

    Exits:
        1 -- when a required mutually-exclusive option is missing.
        0 -- when --master-only is set but this node is not the master.
    """
    start = time.time()

    parser = make_parser()
    arguments = parser.parse_args()

    # Initialize timeout_override
    # 'optimize' is long-running, so it gets the extended timeout below.
    timeout_override = True if arguments.command == 'optimize' else False

    # Argparse nearly gets all conditions covered.
    # These remain because mutually exclusive arguments must be optional.
    if arguments.command == 'alias':
        if not arguments.alias_older_than and not arguments.unalias_older_than:
            print('{0} delete: error: expect one of --alias-older-than or --unalias-older-than'.format(sys.argv[0]))
            sys.exit(1)

    if arguments.command == 'delete':
        if not arguments.older_than and not arguments.disk_space:
            print('{0} delete: error: expect one of --older-than or --disk-space'.format(sys.argv[0]))
            sys.exit(1)

    if arguments.command == 'show':
        # Do not log and force dry-run if we opt to show indices or snapshots.
        arguments.log_file = os.devnull
        arguments.dry_run = True
        if not arguments.show_indices and not arguments.show_snapshots:
            print('{0} show: error: expect one of --show-indices or --show-snapshots'.format(sys.argv[0]))
            sys.exit(1)
        if arguments.show_snapshots and not arguments.repository:
            print('{0} show: error: --repository required with --show-snapshots'.format(sys.argv[0]))
            sys.exit(1)

    if arguments.command == 'snapshot':
        if not arguments.older_than and not arguments.most_recent and not arguments.delete_older_than and not arguments.all_indices:
            print('{0} snapshot: error: expect one of --all-indices, --older-than, --most-recent, or --delete-older-than'.format(sys.argv[0]))
            sys.exit(1)
        # Snapshot creation can be slow; extend the timeout for these modes.
        # NOTE(review): unlike --older-than/--most-recent, --all-indices does
        # NOT trigger the timeout override here — confirm this is intended.
        if arguments.older_than or arguments.most_recent:
            timeout_override = True

    # Setup logging
    if arguments.debug:
        numeric_log_level = logging.DEBUG
        format_string = '%(asctime)s %(levelname)-9s %(name)22s %(funcName)22s:%(lineno)-4d %(message)s'
    else:
        # getattr returns None for unknown level names; reject those below.
        numeric_log_level = getattr(logging, arguments.log_level.upper(), None)
        format_string = '%(asctime)s %(levelname)-9s %(message)s'
        if not isinstance(numeric_log_level, int):
            raise ValueError('Invalid log level: %s' % arguments.log_level)

    # For logstash output, force UTC timestamps and emit JSON-shaped records.
    # NOTE(review): time.tzset() is POSIX-only — this branch will fail on
    # Windows; confirm the supported platforms.
    date_string = None
    if arguments.logformat == 'logstash':
        os.environ['TZ'] = 'UTC'
        time.tzset()
        format_string = '{"@timestamp":"%(asctime)s.%(msecs)03dZ", "loglevel":"%(levelname)s", "name":"%(name)s", "function":"%(funcName)s", "linenum":"%(lineno)d", "message":"%(message)s"}'
        date_string = '%Y-%m-%dT%H:%M:%S'

    # Log to the requested file (append mode) or stderr by default.
    # NOTE(review): the file handle is deliberately left open for the
    # lifetime of the process.
    logging.basicConfig(level=numeric_log_level,
                        format=format_string,
                        datefmt=date_string,
                        stream=open(arguments.log_file, 'a') if arguments.log_file else sys.stderr)

    # Filter out logging from Elasticsearch and associated modules by default
    if not arguments.debug:
        for handler in logging.root.handlers:
            handler.addFilter(Whitelist('root', '__main__', 'curator', 'curator.curator'))

    # Setting up NullHandler to handle nested elasticsearch.trace Logger instance in elasticsearch python client
    logging.getLogger('elasticsearch.trace').addHandler(NullHandler())

    logging.info("Job starting...")

    if arguments.dry_run:
        logging.info("DRY RUN MODE.  No changes will be made.")

    # Override the timestamp in case the end-user doesn't.
    # Only bump the timeout when the user left it at the default (30s).
    if timeout_override and arguments.timeout == 30:
        logger.info('Default timeout of 30 seconds is too low for command {0}.  Overriding to 21,600 seconds (6 hours).'.format(arguments.command.upper()))
        arguments.timeout = 21600

    client = elasticsearch.Elasticsearch(host=arguments.host, http_auth=arguments.auth, port=arguments.port, url_prefix=arguments.url_prefix, timeout=arguments.timeout, use_ssl=arguments.ssl)

    # Verify the version is acceptable.
    check_version(client)

    # Bail out early when restricted to running on the elected master only.
    if arguments.master_only and not curator.is_master_node(client):
        logger.info('Master-only flag detected. Connected to non-master node. Aborting.')
        sys.exit(0)

    if arguments.command != "show":
        if arguments.timestring:
            validate_timestring(arguments.timestring, arguments.time_unit)
        else: # Set default timestrings
            arguments.timestring = DATEMAP[arguments.time_unit]
            logging.debug("Setting default timestring for {0} to {1}".format(arguments.time_unit, arguments.timestring))
        logging.debug("Matching indices with pattern: {0}{1}".format(arguments.prefix,arguments.timestring))

    # Execute the command specified in the arguments
    # NOTE(review): arguments.func is presumably bound by make_parser()
    # via set_defaults(func=...) — verify against the parser definition.
    argdict = arguments.__dict__
    logging.debug("argdict = {0}".format(argdict))
    arguments.func(client, **argdict)

    logger.info('Done in {0}.'.format(timedelta(seconds=time.time()-start)))