Пример #1
0
 def get_service(self, inputs):
     """Build an authenticated splunklib Service from modular-input metadata.

     :param inputs: input definition whose ``metadata`` carries the
         ``server_uri`` and ``session_key`` supplied by splunkd.
     :return: a ``Service`` bound to localhost on splunkd's management port.
     """
     from urllib.parse import urlsplit

     # Parse the port out of the URI instead of slicing a fixed-width prefix
     # (the original used server_uri[18:], which only works when the scheme
     # and host happen to be exactly 18 characters long).
     server_uri = str(inputs.metadata["server_uri"])
     splunk_port = urlsplit(server_uri).port
     session_key = str(inputs.metadata["session_key"])
     return Service(host='localhost', port=splunk_port, token=session_key)
Пример #2
0
 def _create_unauthenticated_service(self):
     """Return a Service built from host/port/scheme only (no credentials)."""
     kwargs = self.opts.kwargs
     return Service(host=kwargs['host'],
                    port=kwargs['port'],
                    scheme=kwargs['scheme'])
Пример #3
0
    def service(self):
        """ Returns a Splunk service object for this script invocation.

        The service object is created from the Splunkd URI and session key
        passed to the command invocation on the modular input stream. It is
        available as soon as the :code:`Script.stream_events` method is
        called.

        :return: :class:splunklib.client.Service. A value of None is returned,
        if you call this method before the :code:`Script.stream_events` method
        is called.

        """
        # Memoized: the client is built once and reused on later calls.
        if self._service is None:
            definition = self._input_definition
            if definition is None:
                # stream_events has not run yet, so there is nothing to
                # connect with.
                return None

            metadata = definition.metadata
            parts = urlsplit(metadata["server_uri"], allow_fragments=False)

            self._service = Service(scheme=parts.scheme,
                                    host=parts.hostname,
                                    port=parts.port,
                                    token=metadata["session_key"])

        return self._service
Пример #4
0
def check_splunk():
    """Verify that a local Splunk instance is reachable and new enough.

    Connects to splunkd at ``settings.SPLUNKD_HOST:SPLUNKD_PORT`` and exits
    the process with status 1 when the connection fails or the reported
    version is older than 5.0 (internal builds, which report a large build
    number as the major version, are accepted).
    """
    setup_django_environment()

    from django.conf import settings
    from splunklib.client import Service

    host = settings.SPLUNKD_HOST
    port = settings.SPLUNKD_PORT
    service = Service(
        token="unnecessary_token",
        host=host,
        port=port
    )

    try:
        info = service.info()
        # list() so the result is indexable on Python 3, where map() is lazy.
        version = list(map(int, info.version.split(".")))
    except Exception:
        # Python 3 print function; the original used Python 2 print
        # statements, which are a SyntaxError on Python 3.
        print("Could not connect to Splunk at %s:%s" % (host, port))
        sys.exit(1)

    # Make sure it is greater than Splunk 5.0, or an internal build
    if version[0] < 5 and not version[0] > 1000:
        print("You have Splunk %s, but Splunk AppFx requires Splunk 5.0 or later" % info.version)
        sys.exit(1)
Пример #5
0
 def do_additional_setup(self):
     """Apply the configured log level and cache the splunkd messages client."""
     level = self.inputs.get('restarter').get('log_level', 'INFO')
     logger.setLevel(log.parse_log_level(level))
     svc = Service(owner='nobody',
                   app=em_constants.APP_NAME,
                   port=getDefault('port'),
                   token=self.session_key)
     self.splunkd_messages_service = svc.messages
Пример #6
0
    def from_url(cls, server_uri, stanza):
        """Create an instance by logging in to splunkd at *server_uri* and
        collecting the modular-input stanzas matching *stanza*."""
        parts = urllib.parse.urlparse(server_uri)

        service = Service(
            scheme=parts.scheme,
            host=parts.hostname,
            port=parts.port,
            username=parts.username,
            password=parts.password,
        )
        service.login()

        log_dir = environ.get_log_folder()

        kind, name = cls._split_stanza(stanza)
        # Keep only the inputs whose kind and name both match the stanza.
        inputs = [
            Stanza(kind, name, item.content)
            for item in service.inputs
            if item.kind == kind and item.name == name
        ]

        return cls(parts.scheme, parts.hostname, parts.port, service.token,
                   environ.get_checkpoint_folder(kind), log_dir, inputs)
Пример #7
0
 def __init__(self):
     """Set up the splunkd client, saved-search state, and control checkpoint."""
     self.svc = Service(owner='nobody',
                        app=em_constants.APP_NAME,
                        port=getDefault('port'),
                        token=session['authtoken'])
     self.savedsearch_with_states = []
     self._prepare_savedsearches()
     self.checkpoint = MigrationProcessControlCheckpoint.get_or_create()
Пример #8
0
 def conf_stanzas(cls):
     """Return the conf collection named by ``cls.storage_name()``."""
     service = Service(owner='nobody',
                       app=cls.app_name(),
                       port=getDefault('port'),
                       token=session['authtoken'])
     return service.confs[cls.storage_name()]
def oauth2_token_updater(token):
    """Persist refreshed OAuth2 tokens back onto this modular input's stanza.

    :param token: dict carrying ``access_token`` and ``refresh_token`` keys,
        as returned by the OAuth2 token endpoint.
    """
    try:
        service = Service(host='localhost', port=SPLUNK_PORT, token=SESSION_TOKEN)
        # Index the inputs collection directly instead of calling __getitem__.
        # NOTE(review): assumes STANZA carries a 7-character prefix before the
        # input name — confirm against how STANZA is built.
        item = service.inputs[STANZA[7:]]
        item.update(oauth2_access_token=token["access_token"],
                    oauth2_refresh_token=token["refresh_token"],
                    activation_key=activation_key)
    except RuntimeError as e:  # "except X, e" is Python-2-only syntax
        logging.error("Looks like an error updating the oauth2 token: %s" % str(e))
Пример #10
0
 def store(cls):
     """Return the KV store collection named by ``cls.storage_name()``."""
     service = Service(owner='nobody',
                       app=cls.app_name(),
                       port=getDefault('port'),
                       token=session['authtoken'])
     return service.kvstore[cls.storage_name()]
Пример #11
0
    def _clone_existing_service(self):
        """
        Clones the existing service with the exception that it re-reads the
        connection info from the Splunk instance.

        @return: The newly created Service
        @rtype: Service
        """
        arguments = self._service_arguments
        return Service(handler=self.DEFAULT_HANDLER, **arguments)
 def do_additional_setup(self):
     """Apply the job's configured log level and build the splunkd client."""
     job_settings = self.inputs.get('job', {})
     logger.setLevel(log.parse_log_level(job_settings.get('log_level', 'WARNING')))
     self.service = Service(owner='nobody',
                            app=em_constants.APP_NAME,
                            port=getDefault('port'),
                            token=session['authtoken'])
Пример #13
0
    def __init__(
        self,
        splunk,
        username=None,
        password=None,
        namespace=None,
        sharing=DEFAULT_SHARING,
        owner=None,
        app=None,
    ):
        """
        Creates a new connector.

        The connector will not be logged in when created so you have to manually
        login.

        @param splunk: The Splunk instance
        @type splunk: L{..splunk.Splunk}
        @param username: The username to use. If None (default)
                         L{Connector.DEFAULT_USERNAME} is used.
        @type username: str
        @param password: The password to use. If None (default)
                         L{Connector.DEFAULT_PASSWORD} is used.
        @type password: str
        @param namespace: Deprecated. Use owner and app instead.
        @type namespace: str
        @param sharing: used by python sdk service
        @type sharing: str
        @param owner: used by python sdk service
        @type owner: str
        @param app: used by python sdk service
        @type app: str
        """

        super(SDKConnector, self).__init__(splunk,
                                           username=username,
                                           password=password,
                                           owner=owner,
                                           app=app)
        # Reject the deprecated namespace argument unless it matches the
        # namespace derived from owner/app.  (Message previously misspelled
        # "deprecated" as "derecated".)
        if namespace is not None and namespace != self.namespace:
            msg = ("namespace is deprecated. please use owner and app. "
                   "Your namespace setting : %s, owner&app setting:%s" %
                   (namespace, self.namespace))
            self.logger.error(msg)
            raise Exception(msg)
        self.sharing = (
            sharing  # accepting None value, so SDK takes owner and app blindly.
        )

        self._service = Service(handler=self.DEFAULT_HANDLER,
                                **self._service_arguments)
        # Rebuild the service whenever the Splunk instance restarts.
        splunk.register_start_listener(self._recreate_service)

        # TODO: TEMPORARY FOR EST-1859
        self._server_settings_endpoint = Endpoint(self._service,
                                                  self.PATH_SERVER_SETTINGS)
def checkParamUpdated(cached, current, rest_name):
    """Write *current* back to this modular input's stanza when it differs
    from the previously persisted *cached* value.

    :param cached: previously persisted value.
    :param current: value currently in effect.
    :param rest_name: REST field name of the parameter to update.
    """
    if cached != current:
        try:
            service = Service(host='localhost', port=SPLUNK_PORT, token=SESSION_TOKEN)
            # NOTE(review): assumes STANZA carries a 7-character prefix before
            # the input name — confirm against how STANZA is built.
            item = service.inputs[STANZA[7:]]
            item.update(**{rest_name: current, "activation_key": activation_key})
        except RuntimeError as e:  # "except X, e" is Python-2-only syntax
            logging.error("Looks like an error updating the modular input parameter %s: %s" % (rest_name, str(e),))
 def service(self):
     """Lazily build and cache a Service authenticated with the session key.

     Raises CustomAlertActionException when no session key has been set.
     """
     if not self.session_key:
         raise CustomAlertActionException(
             'session key not set before accessing service')
     if not self._service:
         self._service = Service(owner='nobody',
                                 app=em_constants.APP_NAME,
                                 token=self.session_key)
     return self._service
Пример #16
0
def update_rayid(req_args, ray_id):
    """Persist the caller-supplied start_id as last_ray_id on this input's
    stanza, unless it is absent or unchanged.

    :param req_args: request dict whose ``params`` may carry ``start_id``.
    :param ray_id: currently stored ray id; no-op when start_id equals it.
    """
    params = req_args['params']
    if 'start_id' not in params or params['start_id'] == ray_id:
        return

    try:
        service = Service(host='localhost', port=SPLUNK_PORT, token=SESSION_TOKEN)
        # NOTE(review): assumes STANZA carries a 13-character prefix before
        # the input name — confirm against how STANZA is built.
        item = service.inputs[STANZA[13:]]
        item.update(last_ray_id=params['start_id'])
    except RuntimeError as e:  # "except X, e" is Python-2-only syntax
        # The original referenced an undefined rest_name and passed a 2-tuple
        # to a single %s placeholder; log only the error text.
        logging.error("Looks like an error updating the modular input parameter last_ray_id: %s" % str(e))
Пример #17
0
 def create_splunk_service(self):
     """Build a Service for this app from the stored connection context."""
     ctx = self._context
     return Service(
         owner='nobody',
         app=self._app_name,
         scheme=ctx.server_scheme,
         host=ctx.server_host,
         port=ctx.server_port,
         token=ctx.token,
     )
Пример #18
0
    def bulk_delete(cls, delete_filter_dict=None, exclusion_list=None):
        '''
        Bulk delete entities specified by delete_filter_dict, if delete_filter_dict is None then delete all entities
        This method will also delete any associated alerts of the deleted entities

        :param delete_filter_dict: a entity filter dict that specifies the entities to delete. e.g. {'_key': ['a', 'b']}
        :param exclusion_list: a list of keys of entities that should *NOT* be deleted
        '''
        # build exclusion query
        # Excluded entities survive the delete by negating a key-match query.
        exclusion_list = [] if exclusion_list is None else exclusion_list
        exclusion_query = None
        if len(exclusion_list):
            exclusion_filter = {'_key': exclusion_list}
            exclusion_query = em_common.negate_special_mongo_query(
                cls.convert_filter_to_kvstore_query(exclusion_filter))

        # build delete query
        filter_delete_query = cls.convert_filter_to_kvstore_query(
            delete_filter_dict)
        if exclusion_query is None:
            delete_query = filter_delete_query
        else:
            # An empty dict is a match-all filter, used when only an
            # exclusion list was supplied.
            if filter_delete_query is None:
                filter_delete_query = {}
            delete_query = {'$and': [filter_delete_query, exclusion_query]}

        # get key of entities to be deleted
        # Keys must be captured BEFORE the kvstore records are removed, so
        # the associated alert saved searches can still be located afterwards.
        entities_to_delete = super(EmEntity, cls).load(limit=0,
                                                       skip=0,
                                                       sort_keys_and_orders=[],
                                                       fields='',
                                                       query=delete_query)
        entity_keys_to_delete = [entity.key for entity in entities_to_delete]

        if len(entity_keys_to_delete):
            # bulk delete entities
            cls.storage_bulk_delete(delete_query)
            # bulk delete all associated alert savedsearch
            # (alerts are found via their alert.managedBy=<app>:<key> tag)
            svc = Service(token=session['authtoken'],
                          app=em_constants.APP_NAME,
                          owner='nobody')
            # delete alerts in batches to avoid 'Request-URI Too Long'
            batch_size = 4000  # found by trial and error
            batches = (
                entity_keys_to_delete[x:x + batch_size]
                for x in range(0, len(entity_keys_to_delete), batch_size))
            for batch in batches:
                for alert_ss in svc.saved_searches.iter(search=' OR '.join(
                        'alert.managedBy={}:{}'.format(em_constants.APP_NAME,
                                                       eid) for eid in batch)):
                    logger.info('bulk delete cleanup - deleting alert %s' %
                                alert_ss.name)
                    alert_ss.delete()
def _build_config(splunkd_uri, session_key):
    """Return a ConfigManager bound to the Splunk_TA_aws app on splunkd."""
    parts = urlparse(splunkd_uri)
    service = Service(
        owner='nobody',
        app='Splunk_TA_aws',
        scheme=parts.scheme,
        host=parts.hostname,
        port=parts.port,
        token=session_key,
    )
    return ConfigManager(service)
Пример #20
0
 def do_additional_setup(self):
     """Configure logging, the messages client, and the inputs conf manager."""
     # set log level
     level = self.inputs.get('job', {}).get('log_level', 'WARNING')
     logger.setLevel(log.parse_log_level(level))
     # set up message service
     port = getDefault('port')
     token = session['authtoken']
     svc = Service(owner='nobody',
                   app=APP_NAME,
                   port=port,
                   token=token)
     self.splunkd_messages_service = svc.messages
     # set up conf file manager
     self.inputs_conf = conf_manager.ConfManager(
         token, APP_NAME, port=port).get_conf('inputs')
Пример #21
0
 def setUp(self):
     """Create a logged-in Service, a fresh KV store collection, and the
     storage wrapper under test."""
     AmpStorageWrapper.COLLECTION_NAME = self.COLLECTION_NAME
     auth = SPLUNK_AUTH_OPTIONS
     self.service = Service(owner='nobody',
                            app='amp4e_events_input',
                            scheme=auth['scheme'],
                            host=auth['host'],
                            port=auth['port'],
                            username=auth['username'],
                            password=auth['password'])
     self.service.login()
     self.service.kvstore.create(self.COLLECTION_NAME)
     self.metadata = MockDefinitions(self.service.token).metadata
     self.stream_representation = {'input_name': self.metadata['name']}
     self.storage = AmpStorageWrapper(self.metadata)
Пример #22
0
    def bulk_delete(cls, delete_filter_dict=None, exclusion_list=None):
        '''
        Bulk delete groups specified by delete_filter_dict, if delete_filter_dict is None then delete all groups
        This method will also delete any associated alerts of the deleted groups

        :param delete_filter_dict: a entity filter dict that specifies the groups to delete. e.g. {'_key': ['a', 'b']}
        :param exclusion_list: a list of keys of groups that should *NOT* be deleted
        '''
        # build exclusion query
        # Excluded groups survive the delete by negating a key-match query.
        exclusion_list = [] if exclusion_list is None else exclusion_list
        exclusion_query = None
        if len(exclusion_list):
            exclusion_filter = {'_key': exclusion_list}
            exclusion_query = em_common.negate_special_mongo_query(
                em_common.convert_query_params_to_mongoDB_query(
                    exclusion_filter, MONGODB_RESPECT_CASE))

        # build delete query
        # An empty dict is a match-all filter (delete everything).
        delete_filter_dict = {} if delete_filter_dict is None else delete_filter_dict
        filter_delete_query = em_common.convert_query_params_to_mongoDB_query(
            delete_filter_dict, MONGODB_RESPECT_CASE)

        if exclusion_query is None:
            delete_query = filter_delete_query
        else:
            delete_query = {'$and': [filter_delete_query, exclusion_query]}

        # get key of groups to be deleted
        # Keys must be captured BEFORE the kvstore records are removed, so
        # the associated alert saved searches can still be located afterwards.
        groups_to_delete = super(EMGroup, cls).load(limit=0,
                                                    skip=0,
                                                    sort_keys_and_orders=[],
                                                    fields='',
                                                    query=delete_query)
        group_keys_to_delete = [group.key for group in groups_to_delete]

        # bulk delete groups
        if len(group_keys_to_delete):
            cls.storage_bulk_delete(delete_query)
            # bulk delete all associated alert savedsearch
            # (alerts are found via their alert.managedBy=<app>:<key> tag)
            svc = Service(token=session['authtoken'],
                          app=em_constants.APP_NAME,
                          owner='nobody')
            for alert_ss in svc.saved_searches.iter(search=' OR '.join(
                    'alert.managedBy={}:{}'.format(em_constants.APP_NAME, key)
                    for key in group_keys_to_delete)):
                logger.info('bulk delete cleanup - deleting alert %s' %
                            alert_ss.name)
                alert_ss.delete()
Пример #23
0
def query_queue_attributes(
    session_key,
    aws_account,
    aws_iam_role,
    region_name,
    sqs_queue_url,
):
    """Fetch the attributes of an SQS queue, resolving AWS credentials for
    the given account/role through the local splunkd configuration."""
    scheme, host, port = get_splunkd_access_info()
    config = ConfigManager(
        Service(scheme=scheme, host=host, port=port, token=session_key))
    provider = AWSCredentialsProviderFactory(config).create(
        aws_account, aws_iam_role)
    sqs_client = AWSCredentialsCache(provider).client('sqs', region_name)
    return SQSQueue(sqs_queue_url, region_name).get_attributes(sqs_client)
    def service(self):
        """ Returns a Splunk service object for this command invocation or None.

        The service object is created from the Splunkd URI and authentication token passed to the command invocation in
        the search results info file. This data is not passed to a command invocation by default. You must request it by
        specifying this pair of configuration settings in commands-scpv2.conf:

           .. code-block:: python
               enableheader = true
               requires_srinfo = true

        The :code:`enableheader` setting is :code:`true` by default. Hence, you need not set it. The
        :code:`requires_srinfo` setting is false by default. Hence, you must set it.

        :return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
        :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
        of :code:`None` is returned.

        """
        # Memoized: the client is built once and reused on later calls.
        if self._service is None:
            metadata = self._metadata
            if metadata is None:
                return None

            try:
                searchinfo = metadata.searchinfo
            except AttributeError:
                # No search-results info was passed with this invocation.
                return None

            splunkd_uri = searchinfo.splunkd_uri
            if splunkd_uri is None:
                return None

            parts = urlsplit(splunkd_uri, allow_fragments=False)
            self._service = Service(scheme=parts.scheme,
                                    host=parts.hostname,
                                    port=parts.port,
                                    app=searchinfo.app,
                                    token=searchinfo.session_key)

        return self._service
Пример #25
0
 def __init__(self,
              host='localhost',
              port='8089',
              scheme='https',
              username='******',
              password='******'):
     """Create and log in a Service for the amp4e_events_input app, then
     bind the AmpEventStreams KV store collection.

     :param host: splunkd management host.
     :param port: splunkd management port.
     :param scheme: 'https' or 'http'.
     :param username: account to authenticate with.
     :param password: password for *username*.
     """
     # (Removed a commented-out duplicate of this constructor call that was
     # left in the body.)
     self._service = Service(owner='nobody',
                             app='amp4e_events_input',
                             scheme=scheme,
                             host=host,
                             port=port,
                             username=username,
                             password=password)
     self._service.login()
     self.collection = self._service.kvstore['AmpEventStreams']
Пример #26
0
def get_service(search_results_info_path):
    """Return a memoized Service built from a search-results-info file.

    :param search_results_info_path: path to the dispatch info file whose
        fields carry splunkd's URI, protocol, port, auth token, and app.
    """
    global _service
    if _service:
        return _service

    search_results_info = get_search_results_info(search_results_info_path)

    # Use the parsed hostname rather than netloc.split(':'), which raises
    # ValueError when the URI carries no explicit port (and mishandles
    # IPv6 literals).
    parts = urlsplit(search_results_info.splunkd_uri,
                     search_results_info.splunkd_protocol,
                     allow_fragments=False)
    _service = Service(scheme=search_results_info.splunkd_protocol,
                       host=parts.hostname,
                       port=search_results_info.splunkd_port,
                       token=search_results_info.auth_token,
                       app=search_results_info.ppc_app)
    return _service
Пример #27
0
def set_service():
    """Initialise the module-level ``service`` from the local splunkd.

    Exits the process when no CLI session key is available; returns True on
    success.
    """
    global service
    splunkd_uri = calculate_local_splunkd_protocolhostport()
    token = cli_get_sessionkey(splunkd_uri)
    if token is None:
        logging.error('Please log in first by running `bin/splunk login`.')
        sys.exit(1)

    parts = urlsplit(splunkd_uri, allow_fragments=False)
    service = Service(owner='nobody',
                      scheme=parts.scheme,
                      host=parts.hostname,
                      port=parts.port,
                      token=token)
    service.login()
    return True
Пример #28
0
    def service(self):
        """ Returns a Splunk service object for this command invocation or None.

        The service object is created from the Splunkd URI and authentication
        token passed to the command invocation in the search results info file.
        This data is not passed to a command invocation by default. You must
        request it by specifying this pair of configuration settings in
        commands.conf:

           .. code-block:: python
               enableheader=true
               requires_srinfo=true

        The :code:`enableheader` setting is :code:`true` by default. Hence, you
        need not set it. The :code:`requires_srinfo` setting is false by
        default. Hence, you must set it.

        :return: :class:`splunklib.client.Service`, if :code:`enableheader` and
            :code:`requires_srinfo` are both :code:`true`. Otherwise, if either
            :code:`enableheader` or :code:`requires_srinfo` are :code:`false`,
            a value of :code:`None` is returned.

        """
        if self._service is not None:
            return self._service

        info = self.search_results_info

        if info is None:
            return None

        # Use the parsed hostname rather than netloc.split(':'), which raises
        # ValueError when the URI carries no explicit port (and mishandles
        # IPv6 literals).
        parts = urlsplit(info.splunkd_uri,
                         info.splunkd_protocol,
                         allow_fragments=False)

        self._service = Service(scheme=info.splunkd_protocol,
                                host=parts.hostname,
                                port=info.splunkd_port,
                                token=info.auth_token,
                                app=info.ppc_app)

        return self._service
Пример #29
0
    def _list_rules(self, conf_info):
        """Populate *conf_info* with every AWS Config rule name in the region.

        Pages through describe_config_rules until NextToken is exhausted;
        raises RestError(400) when the describe call fails.
        """
        aws_account = self.callerArgs.data['aws_account'][0]
        aws_iam_role = self.callerArgs.data.get('aws_iam_role', [None])[0]
        region_name = self.callerArgs.data['aws_region'][0]

        scheme, host, port = get_splunkd_access_info()
        service = Service(scheme=scheme,
                          host=host,
                          port=port,
                          token=self.getSessionKey())
        config = ConfigManager(service)
        factory = AWSCredentialsProviderFactory(config)
        provider = factory.create(aws_account, aws_iam_role)
        credentials_cache = AWSCredentialsCache(provider)
        client = credentials_cache.client('config', region_name)
        all_rules = []
        next_token = ""
        while True:
            try:
                response = client.describe_config_rules(NextToken=next_token)
            except Exception as e:
                logger.error('Failed to describe config rules')
                # str(e), not e.message: exceptions have no .message attribute
                # on Python 3, so the old code raised AttributeError here and
                # masked the real failure.
                msg = str(e)
                logger.error(msg)
                raise RestError(400, 'Failed to describe config rules: ' + msg)

            if not tacommon.is_http_ok(response):
                logger.error("Failed to describe config rules, errorcode=%s",
                             tacommon.http_code(response))
                return

            rules = response.get("ConfigRules")
            if not rules:
                break

            all_rules.extend(rule["ConfigRuleName"] for rule in rules)

            next_token = response.get("NextToken")
            if not next_token:
                break

        for rule in all_rules:
            conf_info[rule].append("rule_names", rule)
    def __call__(self):
        """Build and cache a Service from the stored modular-input metadata,
        or return None when no metadata is available."""
        metadata = self.metadata
        if metadata is None:
            return None

        parts = urlsplit(metadata['server_uri'], allow_fragments=False)

        self._service = Service(
            owner=self.owner,
            app=self.app,
            scheme=parts.scheme,
            host=parts.hostname,
            port=parts.port,
            token=metadata['session_key'],
        )

        return self._service