예제 #1
0
    def getDeploymentID(self, cookie):
        """Return the deployment ID, resolving and caching it on first use.

        After the first successful lookup the value is cached on
        ``self.deploymentID`` and later calls return it without
        contacting splunkd again.
        """
        if self.deploymentID:
            return self.deploymentID

        # First call: authenticate, then ask the deployment-id manager.
        self.getToken(cookie)
        self.server_uri = rest.makeSplunkdUri()
        splunkd = Splunkd(token=self.token, server_uri=self.server_uri)

        # A read-only view of telemetry.conf is enough to look up the id.
        conf_service = TelemetryConfService(splunkd, is_read_only=True)
        conf_service.fetch()

        id_manager = DeploymentIdManager(
            splunkd, telemetry_conf_service=conf_service)

        self.deploymentID = id_manager.get_deployment_id()
        return self.deploymentID
예제 #2
0
    def __init__(self,
                 splunkrc=SPLUNKRC,
                 telemetryConfService=None,
                 serverInfoService=None):
        """Build an instance profile by querying the local splunkd.

        :param splunkrc: either a dict of connection parameters (a new
            QueryRunner is constructed from it) or an already-built
            query-runner object used as-is (dependency injection).
        :param telemetryConfService: optional pre-built TelemetryConfService;
            one is created from the service when omitted.
        :param serverInfoService: optional pre-built ServerInfoService;
            one is created from the service when omitted.

        Side effects: fetches telemetry.conf and server-info content,
        probes cluster/SHC endpoints, and populates ``self.profile``,
        ``self.roles`` and ``self.visibility``.
        """
        splunkrc = (splunkrc or SPLUNKRC)
        if type(splunkrc) is dict:
            self.query_runner = QueryRunner(splunkrc)
        else:
            self.query_runner = splunkrc

        # Raw endpoint payloads gathered by _load_json are stored here.
        self.profile = {}
        self.service = self.query_runner._splunkd.service

        if not telemetryConfService:
            self.telemetry_conf_service = TelemetryConfService(self.service)
        else:
            self.telemetry_conf_service = telemetryConfService

        if not serverInfoService:
            self.server_info_service = ServerInfoService(self.service)
        else:
            self.server_info_service = serverInfoService

        # Populate .content on both services before anything reads them.
        self.telemetry_conf_service.fetch()
        self.server_info_service.fetch()

        self.service_bundle = ServiceBundle(
            self.service,
            telemetry_conf_service=self.telemetry_conf_service,
            server_info_service=self.server_info_service)

        self.salt_manager = SaltManager(self.service_bundle)

        # NOTE(review): other call sites construct DeploymentIdManager from
        # a Splunkd handle rather than a service object — confirm this
        # first argument is what DeploymentIdManager expects.
        self.deployment_id_manager = DeploymentIdManager(
            self.service,
            telemetry_conf_service=self.telemetry_conf_service,
            server_info_service=self.server_info_service)

        # Seed roles from server-info; the endpoint probes below overwrite
        # some of these entries.
        self.roles = {role: True for role in self.server_info['server_roles']}

        # gets cluster info from endpoint
        self._load_json({
            "end_point": "cluster/config/config",
            "name": "cluster_config"
        })

        # Valid values: (master | slave | searchhead | disabled)
        # Note that for a searchhead (with SHC or not) the value will be 'disabled' (rather than 'searchhead')
        # if there is no indexer clustering.
        # If the call fails, cluster_mode falls back to 'disabled'.
        self.profile['cluster_mode'] = self._nested_get(
            self.profile, 'cluster_config.entry.content.mode', 'disabled')

        # gets search captain info from endpoint. noProxy is required so that it fails when instance is not the captain
        self._load_json(
            {
                "end_point": "shcluster/captain/info",
                "name": "captain_info"
            },
            noProxy=True,
            default={})

        # if captain/info returns a value it is captain  : overwrites server roles
        # this is failing so removing for the time being
        # self.roles['shc_captain'] = bool(self.profile.get('captain_info'))

        # if mode is not disabled then add in_cluster to roles   : overwrites server roles
        # Note: 'in_cluster' doesn't mean 'in a cluster', it means 'in a deployment that has indexer clustering'.
        self.roles['in_cluster'] = not self.profile.get(
            'cluster_mode') == 'disabled'
        #   overwrites server roles
        self.roles['cluster_master'] = self.profile.get(
            'cluster_mode') == 'master'

        # determines if the current node has lead_role
        self.roles['lead_node'] = self.eval_instance()

        self._get_visibility()
예제 #3
0
class InstanceProfile(object):
    """InstanceProfile.

    This class will retrieve the instance's information.

    self.server_info = server information will be stored here
    self.visibility  = visibility information will be stored here
    """
    def __init__(self,
                 splunkrc=SPLUNKRC,
                 telemetryConfService=None,
                 serverInfoService=None):
        """Constructor.

        It grabs a query_runner object according to the splunkrc params provided:
            - If splunkrc is a dictionary, it instantiates a new QueryRunner object.
            - If given another object type, it does dependency injection on query_runner.
        """
        splunkrc = (splunkrc or SPLUNKRC)
        if type(splunkrc) is dict:
            self.query_runner = QueryRunner(splunkrc)
        else:
            self.query_runner = splunkrc

        # Raw endpoint payloads gathered by _load_json are stored here.
        self.profile = {}
        self.service = self.query_runner._splunkd.service

        if not telemetryConfService:
            self.telemetry_conf_service = TelemetryConfService(self.service)
        else:
            self.telemetry_conf_service = telemetryConfService

        if not serverInfoService:
            self.server_info_service = ServerInfoService(self.service)
        else:
            self.server_info_service = serverInfoService

        # Populate .content on both services before anything reads them.
        self.telemetry_conf_service.fetch()
        self.server_info_service.fetch()

        self.service_bundle = ServiceBundle(
            self.service,
            telemetry_conf_service=self.telemetry_conf_service,
            server_info_service=self.server_info_service)

        self.salt_manager = SaltManager(self.service_bundle)

        self.deployment_id_manager = DeploymentIdManager(
            self.service,
            telemetry_conf_service=self.telemetry_conf_service,
            server_info_service=self.server_info_service)

        # Seed roles from server-info; the endpoint probes below overwrite
        # some of these entries.
        self.roles = {role: True for role in self.server_info['server_roles']}

        # gets cluster info from endpoint
        self._load_json({
            "end_point": "cluster/config/config",
            "name": "cluster_config"
        })

        # Valid values: (master | slave | searchhead | disabled)
        # Note that for a searchhead (with SHC or not) the value will be 'disabled' (rather than 'searchhead')
        # if there is no indexer clustering.
        # If call fails set cluster_mode to disabled.
        self.profile['cluster_mode'] = self._nested_get(
            self.profile, 'cluster_config.entry.content.mode', 'disabled')

        # gets search captain info from endpoint. noProxy is required so that it fails when instance is not the captain
        self._load_json(
            {
                "end_point": "shcluster/captain/info",
                "name": "captain_info"
            },
            noProxy=True,
            default={})

        # if captain/info returns a value it is captain  : overwrites server roles
        # this is failing so removing for the time being
        # self.roles['shc_captain'] = bool(self.profile.get('captain_info'))

        # if mode is not disabled then add in_cluster to roles   : overwrites server roles
        # Note: 'in_cluster' doesn't mean 'in a cluster', it means 'in a deployment that has indexer clustering'.
        self.roles['in_cluster'] = not self.profile.get(
            'cluster_mode') == 'disabled'
        #   overwrites server roles
        self.roles['cluster_master'] = self.profile.get(
            'cluster_mode') == 'master'

        # determines if the current node has lead_role
        self.roles['lead_node'] = self.eval_instance()

        self._get_visibility()

    def eval_instance(self):
        """Classify this node and report its instance type.

        Evaluates role-requirement sets in priority order; the first
        matching set determines both the reported label and the return
        value. Returns None when nothing matches (falsy: not a lead node).
        """
        req_list = [
            {
                "requirements": [
                    'indexer', '!search_peer', '!cluster_slave', '!shc_member',
                    '!cluster_master', '!shc_captain', '!cluster_search_head'
                ],
                "label":
                "Single",
                "result":
                True
            },
            {
                "requirements": ['cluster_master'],
                "label": "Cluster Master",
                "result": True
            },
            {
                "requirements": ['!cluster_master', 'in_cluster'],
                "label": "Cluster Member not Cluster Master",
                "result": False
            },
            # assume we are already not a cluster member from the above requirements
            {
                "requirements": ['shc_captain'],
                "label": "Search Captain in a non cluster",
                "result": True
            },
            {
                "requirements": [
                    '!cluster_master', 'search_head', '!search_peer',
                    '!in_cluster', '!cluster_slave', '!shc_member'
                ],
                "label":
                "Single Search Head",
                "result":
                True
            },
        ]

        for req in req_list:
            result = evaluate_roles(self.roles, req["requirements"])

            if result:
                report.report("instance.type", req["label"])
                return req["result"]
            else:
                # Overwritten by a later match, if any.
                report.report("instance.type", None)

    def opt_in_is_up_to_date(self):
        """Return True if the opt-in acknowledgement is current (delegated)."""
        return self.telemetry_conf_service.opt_in_is_up_to_date()

    @property
    def server_info(self):
        """Fetched server-info content for this instance."""
        return self.server_info_service.content

    def retry_transaction(self):
        """Retry the cluster-master sync transaction on telemetry.conf."""
        self.telemetry_conf_service.retry_cluster_master_sync_transaction()

    def sync_deployment_id(self):
        """Pull the deployment ID from the cluster master, if applicable."""
        self.deployment_id_manager.sync_deployment_id()

    def sync_salt(self):
        """Synchronize the salt value with the cluster."""
        self.salt_manager.sync_with_cluster()

    def get_deployment_id(self):
        """Return this deployment's ID via the deployment-id manager."""
        return self.deployment_id_manager.get_deployment_id()

    def _get_visibility(self):
        """Compute the sorted list of enabled visibility names.

        A visibility is enabled when its telemetry.conf field parses to a
        non-zero int (missing fields count as 0). If the opt-in
        acknowledgement is stale, everything except 'license' is dropped.
        """
        self.visibility = []
        # Fixed: dict.iteritems() is Python-2-only; items() works on both.
        for name, field in VISIBILITY_FIELDS_BY_NAME.items():
            if int(self.telemetry_conf_service.content.get(field) or 0):
                self.visibility.append(name)

        if not self.opt_in_is_up_to_date():
            self.visibility = ['license'
                               ] if 'license' in self.visibility else []

        self.visibility.sort()

    def _nested_get(self, dic, path, default=0, separator='.'):
        """NestedGet.

        default path separator is .
        default value is 0

        NOTE(review): after walking to the parent container, a plain dict
        yields `default` — presumably because real payloads are spldata
        records, not dicts, so a plain dict means setdefault() just created
        empty placeholders along a missing path. Confirm before changing.
        """
        keys = path.split(separator)
        for key in keys[:-1]:
            dic = dic.setdefault(key, {})

        if type(dic) is dict:
            return default
        return dic.get(keys[-1])

    def _load_json(self, endpoint, noProxy=False, default=None):
        '''
        calls endpoint['end_point'] and assigns the results to `self.profile[end_point['name']]`

        :param endpoint: dict with 'end_point' (REST path) and 'name'
            (profile key to store the result under)
        :param noProxy: append ?noProxy=true so the call fails on non-captains
        :param default: value stored on failure; defaults to a fresh {}
            (None sentinel avoids the shared-mutable-default pitfall)
        :return: True on success, False if the request raised
        '''
        try:
            path = self._construct_path(endpoint, noProxy)
            payload = self.service.http.request(
                path, {
                    'method': 'GET',
                    'headers': self.service._auth_headers
                }).get('body')

            if payload:
                result = (spldata.load(payload.read()))
                self.profile[endpoint['name']] = result['feed']
        # often if license does not permit this call it will return a 402 as exception
        except Exception:
            self.profile[endpoint['name']] = {} if default is None else default
            return False

        return True

    def _construct_path(self, endpoint, noProxy):
        """Build the absolute splunkd URL for `endpoint`, honoring noProxy."""
        path = self.service.authority \
               + self.service._abspath(endpoint["end_point"], owner=self.query_runner._splunkd.namespace['owner'],
                                       app=self.query_runner._splunkd.namespace['app'])
        if (noProxy):
            path += "?noProxy=true"
        return path
예제 #4
0
    if services.server_info_service.is_lite():
        prefix += 'LIGHT'

# Migration of the deployment ID from V1 of instrumentation
# requires waiting until the KV store is ready. We'll give
# it 5 minutes, then proceed without it.
t_start = time.time()
status = services.server_info_service.content.get('kvStoreStatus')
# Poll every 10s until the KV store leaves 'starting' or the 5-minute
# budget is exhausted; each pass re-fetches server-info so the cached
# content (and thus the status) is current.
while status == 'starting' and (time.time() - t_start) < (5 * 60):
    time.sleep(10)
    services.server_info_service.fetch()
    status = services.server_info_service.content.get('kvStoreStatus')

deployment_id_manager = DeploymentIdManager(
    services.splunkd,
    telemetry_conf_service=services.telemetry_conf_service,
    server_info_service=services.server_info_service,
    prefix=prefix)

# "Managed" Variable Sync Strategy
# --------------------------------
#
# Managed variables have complex lifecycles, and require
# synchronization among multiple nodes in a splunk deployment.
# This leads to their abstraction behind "manager" class interfaces.
#
# The strategy for syncing them is as follows:
#
# * On Splunk start (when this script is triggered):
# ** Pull (or "sync") whatever value is at the cluster master,
#    overwriting any local value.
    def update(self,
               cherrypy,
               services=None,
               deployment_id_manager=None,
               salt_manager=None):
        '''
        Updates the volatile data members of the swa context.

        This method is hit each time an HTML page is hit, so the
        less work done here the better.

        :param cherrypy: cherrypy request context; its session key is used
            to build a ServiceBundle when `services` is not supplied.
        :param services: optional ServiceBundle (dependency injection).
        :param deployment_id_manager: optional DeploymentIdManager override.
        :param salt_manager: optional SaltManager override.
        '''

        if services is None:
            splunkd = Splunkd(token=cherrypy.session.get('sessionKey'),
                              server_uri=self.server_uri)
            telemetry_conf_service = TelemetryConfService(splunkd,
                                                          is_read_only=True)
            telemetry_conf_service.fetch()
            # Specialize the telemetry_conf_service to be read only up front,
            # use the default construction for other services.
            services = ServiceBundle(
                splunkd, telemetry_conf_service=telemetry_conf_service)

        if not self.instance_guid:
            self.instance_guid = services.server_info_service.content.get(
                'guid')

        salt_manager = salt_manager or SaltManager(services)
        self.salt = salt_manager.get_salt()

        # Bug fix: use the bundle's splunkd handle here. The local `splunkd`
        # is only bound inside the `services is None` branch above, so the
        # old code raised UnboundLocalError whenever a caller supplied
        # `services` without a `deployment_id_manager`.
        deployment_id_manager = deployment_id_manager or DeploymentIdManager(
            services.splunkd,
            telemetry_conf_service=services.telemetry_conf_service)

        self.deployment_id = deployment_id_manager.get_deployment_id() or ''

        self.opt_in_is_up_to_date = services.telemetry_conf_service.opt_in_is_up_to_date(
        )

        self.swa_base_url = services.telemetry_conf_service.content.get(
            'swaEndpoint')

        # Per-user id: sha256(salt + username); bytes are required on py3.
        hash_key = self.salt + splunk.auth.getCurrentUser()['name']
        if sys.version_info >= (3, 0):
            hash_key = hash_key.encode()

        self.user_id = hashlib.sha256(hash_key).hexdigest()

        self.send_anonymized_web_analytics = conf_bool(
            services.telemetry_conf_service.content.get(
                'sendAnonymizedWebAnalytics'))

        visibilities = []

        # Cloud always reports both visibilities; on-prem honors the
        # individual telemetry.conf opt-in flags.
        if services.server_info_service.is_cloud():
            visibilities = ['anonymous', 'support']
        else:
            if conf_bool(
                    services.telemetry_conf_service.content.get(
                        'sendAnonymizedUsage')):
                visibilities.append('anonymous')

            if conf_bool(
                    services.telemetry_conf_service.content.get(
                        'sendSupportUsage')):
                visibilities.append('support')

        self.visibility = ','.join(visibilities)

        # Resolve the CDS endpoint lazily, only once analytics is enabled.
        if self.send_anonymized_web_analytics and not self.cds_url:
            self.cds_url = get_quick_draw().get('url')
def main(services, salt_manager, deployment_id_manager, OnSplunkStart):
    """Run the splunk-start initialization sequence.

    Waits for the KV store, initializes the salt and the deployment ID,
    then applies the environment-appropriate opt-in step.
    """
    OnSplunkStart.wait_for_kv_store_started(services)

    OnSplunkStart.initialize_salt(salt_manager)

    OnSplunkStart.initialize_deployment_id(services, deployment_id_manager)

    if not services.server_info_service.is_cloud():
        # Cloud should never opt-in for license sharing,
        # so only apply the default on-prem
        OnSplunkStart.migrate_licensing_opt_in_default(services)
    else:
        OnSplunkStart.opt_in_for_cloud_instrumentation(services)

if __name__ == '__main__':
    try:
        # splunkd passes the session token to scripted inputs on stdin.
        token = sys.stdin.read().rstrip()
        splunkd = Splunkd(token=token, server_uri=constants.SPLUNKD_URI)
        services = ServiceBundle(splunkd)
        salt_manager = SaltManager(services)
        deployment_id_manager = DeploymentIdManager(
            services.splunkd,
            telemetry_conf_service=services.telemetry_conf_service,
            server_info_service=services.server_info_service)
        main(services, salt_manager, deployment_id_manager, OnSplunkStart)
    except Exception as e:
        # NOTE(review): errors are logged but the script still exits 0 —
        # presumably so an instrumentation failure never blocks splunkd
        # startup. Confirm that before changing the exit code.
        logger.error(e)
        exit(0)
예제 #7
0
class InstanceProfile(object):
    """InstanceProfile.

    This class will retrieve the instance's information.

    self.server_info = server information will be stored here
    self.visibility  = visibility information will be stored here
    """
    def __init__(self,
                 splunkrc=SPLUNKRC,
                 telemetryConfService=None,
                 serverInfoService=None):
        """Constructor.

        It grabs a query_runner object according to the splunkrc params provided:
            - If splunkrc is a dictionary, it instantiates a new QueryRunner object.
            - If given another object type, it does dependency injection on query_runner.
        """
        splunkrc = (splunkrc or SPLUNKRC)
        if type(splunkrc) is dict:
            self.query_runner = QueryRunner(splunkrc)
        else:
            self.query_runner = splunkrc

        # Raw endpoint payloads gathered by _load_json are stored here.
        self.profile = {}
        self.service = self.query_runner._splunkd.service
        if not telemetryConfService:
            self.telemetry_conf_service = TelemetryConfService(self.service)
        else:
            self.telemetry_conf_service = telemetryConfService

        if not serverInfoService:
            self.server_info_service = ServerInfoService(self.service)
        else:
            self.server_info_service = serverInfoService

        # Populate .content on both services before anything reads them.
        self.telemetry_conf_service.fetch()
        self.server_info_service.fetch()

        self.service_bundle = ServiceBundle(
            self.service,
            telemetry_conf_service=self.telemetry_conf_service,
            server_info_service=self.server_info_service)

        self.salt_manager = SaltManager(self.service_bundle)

        self.deployment_id_manager = DeploymentIdManager(
            self.service,
            telemetry_conf_service=self.telemetry_conf_service,
            server_info_service=self.server_info_service)

        # Seed roles from server-info; the endpoint probes below overwrite
        # some of these entries.
        self.roles = {role: True for role in self.server_info['server_roles']}

        # gets cluster info from endpoint
        self._load_json({
            "end_point": "cluster/config/config",
            "name": "cluster_config"
        })

        # if call fails set cluster_mode to disabled
        self.profile['cluster_mode'] = self._nested_get(
            self.profile, 'cluster_config.entry.content.mode', 'disabled')

        # gets search captain info from endpoint. noProxy is required so that it fails when instance is not the captain
        self._load_json(
            {
                "end_point": "shcluster/captain/info",
                "name": "captain_info"
            },
            noProxy=True,
            default={})

        # if captain/info returns a value it is captain  : overwrites server roles
        # this is failing so removing for the time being
        # self.roles['shc_captain'] = bool(self.profile.get('captain_info'))

        # if mode is not disabled then add in_cluster to roles   : overwrites server roles
        self.roles['in_cluster'] = not self.profile.get(
            'cluster_mode') == 'disabled'
        #   overwrites server roles
        self.roles['cluster_master'] = self.profile.get(
            'cluster_mode') == 'master'

        self._get_visibility()

    def opt_in_is_up_to_date(self):
        """Return True when the acknowledged opt-in version is current.

        Both optInVersion and optInVersionAcknowledged are read from
        telemetry.conf and must be purely numeric strings; anything else
        is treated as absent.
        """
        opt_in_version_str = self.telemetry_conf_service.content.get(
            'optInVersion') or ''
        opt_in_version_acknowledged_str = self.telemetry_conf_service.content.get(
            'optInVersionAcknowledged') or ''

        if not re.match('^[0-9]+$', opt_in_version_str):
            opt_in_version_str = None
        if not re.match('^[0-9]+$', opt_in_version_acknowledged_str):
            opt_in_version_acknowledged_str = None

        opt_in_version = int(
            opt_in_version_str) if opt_in_version_str else None
        opt_in_version_acknowledged = int(
            opt_in_version_acknowledged_str
        ) if opt_in_version_acknowledged_str else None

        if not opt_in_version:
            # Should only happen if somebody removes the field manually
            # In that case, fall back to legacy behavior (ignore this check)
            return True

        if not opt_in_version_acknowledged:
            # Passed the check above, so we have a version number but no acknowledgement.
            # So, they're not up-to-date.
            return False

        return opt_in_version_acknowledged >= opt_in_version

    @property
    def server_info(self):
        """Fetched server-info content for this instance."""
        return self.server_info_service.content

    def retry_transaction(self):
        """Retry the cluster-master sync transaction on telemetry.conf."""
        self.telemetry_conf_service.retry_cluster_master_sync_transaction()

    def sync_deployment_id(self):
        """Pull the deployment ID from the cluster master, if applicable."""
        self.deployment_id_manager.sync_deployment_id()

    def sync_salt(self):
        """Synchronize the salt value with the cluster."""
        self.salt_manager.sync_with_cluster()

    def get_deployment_id(self):
        """Return this deployment's ID via the deployment-id manager."""
        return self.deployment_id_manager.get_deployment_id()

    def _get_visibility(self):
        """Collect the sorted names of enabled visibility fields.

        A visibility is enabled when its telemetry.conf field parses to a
        non-zero int; a missing field now counts as 0 instead of raising
        TypeError on int(None).
        """
        self.visibility = []
        # Fixed: dict.iteritems() is Python-2-only; items() works on both.
        for name, field in VISIBILITY_FIELDS_BY_NAME.items():
            # Fixed: guard with `or 0` so a missing field doesn't crash
            # int(); this matches the newer revision of this class.
            if int(self.telemetry_conf_service.content.get(field) or 0):
                self.visibility.append(name)
        self.visibility.sort()

    def _nested_get(self, dic, path, default=0, separator='.'):
        """NestedGet.

        default path separator is .
        default value is 0

        NOTE(review): after walking to the parent container, a plain dict
        yields `default` — presumably because real payloads are spldata
        records, not dicts, so a plain dict means setdefault() just created
        empty placeholders along a missing path. Confirm before changing.
        """
        keys = path.split(separator)
        for key in keys[:-1]:
            dic = dic.setdefault(key, {})

        if type(dic) is dict:
            return default
        return dic.get(keys[-1])

    def _load_json(self, endpoint, noProxy=False, default=None):
        '''
        calls endpoint['end_point'] and assigns the results to `self.profile[end_point['name']]`

        :param endpoint: dict with 'end_point' (REST path) and 'name'
            (profile key to store the result under)
        :param noProxy: append ?noProxy=true so the call fails on non-captains
        :param default: value stored on failure; defaults to a fresh {}
            (None sentinel avoids the shared-mutable-default pitfall)
        :return: True on success, False if the request raised
        '''
        try:
            path = self._construct_path(endpoint, noProxy)
            payload = self.service.http.request(
                path, {
                    'method': 'GET',
                    'headers': self.service._auth_headers
                }).get('body')
            if payload:
                result = (spldata.load(payload.read()))
                self.profile[endpoint['name']] = result['feed']
        # often if license does not permit this call it will return a 402 as exception
        except Exception:
            self.profile[endpoint['name']] = {} if default is None else default
            return False

        return True

    def _construct_path(self, endpoint, noProxy):
        """Build the absolute splunkd URL for `endpoint`, honoring noProxy."""
        path = self.service.authority \
               + self.service._abspath(endpoint["end_point"], owner=self.query_runner._splunkd.namespace['owner'],
                                       app=self.query_runner._splunkd.namespace['app'])
        if (noProxy):
            path += "?noProxy=true"
        return path
예제 #8
0
                        help='Splunk password',
                        required=True)

    parser.add_argument('--prefix',
                        help='Desired prefix for the deployment ID',
                        required=False)

    args = parser.parse_args()

    splunkrc = {
        'username': args.user,
        'password': args.password
    }

    splunkd = Splunkd(**splunkrc)
    deployment_id_manager = DeploymentIdManager(splunkd)

    deploymentID = deployment_id_manager.get_deployment_id(no_create=True)

    if deploymentID is not None and deploymentID.startswith(args.prefix or ''):
        print("Deployment ID already initialized: %s" %
              deploymentID)
        # Only failures to set are considered error conditions.
        # So the exit code for an existing deployment ID is still 0.
        exit(0)
    else:
        deployment_id_manager.generate_new_deployment_id(prefix=args.prefix)
        deployment_id_manager.write_deployment_id_to_conf_file()
        print("Deployment ID successfully initialized: %s" %
              deployment_id_manager.get_deployment_id(no_create=True))
        exit(0)