Example #1
    def import_data(self):
        if len(sys.argv) < 2:
            self.logger.logln("no product")
            return

        product = sys.argv[1]

        start_id = 0
        if len(sys.argv) >= 3:
            start_id = int(sys.argv[2])

        recreate_index = True
        if len(sys.argv) >= 4:
            if sys.argv[3] == "1" or sys.argv[3] == "True" or sys.argv[3] == "true":
                recreate_index = True
            else:
                recreate_index = False

        dates = []
        if len(sys.argv) <= 4:
            dates.append(str(datetime.now().date()))
        else:
            i = 4
            while i < len(sys.argv):
                dates.append(str(sys.argv[i]))
                i += 1

        if len(dates) == 0:
            self.logger.logln("no dates")
            return

        kibana = Kibana(self.logger.logln)
        for date in dates:
            kibana.import_data(date, product, start_id, recreate_index)
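The argument handling above implies a command line of the form <product> [start_id] [recreate_index] [date ...]. A minimal invocation sketch, assuming the class is importable as Importer from a hypothetical importer module:

# Hypothetical driver for the import_data() method above.
# Example command line: python importer.py shop 0 true 2016-10-22 2016-10-23
#   argv[1] -> product, argv[2] -> start_id, argv[3] -> recreate_index,
#   argv[4:] -> dates (today's date is used when no dates are given)
from importer import Importer  # hypothetical module/class name

if __name__ == "__main__":
    Importer().import_data()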
Example #2
    def import_data(self):
        if len(sys.argv) < 2:
            self.logger.logln("no product")
            return

        product = sys.argv[1]

        start_id = 0
        if len(sys.argv) >= 3:
            start_id = int(sys.argv[2])

        recreate_index = True
        if len(sys.argv) >= 4:
            if sys.argv[3] == "1" or sys.argv[3] == "True" or sys.argv[
                    3] == "true":
                recreate_index = True
            else:
                recreate_index = False

        dates = []
        if len(sys.argv) <= 4:
            dates.append(str(datetime.now().date()))
        else:
            i = 4
            while i < len(sys.argv):
                dates.append(str(sys.argv[i]))
                i += 1

        if len(dates) == 0:
            self.logger.logln("no dates")
            return

        kibana = Kibana(self.logger.logln)
        for date in dates:
            kibana.import_data(date, product, start_id, recreate_index)
Example #3
    def import_data(self):
        try:
            # yesterday = '2016-10-22'
            yesterday = date.today() - timedelta(1)
            names = self._read_config()
            kibana = Kibana(self.logger.logln)
            for name in names:
                kibana.import_data(str(yesterday), name, 0, True)
            self.logger.logln("SUCCESS.")
        except BaseException as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.logln("Exception " + str(e))
            for line in traceback.format_tb(exc_traceback):
                self.logger.log(line)
Example #4
def kibana_upload(toml_files, url, cloud_id, user, password):
    """Upload a list of rule .toml files to Kibana."""
    from uuid import uuid4
    from .packaging import manage_versions

    with Kibana(cloud_id=cloud_id, url=url) as kibana:
        kibana.login(user, password)

        file_lookup = load_rule_files(paths=toml_files)
        rules = list(load_rules(file_lookup=file_lookup).values())

        # assign the versions from etc/versions.lock.json
        # rules that have changed in hash get incremented, others stay as-is.
        # rules that aren't in the lookup default to version 1
        manage_versions(rules, verbose=False)

        api_payloads = []

        for rule in rules:
            payload = rule.contents.copy()
            meta = payload.setdefault("meta", {})
            meta["original"] = dict(id=rule.id, **rule.metadata)
            payload["rule_id"] = str(uuid4())
            api_payloads.append(RuleResource.from_dict(payload))

        rules = RuleResource.bulk_create(api_payloads)
        click.echo(f"Successfully uploaded {len(rules)} rules")
Example #5
    def setUp(self):
        admin_user = os.getenv("ES_SUPERUSER_USER", "admin")
        admin_password = os.getenv("ES_SUPERUSER_PASS", "changeme")
        self.admin_es = Elasticsearch([self.get_elasticsearch_url(admin_user, admin_password)])
        self.es = Elasticsearch([self.get_elasticsearch_url()])
        self.kibana = Kibana(self.get_kibana_url())

        delete_pipelines = [] if self.skip_clean_pipelines else default_pipelines
        cleanup(self.admin_es, delete_pipelines=delete_pipelines)
        self.kibana.delete_all_agent_config()

        super(ElasticTest, self).setUp()

        # try to make sure APM Server is fully up
        self.wait_until_ilm_logged()
        self.wait_until_pipeline_logged()
Example #6
    def loop(self):
        if self.date == datetime.now().date():
            self.logger.logln("sleep" + str(datetime.today().time()))
            sleep(60 * 60)
            return

        if self.time > datetime.today().time():
            self.logger.logln("sleep" + str(datetime.today().time()))
            sleep(60 * 60)
            return

        kibana = Kibana()

        recreate_index = True
        for product in self.products:
            kibana.import_data(str(self.date), product, 0, recreate_index)
            recreate_index = False

        self.date = datetime.today().date()
Example #7
    def loop(self):
        if self.date == datetime.now().date():
            self.logger.logln("sleep"+str(datetime.today().time()))
            sleep(60*60)
            return

        if self.time > datetime.today().time():
            self.logger.logln("sleep"+str(datetime.today().time()))
            sleep(60*60)
            return

        kibana = Kibana()
        
        recreate_index = True
        for product in self.products:
            kibana.import_data( str(self.date), product, 0, recreate_index )
            recreate_index = False

        self.date = datetime.today().date()
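Both loop() variants above are designed to be called repeatedly: they sleep for an hour and return until the configured time on a new day is reached, then import every product once. A minimal driver sketch; the class name is a hypothetical stand-in:

# Hypothetical stand-in for the class defining loop(), self.date, self.time and self.products.
importer = DailyImporter()
while True:
    importer.loop()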
Example #8
    def import_data(self):
        try:
            cur_time = gmtime(time() - 1 * 60 * 60 + 10)
            names = self._read_config()
            kibana = Kibana(self.logger.logln)
            kibana.set_beg_time(strftime('%H:00:00', cur_time))
            kibana.set_end_time(strftime('%H:59:59', cur_time))

            recreate_index = True
            if int(strftime('%H')) < 10:
                kibana.set_beg_time(None)
            else:
                recreate_index = False

            for name in names:
                kibana.import_data(str(date.today()), name, 0, recreate_index)
            self.logger.logln("SUCCESS.")
        except BaseException as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.logln("Exception " + str(e))
            for line in traceback.format_tb(exc_traceback):
                self.logger.log(line)
Example #9
def get_kibana_client(cloud_id, kibana_url, kibana_user, kibana_password, kibana_cookie, **kwargs):
    """Get an authenticated Kibana client."""
    if not (cloud_id or kibana_url):
        client_error("Missing required --cloud-id or --kibana-url")

    if not kibana_cookie:
        # don't prompt for these until there's a cloud id or Kibana URL
        kibana_user = kibana_user or click.prompt("kibana_user")
        kibana_password = kibana_password or click.prompt("kibana_password", hide_input=True)

    with Kibana(cloud_id=cloud_id, kibana_url=kibana_url, **kwargs) as kibana:
        if kibana_cookie:
            kibana.add_cookie(kibana_cookie)
        else:
            kibana.login(kibana_user, kibana_password)
        return kibana
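A usage sketch for get_kibana_client() as defined above; the URL and credentials are placeholders (passing a cookie instead would skip the login call):

# Sketch only: authenticate against a local Kibana and reuse the client.
kibana = get_kibana_client(cloud_id=None,
                           kibana_url="http://localhost:5601",  # placeholder URL
                           kibana_user="elastic",               # placeholder credentials
                           kibana_password="changeme",
                           kibana_cookie=None)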
Example #10
def get_kibana_client(cloud_id, kibana_url, kibana_user, kibana_password,
                      kibana_cookie, space, ignore_ssl_errors, provider_type,
                      provider_name, **kwargs):
    """Get an authenticated Kibana client."""
    from requests import HTTPError
    from kibana import Kibana

    if not (cloud_id or kibana_url):
        client_error("Missing required --cloud-id or --kibana-url")

    if not kibana_cookie:
        # don't prompt for these until there's a cloud id or Kibana URL
        kibana_user = kibana_user or click.prompt("kibana_user")
        kibana_password = kibana_password or click.prompt("kibana_password",
                                                          hide_input=True)

    verify = not ignore_ssl_errors

    with Kibana(cloud_id=cloud_id,
                kibana_url=kibana_url,
                space=space,
                verify=verify,
                **kwargs) as kibana:
        if kibana_cookie:
            kibana.add_cookie(kibana_cookie)
            return kibana

        try:
            kibana.login(kibana_user,
                         kibana_password,
                         provider_type=provider_type,
                         provider_name=provider_name)
        except HTTPError as exc:
            if exc.response.status_code == 401:
                err_msg = f'Authentication failed for {kibana_url}. If credentials are valid, check --provider-name'
                client_error(err_msg, exc, err=True)
            else:
                raise

        return kibana
Example #11
def kibana_upload(toml_files, kibana_url, cloud_id, user, password):
    """Upload a list of rule .toml files to Kibana."""
    from uuid import uuid4
    from .packaging import manage_versions
    from .schemas import downgrade

    if not (cloud_id or kibana_url):
        raise click.ClickException("Missing required --cloud-id or --kibana-url")

    # don't prompt for these until there's a cloud id or kibana URL
    user = user or click.prompt("user")
    password = password or click.prompt("password", hide_input=True)

    with Kibana(cloud_id=cloud_id, url=kibana_url) as kibana:
        kibana.login(user, password)

        file_lookup = load_rule_files(paths=toml_files)
        rules = list(load_rules(file_lookup=file_lookup).values())

        # assign the versions from etc/versions.lock.json
        # rules that have changed in hash get incremented, others stay as-is.
        # rules that aren't in the lookup default to version 1
        manage_versions(rules, verbose=False)

        api_payloads = []

        for rule in rules:
            payload = rule.contents.copy()
            meta = payload.setdefault("meta", {})
            meta["original"] = dict(id=rule.id, **rule.metadata)
            payload["rule_id"] = str(uuid4())
            payload = downgrade(payload, kibana.version)
            rule = RuleResource(payload)
            api_payloads.append(rule)

        rules = RuleResource.bulk_create(api_payloads)
        click.echo(f"Successfully uploaded {len(rules)} rules")
Example #12
class ElasticTest(ServerBaseTest):
    skip_clean_pipelines = False

    def config(self):
        cfg = super(ElasticTest, self).config()
        cfg.update({
            "elasticsearch_host": self.get_elasticsearch_url(),
            "file_enabled": "false",
            "kibana_enabled": "false",
        })
        cfg.update(self.config_overrides)
        return cfg

    def setUp(self):
        admin_user = os.getenv("ES_SUPERUSER_USER", "admin")
        admin_password = os.getenv("ES_SUPERUSER_PASS", "changeme")
        self.admin_es = Elasticsearch(
            [self.get_elasticsearch_url(admin_user, admin_password)])
        self.es = Elasticsearch([self.get_elasticsearch_url()])
        self.kibana = Kibana(self.get_kibana_url())

        delete_pipelines = [] if self.skip_clean_pipelines else default_pipelines
        cleanup(self.admin_es, delete_pipelines=delete_pipelines)
        self.kibana.delete_all_agent_config()

        super(ElasticTest, self).setUp()

        # try to make sure APM Server is fully up
        self.wait_until_ilm_logged()
        self.wait_until_pipeline_logged()

    def wait_until_ilm_logged(self):
        setup_enabled = self.config().get("ilm_setup_enabled")
        msg = "Finished index management setup." if setup_enabled != "false" else "Manage ILM setup is disabled."
        wait_until(lambda: self.log_contains(msg), name="ILM setup")

    def wait_until_pipeline_logged(self):
        registration_enabled = self.config().get("register_pipeline_enabled")
        msg = "Registered Ingest Pipelines successfully" if registration_enabled != "false" else "No pipeline callback registered"
        wait_until(lambda: self.log_contains(msg),
                   name="pipelines registration")

    def load_docs_with_template(self,
                                data_path,
                                url,
                                endpoint,
                                expected_events_count,
                                query_index=None,
                                max_timeout=10,
                                extra_headers=None,
                                file_mode="r"):

        if query_index is None:
            query_index = apm_prefix

        headers = {'content-type': 'application/x-ndjson'}
        if extra_headers:
            headers.update(extra_headers)

        with open(data_path, file_mode) as f:
            r = requests.post(url, data=f, headers=headers)
        assert r.status_code == 202, r.status_code

        # Wait to give documents some time to be sent to the index
        self.wait_for_events(endpoint,
                             expected_events_count,
                             index=query_index,
                             max_timeout=max_timeout)

    def wait_for_events(self,
                        processor_name,
                        expected_count,
                        index=None,
                        max_timeout=10):
        """
        wait_for_events waits for an expected number of event docs with the given
        'processor.name' value, and returns the hits when found.
        """
        if index is None:
            index = apm_prefix

        query = {"term": {"processor.name": processor_name}}
        result = {}  # TODO(axw) use "nonlocal" when we migrate to Python 3

        def get_docs():
            hits = self.es.search(index=index, body={"query": query})['hits']
            result['docs'] = hits['hits']
            return hits['total']['value'] == expected_count

        wait_until(
            get_docs,
            max_timeout=max_timeout,
            name="{} documents to reach {}".format(processor_name,
                                                   expected_count),
        )
        return result['docs']

    def check_backend_error_sourcemap(self, index, count=1):
        rs = self.es.search(index=index,
                            params={"rest_total_hits_as_int": "true"})
        assert rs['hits'][
            'total'] == count, "found {} documents, expected {}".format(
                rs['hits']['total'], count)
        for doc in rs['hits']['hits']:
            err = doc["_source"]["error"]
            for exception in err.get("exception", []):
                self.check_for_no_smap(exception)
            if "log" in err:
                self.check_for_no_smap(err["log"])

    def check_backend_span_sourcemap(self, count=1):
        rs = self.es.search(index=index_span,
                            params={"rest_total_hits_as_int": "true"})
        assert rs['hits'][
            'total'] == count, "found {} documents, expected {}".format(
                rs['hits']['total'], count)
        for doc in rs['hits']['hits']:
            self.check_for_no_smap(doc["_source"]["span"])

    def check_for_no_smap(self, doc):
        if "stacktrace" not in doc:
            return
        for frame in doc["stacktrace"]:
            assert "sourcemap" not in frame, frame

    def logged_requests(self, url="/intake/v2/events"):
        for line in self.get_log_lines():
            jline = json.loads(line)
            u = urlparse(jline.get("URL", ""))
            if jline.get("logger") == "request" and u.path == url:
                yield jline

    def approve_docs(self, base_path, received):
        """
        approve_docs compares the received documents to those contained
        in the file at ${base_path}.approved.json. If that file does not
        exist, then it is considered equivalent to a lack of documents.

        Only the document _source is compared, and we ignore differences
        in some context-sensitive fields such as the "observer", which
        may vary between test runs.
        """
        base_path = self._beat_path_join(os.path.dirname(__file__), base_path)
        approved_path = base_path + '.approved.json'
        received_path = base_path + '.received.json'
        try:
            with open(approved_path) as f:
                approved = json.load(f)
        except IOError:
            approved = []

        # get_doc_id returns a value suitable for sorting and identifying
        # documents: a unique ID where the event type carries one, a
        # name-based key for transaction histogram metrics, or a timestamp
        # otherwise. This is necessary since not all event types have a
        # unique ID.
        #
        # The first tuple element (0, 1 or 2) exists to sort IDs before
        # name-based keys, and both before timestamps.
        def get_doc_id(doc):
            doc_type = doc['processor']['event']
            if 'id' in doc.get(doc_type, {}):
                return (0, doc[doc_type]['id'])
            if doc_type == 'metric' and 'transaction' in doc:
                transaction = doc['transaction']
                if 'histogram' in transaction.get('duration', {}):
                    # Transaction histogram documents are published periodically
                    # by the apm-server, so we cannot use the timestamp. Instead,
                    # use the transaction name, type, and result (a subset of the
                    # full aggregation key, but good enough for our tests).
                    fields = [
                        transaction.get(field, '')
                        for field in ('type', 'name', 'result')
                    ]
                    return (1, '_'.join(fields))
            return (2, doc['@timestamp'])

        received = [doc['_source'] for doc in received]
        received.sort(key=get_doc_id)

        try:
            for rec in received:
                # Overwrite received observer values with the approved ones,
                # in order to avoid noise in the 'approvals' diff if there are
                # any other changes.
                #
                # We don't compare the observer values between received/approved,
                # as they are dependent on the environment.
                rec_id = get_doc_id(rec)
                rec_observer = rec['observer']
                self.assertEqual(
                    set(rec_observer.keys()),
                    set([
                        "hostname", "version", "id", "ephemeral_id", "type",
                        "version_major"
                    ]))
                assert rec_observer["version"].startswith(
                    str(rec_observer["version_major"]) + ".")
                for appr in approved:
                    if get_doc_id(appr) == rec_id:
                        rec['observer'] = appr['observer']
                        # ensure both docs have the same event keys set
                        self.assertEqual(
                            rec.get("event", {}).keys(),
                            appr.get("event", {}).keys())
                        # We don't compare the event values between received/approved
                        # as they are dependent on the environment.
                        if 'event' in rec:
                            rec['event'] = appr['event']
                        break
            assert len(received) == len(approved)
            for i, rec in enumerate(received):
                appr = approved[i]
                rec_id = get_doc_id(rec)
                assert rec_id == get_doc_id(
                    appr), "New entry with id {}".format(rec_id)
                for k, v in rec.items():
                    self.assertEqual(v, appr[k])
        except Exception as exc:
            with open(received_path, 'w') as f:
                json.dump(received,
                          f,
                          indent=4,
                          separators=(',', ': '),
                          sort_keys=True)

            # Create a dynamic Exception subclass so we can fake its name to look like the original exception.
            class ApprovalException(Exception):
                def __init__(self, cause):
                    super(ApprovalException, self).__init__(cause.args)

                def __str__(self):
                    return "{}\n\nReceived data differs from approved data. Run 'make update check-approvals' to verify the diff.".format(
                        self.args)

            ApprovalException.__name__ = type(exc).__name__
            raise ApprovalException(exc).with_traceback(sys.exc_info()[2])
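The helpers above are meant to compose into approval-style tests. A minimal sketch of such a test method; the fixture path, intake URL and event counts are placeholders, not taken from the source:

    # Hypothetical test method; assumes a local APM Server intake endpoint.
    def test_transactions_approved(self):
        self.load_docs_with_template("data/intake.ndjson",  # hypothetical fixture file
                                     "http://localhost:8200/intake/v2/events",  # assumed intake URL
                                     "transaction",
                                     expected_events_count=4)
        docs = self.wait_for_events("transaction", expected_count=4)
        self.approve_docs("transaction", docs)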