def read_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    LOG.info("Starting %s Read Handler", TYPE_NAME)
    model = request.desiredResourceState

    dashboard_id = model.Id

    with v1_client(
            model.DatadogCredentials.ApiKey,
            model.DatadogCredentials.ApplicationKey,
            model.DatadogCredentials.ApiURL,
            TELEMETRY_TYPE_NAME,
            __version__,
    ) as api_client:
        api_instance = DashboardsApi(api_client)
        try:
            dash = api_instance.get_dashboard(dashboard_id)
            json_dict = ApiClient.sanitize_for_serialization(dash)
            model.DashboardDefinition = json.dumps(json_dict)
        except ApiException as e:
            LOG.error(
                "Exception when calling DashboardsApi->get_dashboard: %s\n", e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=f"Error getting dashboard: {e}")
    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=model,
    )
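The handler above assumes a handful of imports and module-level constants; a sketch of what they would look like (the generated-model import path and the TYPE_NAME / TELEMETRY_TYPE_NAME values are assumptions):

import json
import logging
from typing import Any, MutableMapping, Optional

from cloudformation_cli_python_lib import OperationStatus, ProgressEvent, SessionProxy
from datadog_api_client.v1 import ApiClient, ApiException
from datadog_api_client.v1.api.dashboards_api import DashboardsApi

from .models import ResourceHandlerRequest  # generated per resource; path assumed

LOG = logging.getLogger(__name__)
TYPE_NAME = "Datadog::Dashboards::Dashboard"  # assumed
TELEMETRY_TYPE_NAME = "dashboards-dashboard"  # assumed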
Example #2
def send_to_datadog(context):
    # Initialize
    tags = {
        "dataset": os.environ["DATASET_NAME"],
        "dataset_repository_branch": DATASET_REPOSITORY_BRANCH,
        "external_dataset_repository": IS_EXTERNAL,
        "config_repository": CONFIG_REPOSITORY,
        "config_repository_branch": CONFIG_REPOSITORY_BRANCH,
        "dataset_repository_branch": os.environ["DATASET_REPOSITORY_BRANCH"],
        "dataset_commit": os.environ["DATASET_COMMIT"],
        "workflow": os.environ["GITHUB_WORKFLOW"],
        "config": os.environ["CONFIG"],
        "pr_url": os.environ["PR_URL"],
        "accelerator_type": os.environ["ACCELERATOR_TYPE"],
        "github_run_id": os.environ["GITHUB_RUN_ID"],
        "github_sha": os.environ["GITHUB_SHA"],
        "github_event": os.environ["GITHUB_EVENT_NAME"],
        "type": os.environ["TYPE"],
        "branch": os.environ["BRANCH"],
        "env": DD_ENV,
        "service": DD_SERVICE,
    }
    tags_list = [f"{k}:{v}" for k, v in tags.items()]

    # Send metrics
    metrics = {
        "test_run_time": os.environ["TEST_RUN_TIME"],
        "train_run_time": os.environ["TRAIN_RUN_TIME"],
        "total_run_time": os.environ["TOTAL_RUN_TIME"],
    }
    timestamp = datetime.datetime.now().timestamp()

    series = []
    for metric_name, metric_value in metrics.items():
        overall_seconds = transform_to_seconds(metric_value)
        series.append(
            Series(
                metric=f"{METRIC_PREFIX}{metric_name}.gauge",
                type="gauge",
                points=[Point([timestamp, overall_seconds])],
                tags=tags_list,
            )
        )

    body = MetricsPayload(series=series)
    with ApiClient(Configuration()) as api_client:
        api_instance = MetricsApi(api_client)
        response = api_instance.submit_metrics(body=body)
        if response.get('status') != 'ok':
            print(response)
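With no arguments, Configuration() picks up credentials from the DD_API_KEY / DD_APP_KEY (and DD_SITE) environment variables, which is what the with-block above relies on; the explicit equivalent, as a sketch:

configuration = Configuration(
    api_key={
        "apiKeyAuth": os.environ["DD_API_KEY"],
        "appKeyAuth": os.environ["DD_APP_KEY"],
    }
)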
Example #3
@contextmanager  # v1_client yields, so it must be wrapped to support "with v1_client(...)"
def v1_client(api_key: str, app_key: str, api_url: str, resource_name: str, resource_version: str) -> Iterator[ApiClient]:
    configuration = Configuration(
        host=api_url or "https://api.datadoghq.com",
        api_key={
            "apiKeyAuth": api_key,
            "appKeyAuth": app_key,
        }
    )

    with ApiClient(configuration) as api_client:
        try:
            plugin_ver = pkg_resources.get_distribution('cloudformation_cli_python_lib').version
        except ValueError:
            # Fallback if we're unable to retrieve the plugin version for any reason
            plugin_ver = "NA"
        api_client.user_agent = f"aws-cloudformation-datadog/{plugin_ver} (resource-name {resource_name}; resource-version {resource_version}) {api_client.user_agent}"
        yield api_client
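Because it yields, v1_client is consumed as a context manager, exactly as the Read handler above consumes it; a usage sketch with placeholder credentials:

with v1_client(api_key, app_key, "https://api.datadoghq.com",
               "Datadog::Dashboards::Dashboard", "1.0.0") as client:
    dashboards = DashboardsApi(client).list_dashboards()  # any v1 API works here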
Example #4
def main():
    module = AnsibleModule(argument_spec=dict(
        api_key=dict(required=True, no_log=True),
        api_host=dict(required=False, default="https://api.datadoghq.com"),
        app_key=dict(required=True, no_log=True),
        state=dict(
            required=False, choices=["present", "absent"], default="present"),
        monitor_tags=dict(required=False, type="list", elements="str"),
        scope=dict(required=False, type="list", elements="str"),
        monitor_id=dict(required=False, type="int"),
        downtime_message=dict(required=False, no_log=True),
        start=dict(required=False, type="int"),
        end=dict(required=False, type="int"),
        timezone=dict(required=False, type="str"),
        rrule=dict(required=False, type="str"),
        id=dict(required=False, type="int"),
    ))

    # Prepare Datadog
    if not HAS_DATADOG:
        module.fail_json(msg=missing_required_lib("datadog-api-client"),
                         exception=DATADOG_IMP_ERR)

    configuration = Configuration(host=module.params["api_host"],
                                  api_key={
                                      "apiKeyAuth": module.params["api_key"],
                                      "appKeyAuth": module.params["app_key"]
                                  })
    with ApiClient(configuration) as api_client:
        api_client.user_agent = "ansible_collection/community_general (module_name datadog_downtime) {0}".format(
            api_client.user_agent)
        api_instance = DowntimesApi(api_client)

        # Validate api and app keys
        try:
            api_instance.list_downtimes(current_only=True)
        except ApiException as e:
            module.fail_json(
                msg="Failed to connect to the Datadog server using the given app_key and api_key: {0}".format(e))

        if module.params["state"] == "present":
            schedule_downtime(module, api_client)
        elif module.params["state"] == "absent":
            cancel_downtime(module, api_client)
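schedule_downtime and cancel_downtime are defined elsewhere in the module; a minimal sketch of the cancel path, assuming the target downtime id arrives via the module's "id" parameter:

def cancel_downtime(module, api_client):
    api_instance = DowntimesApi(api_client)
    try:
        api_instance.cancel_downtime(module.params["id"])
    except ApiException as e:
        module.fail_json(msg="Failed to cancel downtime: {0}".format(e))
    module.exit_json(changed=True, downtime_id=module.params["id"])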
Example #5
def send_to_datadog(results: List[Dict[Text, Any]]) -> None:
    """Sends metrics to datadog."""
    # Prepare
    tags_list = prepare_datadog_tags()
    timestamp = datetime.datetime.now().timestamp()
    series = []

    # Send metrics about runtime
    metrics_runtime = create_dict_of_env(METRICS)
    for metric_name, metric_value in metrics_runtime.items():
        overall_seconds = transform_to_seconds(metric_value)
        series.append(
            Series(
                metric=f"{METRIC_RUNTIME_PREFIX}{metric_name}.gauge",
                type="gauge",
                points=[Point([timestamp, overall_seconds])],
                tags=tags_list,
            )
        )

    # Send metrics about ML model performance
    metrics_ml = prepare_ml_metrics(results)
    for metric_name, metric_value in metrics_ml.items():
        series.append(
            Series(
                metric=f"{METRIC_ML_PREFIX}{metric_name}.gauge",
                type="gauge",
                points=[Point([timestamp, float(metric_value)])],
                tags=tags_list,
            )
        )

    body = MetricsPayload(series=series)
    with ApiClient(Configuration()) as api_client:
        api_instance = MetricsApi(api_client)
        response = api_instance.submit_metrics(body=body)
        if response.get("status") != "ok":
            print(response)
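transform_to_seconds and the other helpers are defined elsewhere; a minimal sketch of transform_to_seconds, assuming duration strings such as "2m 32s" (the exact input format is an assumption):

import re

def transform_to_seconds(duration: str) -> float:
    # Sum the "<n>h" / "<n>m" / "<n>s" components found in the string.
    units = {"h": 3600, "m": 60, "s": 1}
    return float(sum(int(n) * units[u]
                     for n, u in re.findall(r"(\d+)\s*([hms])", duration)))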
Example #6
from dateutil.parser import parse as dateutil_parser
from datadog_api_client.v1 import ApiClient, ApiException, Configuration
from datadog_api_client.v1.api import logs_api
from datadog_api_client.v1.models import ContentEncoding, HTTPLog, HTTPLogItem
from pprint import pprint

# remember to set environment variables in your shell:
# export DD_API_KEY=
# export DD_APP_KEY=
# export DD_SITE="datadoghq.eu"

# See configuration.py for a list of all supported configuration parameters.
configuration = Configuration()

# Enter a context with an instance of the API client
with ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = logs_api.LogsApi(api_client)
    body = HTTPLog([
        HTTPLogItem(
            ddsource="cert-manager",
            ddtags="env:temporary,version:1.0,kube_cluster_name:taneli,kube_namespace:cert-manager",
            hostname="aks-taneli-test",
            # message='"INFO 09:33:23.367397 1 "msg"="test message""',
            message='"E0716 09:36:23.367397 1 controller.go:158] cert-manager/controller/CertificateReadiness "msg"="re-queuing item due to error processing" "error"="Operation cannot be fulfilled on certificates.cert-manager.io \"isengard-tls-certs\": the object has been modified; please apply your changes to the latest version and try again" "key"="isengard/isengard-tls-certs""',
            service="cert-manager",
        ),
    ])  # HTTPLog | Log to send (JSON format).
    # content_encoding = ContentEncoding("gzip")  # ContentEncoding | HTTP header used to compress the media-type. (optional)
    # ddtags = "env:test,user:my-user"  # str | Log tags can be passed as query parameters with `text/plain` content type. (optional)
    try:
        # Send the log entry; the optional parameters above can be passed to
        # submit_log as keyword arguments if needed.
        api_response = api_instance.submit_log(body)
        pprint(api_response)
    except ApiException as e:
        print("Exception when calling LogsApi->submit_log: %s\n" % e)
Example #7
def submit_metrics(self, series: List[Series]) -> None:
    configuration = Configuration()
    with ApiClient(configuration) as api_client:
        api_instance = MetricsApi(api_client)
        body = MetricsPayload(series)
        api_instance.submit_metrics(body)
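A usage sketch for the method above; the owning class is not shown, so MetricsReporter is a hypothetical stand-in, and credentials come from DD_API_KEY / DD_APP_KEY in the environment:

import datetime

reporter = MetricsReporter()  # hypothetical class that defines submit_metrics
reporter.submit_metrics([
    Series(
        metric="my.app.heartbeat",  # assumed metric name
        type="gauge",
        points=[Point([datetime.datetime.now().timestamp(), 1.0])],
    )
])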
Example #8
def loop_data_dog():
    # Adding headers to the file (only do this once)
    write_log('host', 'message', 'service', 'tags', 'timestamp')
    # Reference global vars
    global has_additional_logs, log_start_at
    # Placeholder that sets up the API client configuration referenced below
    create_api_client()
    # If we still have more logs to process, keep looping.
    while has_additional_logs:
        # Optional throttle between requests; not strictly necessary.
        time.sleep(1)
        # Enter a context with an instance of the API client
        with ApiClient(configuration) as api_client:
            # Create an instance of the API class
            api_instance = logs_api.LogsApi(api_client)
            request_dict = {
                # The index name for a historical view is generated when
                # rehydrating logs; it will be the query name.
                'index': "january-15-2021",
                'limit': 1000,
                'query': "*",
                'sort': LogsSort("desc"),
                # This value is important to track across iterations so we
                # know where to resume and where to stop.
                # 'start_at': log_start_at,
                'time': LogsListRequestTime(
                    _from=dateutil_parser('2021-01-15T07:00:00Z'),
                    timezone="timezone_example",
                    to=dateutil_parser('2021-01-15T21:00:00Z'),
                ),
            }
            if log_start_at != '':
                request_dict['start_at'] = log_start_at
            # Define the query constraints for the logs
            body = LogsListRequest(**request_dict)
            try:
                # Get a list of logs
                api_response = api_instance.list_logs(body)
                # This is the real special sauce here... keep looping
                # until you don't find any further log IDs.
                if 'next_log_id' in api_response:
                    has_additional_logs = True
                    log_start_at = api_response['next_log_id']
                else:
                    has_additional_logs = False
                # Show the log details.
                logs = api_response['logs']
                pprint(api_response)
                for log in logs:
                    write_log(log['content']['host'],
                              log['content']['message'],
                              log['content']['service'],
                              log['content']['tags'],
                              log['content']['timestamp'])
            except ApiException as e:
                print("Exception when calling LogsApi->list_logs: %s\n" % e)
    # Here we have completed all the looping
    # and can do any cleanup work.
    print('Done, any cleanup can go here.')
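write_log is referenced above but not shown; a minimal CSV-style sketch, assuming a fixed output file (the logs.csv path is an assumption):

import csv

def write_log(host, message, service, tags, timestamp):
    # Append one row per log entry; the header row is written by the
    # first call in loop_data_dog above.
    with open("logs.csv", "a", newline="") as f:
        csv.writer(f).writerow([host, message, service, tags, timestamp])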