def decorated_function(*args, **kwargs):
    """Validate request input against the schema, invoke the wrapped view,
    and normalize its result into a (body, status) response tuple."""
    if input_schema:
        # Prefer a JSON body; fall back to query-string arguments.
        payload = request.get_json() or request.args
        loaded, validation_errors = input_schema.load(payload)
        if validation_errors:
            return wrap_errors(validation_errors), 400
        kwargs['data'] = loaded
    try:
        resp = f(*args, **kwargs)
    except Exception as e:
        sentry.captureException()
        current_app.logger.exception(e)
        return dict(message=str(e)), 500
    # Views may return (body, status) directly.
    if isinstance(resp, tuple):
        return resp[0], resp[1]
    if not resp:
        return dict(message="No data found"), 404
    return unwrap_pagination(resp, output_schema), 200
def _get_txt_records(domain):
    """
    Fetch all TXT records for *domain* via the PowerDNS search API.

    :param domain: FQDN
    :return: list of Record objects (empty list when the lookup fails)
    """
    server_id = current_app.config.get("ACME_POWERDNS_SERVERID", "localhost")
    path = f"/api/v1/servers/{server_id}/search-data?q={domain}&max=100&object_type=record"
    function = sys._getframe().f_code.co_name
    log_data = {"function": function}
    try:
        records = _get(path)
        log_data["message"] = "Retrieved TXT Records Successfully"
        current_app.logger.debug(log_data)
    except Exception as e:
        # Lookup failures are non-fatal: report and return no records.
        sentry.captureException()
        log_data["Exception"] = e
        log_data["message"] = "Failed to Retrieve TXT Records"
        current_app.logger.debug(log_data)
        return []
    return [Record(record) for record in records]
def send_notification(event_type, data, targets, notification):
    """
    Executes the notification plugin and handles failure.

    :param event_type: notification event type, used as a metric tag
    :param data: payload handed to the plugin
    :param targets: recipients
    :param notification: notification object providing plugin and options
    :return: True when the plugin send succeeded, otherwise None
    """
    status = FAILURE_METRIC_STATUS
    try:
        notification.plugin.send(event_type, data, targets, notification.options)
        status = SUCCESS_METRIC_STATUS
    except Exception:
        # Fix: previously the failure was only reported to Sentry, leaving no
        # local trace; log it like the rotation-notification sibling does.
        current_app.logger.error(
            "Unable to send {0} notification to {1}.".format(event_type, targets),
            exc_info=True,
        )
        sentry.captureException()
    metrics.send(
        "notification",
        "counter",
        1,
        metric_tags={
            "status": status,
            "event_type": event_type
        },
    )
    if status == SUCCESS_METRIC_STATUS:
        return True
def send_rotation_notification(certificate, notification_plugin=None):
    """
    Sends a report to certificate owners when their certificate has been rotated.

    :param certificate: certificate that was rotated
    :param notification_plugin: plugin override; defaults to the configured one
    :return: True when the notification was sent successfully, otherwise None
    """
    status = FAILURE_METRIC_STATUS
    if not notification_plugin:
        notification_plugin = plugins.get(
            current_app.config.get('LEMUR_DEFAULT_NOTIFICATION_PLUGIN'))
    data = certificate_notification_output_schema.dump(certificate).data
    try:
        notification_plugin.send('rotation', data, [data['owner']])
        status = SUCCESS_METRIC_STATUS
    except Exception:
        # Fix: the sibling implementation logs send failures; this one silently
        # swallowed them (only a Sentry event). Log for local visibility.
        current_app.logger.error(
            'Unable to send notification to {}.'.format(data['owner']),
            exc_info=True)
        sentry.captureException()
    metrics.send('notification', 'counter', 1,
                 metric_tags={
                     'status': status,
                     'event_type': 'rotation'
                 })
    if status == SUCCESS_METRIC_STATUS:
        return True
def get_zones(account_number):
    """
    Retrieve authoritative zones from the PowerDNS API and return a list of zones

    :param account_number:
    :raise: Exception
    :return: list of Zone Objects
    """
    _check_conf()
    server_id = current_app.config.get("ACME_POWERDNS_SERVERID", "localhost")
    path = f"/api/v1/servers/{server_id}/zones"
    function = sys._getframe().f_code.co_name
    log_data = {"function": function}
    try:
        records = _get(path)
        log_data["message"] = "Retrieved Zones Successfully"
        current_app.logger.debug(log_data)
    except Exception:
        sentry.captureException()
        log_data["message"] = "Failed to Retrieve Zone Data"
        current_app.logger.debug(log_data)
        raise
    # Only zones this server is authoritative (Master) for are usable.
    return [zone.name for zone in map(Zone, records) if zone.kind == 'Master']
def get_certificate_by_name(self, certificate_name, options):
    """Look up an IAM server certificate by name and return its PEM parts,
    or None when it cannot be found."""
    account_number = self.get_option("accountNumber", options)
    # certificate name may contain path, in which case we remove it
    certificate_name = certificate_name.rpartition('/')[-1]
    try:
        cert = iam.get_certificate(certificate_name, account_number=account_number)
        if cert:
            return dict(
                body=cert["CertificateBody"],
                chain=cert.get("CertificateChain"),
                name=cert["ServerCertificateMetadata"]["ServerCertificateName"],
            )
    except ClientError:
        current_app.logger.warning(
            "get_elb_certificate_failed: Unable to get certificate for {0}".format(certificate_name))
        sentry.captureException()
        metrics.send(
            "get_elb_certificate_failed", "counter", 1,
            metric_tags={
                "certificate_name": certificate_name,
                "account_number": account_number,
            })
    return None
def notify_authority_expirations():
    """
    Celery task: send notifications for certificate-authority certs that are
    nearing expiration.

    :return: the task's log payload, or None when the task is skipped/timed out
    """
    fn = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = celery.current_task.request.id if celery.current_task else None
    log_data = {
        "function": fn,
        "message": "notify for certificate authority cert expiration",
        "task_id": task_id,
    }
    # Avoid running two copies of the same task concurrently.
    if task_id and is_task_active(fn, task_id, None):
        log_data["message"] = "Skipping task: Task is already active"
        current_app.logger.debug(log_data)
        return
    current_app.logger.debug(log_data)
    try:
        cli_notification.authority_expirations()
    except SoftTimeLimitExceeded:
        log_data["message"] = "Notify expiring CA Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": fn})
        return
    metrics.send(f"{fn}.success", "counter", 1)
    return log_data
def deactivate_entrust_test_certificates():
    """
    Celery task: deactivate all not-yet-deactivated Entrust certificates.
    Intended to run only in TEST environments.

    :return: the task's log payload, or None when the task is skipped/timed out
    """
    fn = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = celery.current_task.request.id if celery.current_task else None
    log_data = {
        "function": fn,
        "message": "deactivate entrust certificates",
        "task_id": task_id,
    }
    # Avoid running two copies of the same task concurrently.
    if task_id and is_task_active(fn, task_id, None):
        log_data["message"] = "Skipping task: Task is already active"
        current_app.logger.debug(log_data)
        return
    current_app.logger.debug(log_data)
    try:
        cli_certificate.deactivate_entrust_certificates()
    except SoftTimeLimitExceeded:
        log_data["message"] = "Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": fn})
        return
    metrics.send(f"{fn}.success", "counter", 1)
    return log_data
def certificate_reissue():
    """
    Celery task: reissue every certificate that is pending reissue.

    :return: the task's log payload, or None when the task is skipped/timed out
    """
    fn = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = celery.current_task.request.id if celery.current_task else None
    log_data = {
        "function": fn,
        "message": "reissuing certificates",
        "task_id": task_id,
    }
    # Avoid running two copies of the same task concurrently.
    if task_id and is_task_active(fn, task_id, None):
        log_data["message"] = "Skipping task: Task is already active"
        current_app.logger.debug(log_data)
        return
    current_app.logger.debug(log_data)
    try:
        cli_certificate.reissue(None, True)
    except SoftTimeLimitExceeded:
        log_data["message"] = "Certificate reissue: Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": fn})
        return
    log_data["message"] = "reissuance completed"
    current_app.logger.debug(log_data)
    metrics.send(f"{fn}.success", "counter", 1)
    return log_data
def get_all_zones():
    """
    Celery task: refresh all zones from the available DNS providers.

    :return: the task's log payload, or None when the task is skipped/timed out
    """
    fn = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = celery.current_task.request.id if celery.current_task else None
    log_data = {
        "function": fn,
        "message": "refresh all zones from available DNS providers",
        "task_id": task_id,
    }
    # Avoid running two copies of the same task concurrently.
    if task_id and is_task_active(fn, task_id, None):
        log_data["message"] = "Skipping task: Task is already active"
        current_app.logger.debug(log_data)
        return
    current_app.logger.debug(log_data)
    try:
        cli_dns_providers.get_all_zones()
    except SoftTimeLimitExceeded:
        log_data["message"] = "get all zones: Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": fn})
        return
    metrics.send(f"{fn}.success", "counter", 1)
    return log_data
def request_reissue(certificate, commit):
    """
    Reissuing certificate and handles any exceptions.

    :param certificate: certificate eligible for re-issuance
    :param commit: when True, actually issue the replacement certificate
    :return:
    """
    status = FAILURE_METRIC_STATUS
    try:
        print("[+] {0} is eligible for re-issuance".format(certificate.name))
        # set the lemur identity for all cli commands
        identity_changed.send(current_app._get_current_object(),
                              identity=Identity(1))
        details = get_certificate_primitives(certificate)
        print_certificate_details(details)
        if commit:
            new_cert = reissue_certificate(certificate, replace=True)
            print("[+] New certificate named: {0}".format(new_cert.name))
        status = SUCCESS_METRIC_STATUS
    except Exception as e:
        sentry.captureException()
        # Fix: record the full stack trace in the app log (as the sibling
        # implementation does) instead of only printing the message to stdout.
        current_app.logger.exception("Error reissuing certificate.", exc_info=True)
        print(
            "[!] Failed to reissue certificates. Reason: {}".format(
                e
            )
        )
    metrics.send('certificate_reissue', 'counter', 1, metric_tags={'status': status})
def reissue(old_certificate_name, commit):
    """
    Reissues certificate with the same parameters as it was originally issued with.
    If not time period is provided, reissues certificate as valid from today to
    today + length of original.
    """
    if commit:
        print("[!] Running in COMMIT mode.")
    print("[+] Starting certificate re-issuance.")
    try:
        old_cert = validate_certificate(old_certificate_name)
        if old_cert:
            request_reissue(old_cert, commit)
        else:
            # No specific certificate named: reissue everything pending.
            for certificate in get_all_pending_reissue():
                print("[+] {0} is eligible for re-issuance".format(certificate.name))
                request_reissue(certificate, commit)
        print("[+] Done!")
    except Exception as e:
        sentry.captureException()
        metrics.send('certificate_reissue_failure', 'counter', 1)
        print("[!] Failed to reissue certificates. Reason: {}".format(e))
def worker(data, commit, reason):
    """Revoke a single certificate identified by the first token of *data*."""
    tokens = [piece for piece in data.split(" ") if piece]
    try:
        cert = get(int(tokens[0].strip()))
        plugin = plugins.get(cert.authority.plugin_name)
        print("[+] Revoking certificate. Id: {0} Name: {1}".format(cert.id, cert.name))
        if commit:
            plugin.revoke_certificate(cert, reason)
        metrics.send(
            "certificate_revoke", "counter", 1,
            metric_tags={"status": SUCCESS_METRIC_STATUS},
        )
    except Exception as e:
        sentry.captureException()
        metrics.send(
            "certificate_revoke", "counter", 1,
            metric_tags={"status": FAILURE_METRIC_STATUS},
        )
        print("[!] Failed to revoke certificates. Reason: {}".format(e))
def reissue(old_certificate_name, commit):
    """
    Reissues certificate with the same parameters as it was originally issued with.
    If not time period is provided, reissues certificate as valid from today to
    today + length of original.
    """
    if commit:
        print("[!] Running in COMMIT mode.")
    print("[+] Starting certificate re-issuance.")
    status = FAILURE_METRIC_STATUS
    try:
        old_cert = validate_certificate(old_certificate_name)
        # A named certificate takes precedence; otherwise sweep everything pending.
        targets = [old_cert] if old_cert else get_all_pending_reissue()
        for certificate in targets:
            request_reissue(certificate, commit)
        status = SUCCESS_METRIC_STATUS
        print("[+] Done!")
    except Exception as e:
        sentry.captureException()
        current_app.logger.exception("Error reissuing certificate.", exc_info=True)
        print("[!] Failed to reissue certificates. Reason: {}".format(e))
    metrics.send("certificate_reissue_job", "counter", 1, metric_tags={"status": status})
def reissue(old_certificate_name, commit):
    """
    Reissues certificate with the same parameters as it was originally issued with.
    If not time period is provided, reissues certificate as valid from today to
    today + length of original.
    """
    if commit:
        print("[!] Running in COMMIT mode.")
    print("[+] Starting certificate re-issuance.")
    status = FAILURE_METRIC_STATUS
    try:
        named_cert = validate_certificate(old_certificate_name)
        if named_cert:
            request_reissue(named_cert, commit)
        else:
            # No certificate named: reissue every certificate pending reissue.
            for pending in get_all_pending_reissue():
                request_reissue(pending, commit)
        status = SUCCESS_METRIC_STATUS
        print("[+] Done!")
    except Exception as e:
        sentry.captureException()
        current_app.logger.exception("Error reissuing certificate.", exc_info=True)
        print("[!] Failed to reissue certificates. Reason: {}".format(e))
    metrics.send('certificate_reissue_job', 'counter', 1, metric_tags={'status': status})
def send_pending_failure_notification(pending_cert, notify_owner=True, notify_security=True, notification_plugin=None):
    """
    Sends a report to certificate owners when their pending certificate failed to be created.

    :param pending_cert: the pending certificate that failed
    :param notify_owner: also notify the certificate owner
    :param notify_security: also notify the security team address
    :param notification_plugin: plugin override; defaults to the configured one
    :return: True when at least one notification was sent successfully, otherwise None
    """
    status = FAILURE_METRIC_STATUS
    if not notification_plugin:
        notification_plugin = plugins.get(
            current_app.config.get("LEMUR_DEFAULT_NOTIFICATION_PLUGIN", "email-notification"))
    data = pending_certificate_output_schema.dump(pending_cert).data
    data["security_email"] = current_app.config.get(
        "LEMUR_SECURITY_TEAM_EMAIL")
    if notify_owner:
        try:
            notification_plugin.send("failed", data, [data["owner"]], pending_cert)
            # status reflects the last successful branch; a later failure in the
            # security branch does not reset it.
            status = SUCCESS_METRIC_STATUS
        except Exception as e:
            current_app.logger.error(
                "Unable to send pending failure notification to {}.".format(
                    data["owner"]),
                exc_info=True,
            )
            sentry.captureException()
    if notify_security:
        try:
            # NOTE(review): here the targets argument is the raw config value,
            # while the owner branch wraps its target in a list — confirm the
            # plugin accepts both forms.
            notification_plugin.send("failed", data, data["security_email"], pending_cert)
            status = SUCCESS_METRIC_STATUS
        except Exception as e:
            current_app.logger.error(
                "Unable to send pending failure notification to "
                "{}.".format(data["security_email"]),
                exc_info=True,
            )
            sentry.captureException()
    metrics.send(
        "notification",
        "counter",
        1,
        metric_tags={
            "status": status,
            "event_type": "rotation"
        },
    )
    if status == SUCCESS_METRIC_STATUS:
        return True
def request_reissue(certificate, commit):
    """
    Reissue a certificate, logging and metering any failure.

    :param certificate: certificate eligible for re-issuance
    :param commit: when True, actually issue the replacement certificate
    :return:
    """
    status = FAILURE_METRIC_STATUS
    try:
        print("[+] {0} is eligible for re-issuance".format(certificate.name))
        # set the lemur identity for all cli commands
        identity_changed.send(current_app._get_current_object(), identity=Identity(1))
        details = get_certificate_primitives(certificate)
        print_certificate_details(details)
        if commit:
            replacement = reissue_certificate(certificate, replace=True)
            print("[+] New certificate named: {0}".format(replacement.name))
        status = SUCCESS_METRIC_STATUS
    except Exception as e:
        sentry.captureException()
        current_app.logger.exception("Error reissuing certificate.", exc_info=True)
        print("[!] Failed to reissue certificates. Reason: {}".format(e))
    metrics.send('certificate_reissue', 'counter', 1, metric_tags={'status': status})
def get_load_balancer_arn_from_endpoint(endpoint_name, **kwargs):
    """
    Resolve the ARN of the load balancer named *endpoint_name*.

    :param endpoint_name:
    :return: the load balancer ARN, or None when no balancer matched
    """
    try:
        client = kwargs.pop("client")
        described = client.describe_load_balancers(Names=[endpoint_name])
        balancers = described.get("LoadBalancers")
        if balancers:
            return balancers[0]["LoadBalancerArn"]
    except Exception as e:  # noqa
        metrics.send(
            "get_load_balancer_arn_from_endpoint", "counter", 1,
            metric_tags={
                "error": str(e),
                "endpoint_name": endpoint_name,
            },
        )
        sentry.captureException(extra={
            "endpoint_name": str(endpoint_name),
        })
        raise
def request_certificate(self, acme_client, authorizations, order):
    """
    Poll ACME authorizations, finalize the order, and return the PEM
    certificate together with its issuer chain.
    """
    for authorization in authorizations:
        for authz in authorization.authz:
            # Poll each authorization so its status is current before finalizing.
            authorization_resource, _ = acme_client.poll(authz)
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=360)
    try:
        orderr = acme_client.poll_and_finalize(order, deadline)
    except (AcmeError, TimeoutError):
        sentry.captureException(extra={"order_url": str(order.uri)})
        metrics.send("request_certificate_error", "counter", 1, metric_tags={"uri": order.uri})
        current_app.logger.error(
            f"Unable to resolve Acme order: {order.uri}", exc_info=True
        )
        raise
    except errors.ValidationError:
        # A validation error can still leave a usable certificate on the order;
        # only re-raise when no full chain was produced.
        if order.fullchain_pem:
            orderr = order
        else:
            raise
    metrics.send("request_certificate_success", "counter", 1, metric_tags={"uri": order.uri})
    current_app.logger.info(
        f"Successfully resolved Acme order: {order.uri}", exc_info=True
    )
    pem_certificate, pem_certificate_chain = self.extract_cert_and_chain(orderr.fullchain_pem)
    current_app.logger.debug(
        "{0} {1}".format(type(pem_certificate), type(pem_certificate_chain))
    )
    return pem_certificate, pem_certificate_chain
def describe_load_balancer_policies(load_balancer_name, policy_names, **kwargs):
    """
    Fetch the named policies currently attached to an ELB.

    :param load_balancer_name:
    :param policy_names:
    :return: the describe_load_balancer_policies API response
    """
    try:
        elb_client = kwargs["client"]
        return elb_client.describe_load_balancer_policies(
            LoadBalancerName=load_balancer_name,
            PolicyNames=policy_names,
        )
    except Exception as e:  # noqa
        failure_tags = {
            "load_balancer_name": load_balancer_name,
            "policy_names": policy_names,
            "error": str(e),
        }
        metrics.send("describe_load_balancer_policies_error", "counter", 1,
                     metric_tags=failure_tags)
        sentry.captureException(extra={
            "load_balancer_name": str(load_balancer_name),
            "policy_names": str(policy_names),
        })
        raise
def expirations(exclude, disabled_notification_plugins):
    """
    Runs Lemur's notification engine, that looks for expiring certificates and
    sends notifications out to those that have subscribed to them.

    Every certificate receives notifications by default. When expiration
    notifications are handled outside of Lemur we exclude their names (or
    matching) from expiration notifications. It performs simple subset matching
    and is case insensitive.

    :return:
    """
    status = FAILURE_METRIC_STATUS
    try:
        print("Starting to notify subscribers about expiring certificates!")
        success, failed = send_expiration_notifications(
            exclude, disabled_notification_plugins)
        print(
            f"Finished notifying subscribers about expiring certificates! Sent: {success} Failed: {failed}"
        )
        status = SUCCESS_METRIC_STATUS
    except Exception as e:
        sentry.captureException()
        # Fix: the exception was captured by Sentry but never surfaced to the
        # operator running the command; print the reason like sibling commands.
        print("[!] Failed to send expiration notifications. Reason: {}".format(e))
    metrics.send("expiration_notification_job", "counter", 1,
                 metric_tags={"status": status})
def endpoints_expire():
    """
    Celery task: remove all endpoints that have not been recently updated.

    :return: the task's log payload, or None when the task is skipped/timed out
    """
    fn = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = celery.current_task.request.id if celery.current_task else None
    log_data = {
        "function": fn,
        "message": "endpoints expire",
        "task_id": task_id,
    }
    # Avoid running two copies of the same task concurrently.
    if task_id and is_task_active(fn, task_id, None):
        log_data["message"] = "Skipping task: Task is already active"
        current_app.logger.debug(log_data)
        return
    current_app.logger.debug(log_data)
    try:
        cli_endpoints.expire(2)  # Time in hours
    except SoftTimeLimitExceeded:
        log_data["message"] = "endpoint expire: Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": fn})
        return
    metrics.send(f"{fn}.success", "counter", 1)
    return log_data
def update_destinations(target, value, initiator):
    """
    Attempt to upload certificate to the new destination

    :param target: certificate being uploaded
    :param value: destination that was attached
    :param initiator: event initiator (unused, required by the hook signature)
    :return:
    """
    destination_plugin = plugins.get(value.plugin_name)
    status = FAILURE_METRIC_STATUS
    try:
        if target.private_key or not destination_plugin.requires_key:
            destination_plugin.upload(target.name, target.body,
                                      target.private_key, target.chain,
                                      value.options)
        status = SUCCESS_METRIC_STATUS
    except Exception:
        sentry.captureException()
        raise
    finally:
        # Fix: the metric was previously emitted after the re-raise, so the
        # failure status was never recorded; `finally` runs on both paths.
        metrics.send('destination_upload', 'counter', 1,
                     metric_tags={
                         'status': status,
                         'certificate': target.name,
                         'destination': value.label
                     })
def check_revoked():
    """
    Celery task: check whether any valid certificates have been revoked.

    :return: the task's log payload, or None when the task is skipped/timed out
    """
    fn = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = celery.current_task.request.id if celery.current_task else None
    log_data = {
        "function": fn,
        "message": "check if any valid certificate is revoked",
        "task_id": task_id,
    }
    # Avoid running two copies of the same task concurrently.
    if task_id and is_task_active(fn, task_id, None):
        log_data["message"] = "Skipping task: Task is already active"
        current_app.logger.debug(log_data)
        return
    current_app.logger.debug(log_data)
    try:
        cli_certificate.check_revoked()
    except SoftTimeLimitExceeded:
        log_data["message"] = "Checking revoked: Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": fn})
        return
    metrics.send(f"{fn}.success", "counter", 1)
    return log_data
def send_rotation_notification(certificate, notification_plugin=None):
    """
    Notify a certificate's owner that the certificate has been rotated.

    :param certificate:
    :param notification_plugin: plugin override; defaults to the configured one
    :return: True on success, otherwise None
    """
    status = FAILURE_METRIC_STATUS
    if not notification_plugin:
        notification_plugin = plugins.get(current_app.config.get('LEMUR_DEFAULT_NOTIFICATION_PLUGIN'))
    data = certificate_notification_output_schema.dump(certificate).data
    try:
        notification_plugin.send('rotation', data, [data['owner']])
        status = SUCCESS_METRIC_STATUS
    except Exception:
        current_app.logger.error('Unable to send notification to {}.'.format(data['owner']), exc_info=True)
        sentry.captureException()
    metrics.send('notification', 'counter', 1,
                 metric_tags={'status': status, 'event_type': 'rotation'})
    if status == SUCCESS_METRIC_STATUS:
        return True
def send_security_expiration_summary():
    """
    Celery task: send the security team a summary of expiring certificates.

    :return: the task's log payload, or None when the task is skipped/timed out
    """
    fn = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = celery.current_task.request.id if celery.current_task else None
    log_data = {
        "function": fn,
        "message": "send summary for certificate expiration",
        "task_id": task_id,
    }
    # Avoid running two copies of the same task concurrently.
    if task_id and is_task_active(fn, task_id, None):
        log_data["message"] = "Skipping task: Task is already active"
        current_app.logger.debug(log_data)
        return
    current_app.logger.debug(log_data)
    try:
        cli_notification.security_expiration_summary(
            current_app.config.get("EXCLUDE_CN_FROM_NOTIFICATION", []))
    except SoftTimeLimitExceeded:
        log_data["message"] = "Send summary for expiring certs Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": fn})
        return
    metrics.send(f"{fn}.success", "counter", 1)
    return log_data
def put(bucket_name, region_name, prefix, data, encrypt, **kwargs):
    """
    Use STS to write to an S3 bucket

    :param bucket_name: destination bucket
    :param region_name: region (unused here; kept for interface compatibility)
    :param prefix: object key
    :param data: str or bytes payload
    :param encrypt: when True, request AES256 server-side encryption
    :return: True on success, False when the upload fails with a ClientError
    """
    bucket = kwargs["resource"].Bucket(bucket_name)
    current_app.logger.debug(
        "Persisting data to S3. Bucket: {0} Prefix: {1}".format(
            bucket_name, prefix))
    # get data ready for writing
    if isinstance(data, str):
        data = data.encode("utf-8")
    put_kwargs = dict(Key=prefix, Body=data, ACL="bucket-owner-full-control")
    if encrypt:
        put_kwargs["ServerSideEncryption"] = "AES256"
    try:
        # Fix: the encrypted path previously returned None on success and did
        # not handle ClientError; both paths now behave uniformly.
        bucket.put_object(**put_kwargs)
        return True
    except ClientError:
        sentry.captureException()
        return False
def health():
    """Report 'ok' when the database healthcheck passes; 'db check failed'
    when the check raises."""
    try:
        db_ok = healthcheck(db)
    except Exception:
        sentry.captureException()
        return 'db check failed'
    if db_ok:
        return 'ok'
def reissue(old_certificate_name, commit):
    """
    Reissues certificate with the same parameters as it was originally issued with.
    If not time period is provided, reissues certificate as valid from today to
    today + length of original.
    """
    if commit:
        print("[!] Running in COMMIT mode.")
    print("[+] Starting certificate re-issuance.")
    try:
        named_cert = validate_certificate(old_certificate_name)
        if named_cert:
            request_reissue(named_cert, commit)
        else:
            # No certificate named: sweep everything pending reissue.
            for certificate in get_all_pending_reissue():
                print("[+] {0} is eligible for re-issuance".format(certificate.name))
                request_reissue(certificate, commit)
        print("[+] Done!")
    except Exception as e:
        sentry.captureException()
        metrics.send('certificate_reissue_failure', 'counter', 1)
        print("[!] Failed to reissue certificates. Reason: {}".format(e))
def expirations(exclude):
    """
    Runs Lemur's notification engine, that looks for expired certificates and
    sends notifications out to those that have subscribed to them.

    Every certificate receives notifications by default. When expiration
    notifications are handled outside of Lemur we exclude their names (or
    matching) from expiration notifications. It performs simple subset matching
    and is case insensitive.

    :return:
    """
    status = FAILURE_METRIC_STATUS
    try:
        print("Starting to notify subscribers about expiring certificates!")
        success, failed = send_expiration_notifications(exclude)
        print(
            "Finished notifying subscribers about expiring certificates! Sent: {success} Failed: {failed}".format(
                success=success, failed=failed
            )
        )
        status = SUCCESS_METRIC_STATUS
    except Exception as e:
        sentry.captureException()
        # Fix: the exception was captured by Sentry but never surfaced to the
        # operator running the command; print the reason like sibling commands.
        print("[!] Failed to send expiration notifications. Reason: {}".format(e))
    metrics.send('expiration_notification_job', 'counter', 1,
                 metric_tags={'status': status})
def request_certificate(self, acme_client, authorizations, order):
    """
    Poll ACME authorizations, finalize the order, and split the resulting
    fullchain PEM into the leaf certificate and its issuer chain.
    """
    for authorization in authorizations:
        for authz in authorization.authz:
            # Poll each authorization so its status is current before finalizing.
            authorization_resource, _ = acme_client.poll(authz)
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=360)
    try:
        orderr = acme_client.poll_and_finalize(order, deadline)
    except (AcmeError, TimeoutError):
        sentry.captureException(extra={"order_url": str(order.uri)})
        metrics.send("request_certificate_error", "counter", 1)
        current_app.logger.error(
            f"Unable to resolve Acme order: {order.uri}", exc_info=True)
        raise
    except errors.ValidationError:
        # A validation error can still leave a usable certificate on the order;
        # only re-raise when no full chain was produced.
        if order.fullchain_pem:
            orderr = order
        else:
            raise
    # fullchain_pem starts with the leaf certificate; re-serialize the first
    # certificate and slice it off the front to recover the issuer chain.
    pem_certificate = OpenSSL.crypto.dump_certificate(
        OpenSSL.crypto.FILETYPE_PEM,
        OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                        orderr.fullchain_pem),
    ).decode()
    pem_certificate_chain = orderr.fullchain_pem[len(pem_certificate):].lstrip()
    current_app.logger.debug("{0} {1}".format(type(pem_certificate),
                                              type(pem_certificate_chain)))
    return pem_certificate, pem_certificate_chain
def delete_txt_record(change_id, account_number, domain, token):
    """
    Delete the TXT record for the given domain and token via the PowerDNS API.
    """
    _check_conf()
    zone_name = _get_zone_name(domain, account_number)
    server_id = current_app.config.get("ACME_POWERDNS_SERVERID", "localhost")
    zone_id = zone_name + "."
    domain_id = domain + "."
    path = f"/api/v1/servers/{server_id}/zones/{zone_id}"
    # A DELETE changetype removes the whole rrset matching name/type.
    rrset = {
        "name": domain_id,
        "type": "TXT",
        "ttl": 300,
        "changetype": "DELETE",
        "records": [{
            "content": f"\"{token}\"",
            "disabled": False
        }],
        "comments": []
    }
    payload = {"rrsets": [rrset]}
    function = sys._getframe().f_code.co_name
    log_data = {"function": function, "fqdn": domain, "token": token}
    try:
        _patch(path, payload)
        log_data["message"] = "TXT record successfully deleted"
        current_app.logger.debug(log_data)
    except Exception as e:
        # Deletion failures are logged but not fatal.
        sentry.captureException()
        log_data["Exception"] = e
        log_data["message"] = "Unable to delete TXT record"
        current_app.logger.debug(log_data)
def decorated_function(*args, **kwargs):
    """Load request data through the input schema, call the wrapped view,
    and shape the result into a (body, status) response."""
    if input_schema:
        body = request.get_json()
        request_data = body if body else request.args
        data, errors = input_schema.load(request_data)
        if errors:
            return wrap_errors(errors), 400
        kwargs['data'] = data
    try:
        resp = f(*args, **kwargs)
    except Exception as e:
        sentry.captureException()
        current_app.logger.exception(e)
        return dict(message=str(e)), 500
    # Views may hand back an explicit (body, status) pair.
    if isinstance(resp, tuple):
        return resp[0], resp[1]
    if not resp:
        return dict(message="No data found"), 404
    return unwrap_pagination(resp, output_schema), 200
def get_listener_arn_from_endpoint(endpoint_name, endpoint_port, **kwargs):
    """
    Find the listener ARN on *endpoint_name* bound to *endpoint_port*.

    :param endpoint_name:
    :param endpoint_port:
    :return: the listener ARN, or None when no listener matched
    """
    try:
        client = kwargs.pop("client")
        described = client.describe_load_balancers(Names=[endpoint_name])
        for balancer in described["LoadBalancers"]:
            listeners = client.describe_listeners(
                LoadBalancerArn=balancer["LoadBalancerArn"])
            for listener in listeners["Listeners"]:
                if listener["Port"] == endpoint_port:
                    return listener["ListenerArn"]
    except Exception as e:  # noqa
        metrics.send(
            "get_listener_arn_from_endpoint_error", "counter", 1,
            metric_tags={
                "error": str(e),
                "endpoint_name": endpoint_name,
                "endpoint_port": endpoint_port,
            },
        )
        sentry.captureException(
            extra={
                "endpoint_name": str(endpoint_name),
                "endpoint_port": str(endpoint_port),
            })
        raise
def clean_source(source):
    """
    Celery task: clean the specified source. This is a destructive operation
    that deletes unused certificates from the source.

    :param source:
    :return: the task's log payload
    """
    fn = f"{__name__}.{sys._getframe().f_code.co_name}"
    task_id = celery.current_task.request.id if celery.current_task else None
    log_data = {
        "function": fn,
        "message": "Cleaning source",
        "source": source,
        "task_id": task_id,
    }
    # Avoid running two copies of the same task for the same source.
    if task_id and is_task_active(fn, task_id, (source,)):
        log_data["message"] = "Skipping task: Task is already active"
        current_app.logger.debug(log_data)
        return
    current_app.logger.debug(log_data)
    try:
        clean([source], True)
    except SoftTimeLimitExceeded:
        log_data["message"] = "Clean source: Time limit exceeded."
        current_app.logger.error(log_data)
        sentry.captureException()
        metrics.send("celery.timeout", "counter", 1, metric_tags={"function": fn})
    return log_data
def retry_throttled(exception): """ Determines if this exception is due to throttling :param exception: :return: """ # Log details about the exception try: raise exception except Exception as e: current_app.logger.error("ELB retry_throttled triggered", exc_info=True) metrics.send("elb_retry", "counter", 1, metric_tags={"exception": str(e)}) sentry.captureException() if isinstance(exception, botocore.exceptions.ClientError): if exception.response["Error"]["Code"] == "LoadBalancerNotFound": return False if exception.response["Error"]["Code"] == "CertificateNotFound": return False return True
def cleanup_after_revoke(certificate):
    """
    Perform the needed cleanup for a revoked certificate. This includes -
    1. Disabling notification
    2. Disabling auto-rotation
    3. Update certificate status to 'revoked'
    4. Remove from AWS

    :param certificate: Certificate object to modify and update in DB
    :return: accumulated error message string ("" when everything succeeded)
    """
    certificate.notify = False
    certificate.rotation = False
    certificate.status = 'revoked'

    failures = []
    # Iterate over a copy: entries are removed from the collection while looping.
    for destination in list(certificate.destinations):
        try:
            remove_from_destination(certificate, destination)
            certificate.destinations.remove(destination)
        except Exception as e:
            # This cleanup is best-effort since the certificate is already
            # revoked; record the failure and continue with the rest.
            sentry.captureException()
            failures.append(f"Failed to remove destination: {destination.label}. {str(e)}. ")

    database.update(certificate)
    return "".join(failures)
def extensions(self):
    """
    Parse self.body (PEM) and translate its x509 extensions into the dict
    structure used when cloning a certificate.

    :return: dict of extension data; always contains 'sub_alt_names'
    """
    # setup default values
    return_extensions = {
        'sub_alt_names': {'names': []}
    }
    try:
        cert = lemur.common.utils.parse_certificate(self.body)
        for extension in cert.extensions:
            value = extension.value
            if isinstance(value, x509.BasicConstraints):
                return_extensions['basic_constraints'] = value
            elif isinstance(value, x509.SubjectAlternativeName):
                # NOTE(review): this stores the SubjectAlternativeName extension
                # value object itself under 'names', not a plain list of names —
                # confirm downstream consumers expect the object.
                return_extensions['sub_alt_names']['names'] = value
            elif isinstance(value, x509.ExtendedKeyUsage):
                return_extensions['extended_key_usage'] = value
            elif isinstance(value, x509.KeyUsage):
                return_extensions['key_usage'] = value
            elif isinstance(value, x509.SubjectKeyIdentifier):
                return_extensions['subject_key_identifier'] = {'include_ski': True}
            elif isinstance(value, x509.AuthorityInformationAccess):
                return_extensions['certificate_info_access'] = {'include_aia': True}
            elif isinstance(value, x509.AuthorityKeyIdentifier):
                aki = {
                    'use_key_identifier': False,
                    'use_authority_cert': False
                }
                if value.key_identifier:
                    aki['use_key_identifier'] = True
                if value.authority_cert_issuer:
                    aki['use_authority_cert'] = True
                return_extensions['authority_key_identifier'] = aki
            # TODO: Don't support CRLDistributionPoints yet https://github.com/Netflix/lemur/issues/662
            elif isinstance(value, x509.CRLDistributionPoints):
                current_app.logger.warning('CRLDistributionPoints not yet supported for clone operation.')
            # TODO: Not supporting custom OIDs yet. https://github.com/Netflix/lemur/issues/665
            else:
                current_app.logger.warning('Custom OIDs not yet supported for clone operation.')
    except InvalidCodepoint as e:
        # Underscores in DNS names are rejected by the IDNA codec.
        sentry.captureException()
        current_app.logger.warning('Unable to parse extensions due to underscore in dns name')
    except ValueError as e:
        sentry.captureException()
        current_app.logger.warning('Unable to parse')
        current_app.logger.exception(e)
    return return_extensions
def get_elbs(**kwargs):
    """
    Fetch one page of classic ELB descriptions for a given account and region.
    """
    try:
        elb_client = kwargs.pop('client')
        return elb_client.describe_load_balancers(**kwargs)
    except Exception as e:  # noqa
        metrics.send('get_elbs_error', 'counter', 1,
                     metric_tags={"error": e})
        sentry.captureException()
        raise
def describe_ssl_policies_v2(policy_names, **kwargs):
    """Look up the named SSL policies through the ELBv2 API.

    :param policy_names: list of SSL policy names to describe
    :return: the raw ``describe_ssl_policies`` response
    """
    try:
        response = kwargs['client'].describe_ssl_policies(Names=policy_names)
    except Exception as e:  # noqa
        metrics.send(
            'describe_ssl_policies_v2_error', 'counter', 1,
            metric_tags={"policy_names": policy_names, "error": e},
        )
        sentry.captureException(extra={"policy_names": str(policy_names)})
        raise
    else:
        return response
def describe_listeners_v2(**kwargs):
    """Return one page of listeners for an ELBv2 load balancer.

    Pops the boto3 client out of ``kwargs`` and forwards the remaining
    keyword arguments to ``describe_listeners``.  Errors are counted and
    reported before being re-raised.
    """
    try:
        aws_client = kwargs.pop('client')
        page = aws_client.describe_listeners(**kwargs)
    except Exception as e:  # noqa
        metrics.send('describe_listeners_v2_error', 'counter', 1, metric_tags={"error": e})
        sentry.captureException()
        raise
    else:
        return page
def rotate(endpoint_name, new_certificate_name, old_certificate_name, message, commit):
    """
    Rotates an endpoint and reissues it if it has not already been replaced. If it has
    been replaced, will use the replacement certificate for the rotation.

    Three modes, depending on which arguments resolve to objects:
      * endpoint + new cert: rotate just that endpoint;
      * old cert + new cert: rotate every endpoint attached to the old cert;
      * otherwise: rotate all endpoints that have exactly one replacement
        certificate pending (multiple replacements are skipped and counted
        as failures).

    :param endpoint_name: name of a single endpoint to rotate (optional)
    :param new_certificate_name: name of the certificate to rotate onto (optional)
    :param old_certificate_name: name of the certificate being rotated away from (optional)
    :param message: message forwarded with each rotation request
    :param commit: when False this is a dry run; the flag is forwarded to request_rotation
    """
    if commit:
        print("[!] Running in COMMIT mode.")

    print("[+] Starting endpoint rotation.")
    # Assume failure until the whole job completes without raising.
    status = FAILURE_METRIC_STATUS

    try:
        old_cert = validate_certificate(old_certificate_name)
        new_cert = validate_certificate(new_certificate_name)
        endpoint = validate_endpoint(endpoint_name)

        if endpoint and new_cert:
            # Single-endpoint mode.
            print("[+] Rotating endpoint: {0} to certificate {1}".format(endpoint.name, new_cert.name))
            request_rotation(endpoint, new_cert, message, commit)

        elif old_cert and new_cert:
            # Rotate every endpoint currently using the old certificate.
            print("[+] Rotating all endpoints from {0} to {1}".format(old_cert.name, new_cert.name))
            for endpoint in old_cert.endpoints:
                print("[+] Rotating {0}".format(endpoint.name))
                request_rotation(endpoint, new_cert, message, commit)

        else:
            # Fleet-wide mode: rotate anything with a pending replacement.
            print("[+] Rotating all endpoints that have new certificates available")
            for endpoint in endpoint_service.get_all_pending_rotation():
                if len(endpoint.certificate.replaced) == 1:
                    print("[+] Rotating {0} to {1}".format(endpoint.name, endpoint.certificate.replaced[0].name))
                    request_rotation(endpoint, endpoint.certificate.replaced[0], message, commit)
                else:
                    # Ambiguous replacement — refuse to guess, record a failure metric.
                    metrics.send('endpoint_rotation', 'counter', 1, metric_tags={'status': FAILURE_METRIC_STATUS})
                    print("[!] Failed to rotate endpoint {0} reason: Multiple replacement certificates found.".format(
                        endpoint.name
                    ))

        status = SUCCESS_METRIC_STATUS
        print("[+] Done!")
    except Exception as e:
        # The exception is reported to sentry only; the job metric below still
        # records the FAILURE status.
        sentry.captureException()

    metrics.send('endpoint_rotation_job', 'counter', 1, metric_tags={'status': status})
def create(**kwargs):
    """
    Creates a new certificate.

    Mints the certificate via the authority plugin, stores it as either a
    Certificate (body available immediately) or a PendingCertificate
    (asynchronous issuance, e.g. ACME), attaches owner/role information,
    commits, and — for pending certs — schedules background resolution.

    :return: the created Certificate or PendingCertificate instance
    :raises: re-raises any exception from the minting step
    """
    try:
        cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
    except Exception:
        current_app.logger.error("Exception minting certificate", exc_info=True)
        sentry.captureException()
        raise

    # Fold the minted material back into kwargs for the model constructor.
    kwargs['body'] = cert_body
    kwargs['private_key'] = private_key
    kwargs['chain'] = cert_chain
    kwargs['external_id'] = external_id
    kwargs['csr'] = csr

    roles = create_certificate_roles(**kwargs)

    if kwargs.get('roles'):
        kwargs['roles'] += roles
    else:
        kwargs['roles'] = roles

    # A present body means the CA issued synchronously; otherwise the
    # issuance is still pending and tracked separately.
    if cert_body:
        cert = Certificate(**kwargs)
        kwargs['creator'].certificates.append(cert)
    else:
        cert = PendingCertificate(**kwargs)
        kwargs['creator'].pending_certificates.append(cert)

    cert.authority = kwargs['authority']
    database.commit()

    if isinstance(cert, Certificate):
        certificate_issued.send(certificate=cert, authority=cert.authority)
        metrics.send('certificate_issued', 'counter', 1, metric_tags=dict(owner=cert.owner, issuer=cert.issuer))

    if isinstance(cert, PendingCertificate):
        # We need to refresh the pending certificate to avoid "Instance is not bound to a Session; "
        # "attribute refresh operation cannot proceed"
        pending_cert = database.session_query(PendingCertificate).get(cert.id)
        # Local import avoids a circular dependency with the celery module.
        from lemur.common.celery import fetch_acme_cert
        if not current_app.config.get("ACME_DISABLE_AUTORESOLVE", False):
            fetch_acme_cert.apply_async((pending_cert.id,), countdown=5)

    return cert
def describe_load_balancer_policies(load_balancer_name, policy_names, **kwargs):
    """Fetch the given policies attached to a classic ELB.

    :param load_balancer_name: name of the load balancer
    :param policy_names: policy names to describe
    :return: the raw ``describe_load_balancer_policies`` response
    """
    try:
        response = kwargs['client'].describe_load_balancer_policies(
            LoadBalancerName=load_balancer_name,
            PolicyNames=policy_names,
        )
    except Exception as e:  # noqa
        metrics.send(
            'describe_load_balancer_policies_error', 'counter', 1,
            metric_tags={"load_balancer_name": load_balancer_name, "policy_names": policy_names, "error": e},
        )
        sentry.captureException(
            extra={"load_balancer_name": str(load_balancer_name), "policy_names": str(policy_names)}
        )
        raise
    else:
        return response
def send_notification(event_type, data, targets, notification):
    """
    Executes the plugin and handles failure.

    Emits a per-event-type "sent" counter on success and a "failure"
    counter (plus sentry report and exception log) when the plugin raises.

    :param event_type:
    :param data:
    :param targets:
    :param notification:
    :return: True on success, otherwise None
    """
    try:
        notification.plugin.send(event_type, data, targets, notification.options)
        metrics.send('{0}_notification_sent'.format(event_type), 'counter', 1)
    except Exception as e:
        sentry.captureException()
        metrics.send('{0}_notification_failure'.format(event_type), 'counter', 1)
        current_app.logger.exception(e)
    else:
        return True
def clean(source_strings, commit):
    """
    Remove certificates that are pending cleaning from each given source.

    Sources whose plugin does not implement ``clean()`` are skipped.  For
    each candidate certificate the plugin's ``clean`` is invoked only when
    ``commit`` is True; a per-certificate metric and warning are emitted
    regardless.

    :param source_strings: labels identifying which sources to clean
    :param commit: when False this is a dry run — nothing is removed
    """
    sources = validate_sources(source_strings)
    for source in sources:
        s = plugins.get(source.plugin_name)

        if not hasattr(s, 'clean'):
            print("Cannot clean source: {0}, source plugin does not implement 'clean()'".format(
                source.label
            ))
            continue

        start_time = time.time()

        print("[+] Staring to clean source: {label}!\n".format(label=source.label))

        cleaned = 0
        for certificate in certificate_service.get_all_pending_cleaning(source):
            status = FAILURE_METRIC_STATUS
            if commit:
                try:
                    s.clean(certificate, source.options)
                    certificate.sources.remove(source)
                    certificate_service.database.update(certificate)
                    status = SUCCESS_METRIC_STATUS
                except Exception as e:
                    current_app.logger.exception(e)
                    sentry.captureException()

            # NOTE(review): the metric, the "Removed ..." warning, and the
            # cleaned counter fire even in dry-run mode (commit=False) and
            # even when clean() failed — confirm this is intentional.
            metrics.send('clean', 'counter', 1,
                         metric_tags={'source': source.label, 'status': status})

            current_app.logger.warning("Removed {0} from source {1} during cleaning".format(
                certificate.name,
                source.label
            ))

            cleaned += 1

        print(
            "[+] Finished cleaning source: {label}. Removed {cleaned} certificates from source. Run Time: {time}\n".format(
                label=source.label, time=(time.time() - start_time), cleaned=cleaned
            )
        )
def sync(source_strings):
    """
    Synchronize certificates and endpoints from a set of sources into Lemur.

    Each validated source is synced as the 'lemur' user; new/updated counts
    are printed, and a per-source status metric is always emitted.  A
    failing source is logged and reported but does not stop the remaining
    sources from syncing.

    :param source_strings: labels identifying which sources to sync
    """
    sources = validate_sources(source_strings)
    for source in sources:
        # Assume failure until the plugin completes successfully.
        status = FAILURE_METRIC_STATUS

        start_time = time.time()
        print("[+] Staring to sync source: {label}!\n".format(label=source.label))

        user = user_service.get_by_username('lemur')
        try:
            data = source_service.sync(source, user)
            print(
                "[+] Certificates: New: {new} Updated: {updated}".format(
                    new=data['certificates'][0],
                    updated=data['certificates'][1]
                )
            )
            print(
                "[+] Endpoints: New: {new} Updated: {updated}".format(
                    new=data['endpoints'][0],
                    updated=data['endpoints'][1]
                )
            )
            print(
                "[+] Finished syncing source: {label}. Run Time: {time}".format(
                    label=source.label, time=(time.time() - start_time)
                )
            )
            status = SUCCESS_METRIC_STATUS
        except Exception as e:
            current_app.logger.exception(e)

            print(
                "[X] Failed syncing source {label}!\n".format(label=source.label)
            )

            sentry.captureException()
            # Dedicated failure counter in addition to the generic one below.
            metrics.send('source_sync_fail', 'counter', 1,
                         metric_tags={'source': source.label, 'status': status})

        metrics.send('source_sync', 'counter', 1,
                     metric_tags={'source': source.label, 'status': status})
def expire(ttl):
    """
    Remove all endpoints that have not been updated within the given TTL.

    :param ttl: age threshold in hours; endpoints last updated before
                ``now - ttl`` hours are deleted
    """
    print("[+] Staring expiration of old endpoints.")

    try:
        now = arrow.utcnow()
        expiration = now - timedelta(hours=ttl)
        endpoints = database.session_query(Endpoint).filter(
            cast(Endpoint.last_updated, ArrowType) <= expiration
        )

        for endpoint in endpoints:
            print("[!] Expiring endpoint: {name} Last Updated: {last_updated}".format(
                name=endpoint.name, last_updated=endpoint.last_updated))
            database.delete(endpoint)
            metrics.send('endpoint_expired', 'counter', 1)

        print("[+] Finished expiration.")
    except Exception as e:
        # Previously the exception was swallowed with only a sentry report;
        # log it as well so operators can see why expiration failed.
        sentry.captureException()
        current_app.logger.exception(e)
def worker(data, commit, reason):
    """
    Revoke the certificate whose id is the first whitespace-separated token
    of ``data``.  The authority plugin's ``revoke_certificate`` is invoked
    only when ``commit`` is True; a status metric is emitted either way.

    :param data: string whose first token is the certificate id
    :param commit: when False this is a dry run
    :param reason: revocation reason forwarded to the plugin
    """
    tokens = [piece for piece in data.split(' ') if piece]
    try:
        cert = get(int(tokens[0].strip()))
        plugin = plugins.get(cert.authority.plugin_name)

        print('[+] Revoking certificate. Id: {0} Name: {1}'.format(cert.id, cert.name))
        if commit:
            plugin.revoke_certificate(cert, reason)

        metrics.send('certificate_revoke', 'counter', 1,
                     metric_tags={'status': SUCCESS_METRIC_STATUS})
    except Exception as e:
        sentry.captureException()
        metrics.send('certificate_revoke', 'counter', 1,
                     metric_tags={'status': FAILURE_METRIC_STATUS})
        print("[!] Failed to revoke certificates. Reason: {}".format(e))
def send_pending_failure_notification(pending_cert, notify_owner=True, notify_security=True, notification_plugin=None):
    """
    Sends a report to certificate owners when their pending certificate failed to be created.

    :param pending_cert: the pending certificate that failed
    :param notify_owner: when True, notify the certificate owner
    :param notify_security: when True, notify the configured security team address(es)
    :param notification_plugin: plugin override; defaults to
                                LEMUR_DEFAULT_NOTIFICATION_PLUGIN
                                ('email-notification' when unset)
    :return: True when at least one notification was sent successfully, otherwise None
    """
    # Status is flipped to SUCCESS by whichever send succeeds; it is never
    # reset, so one success is enough for a SUCCESS metric / True return.
    status = FAILURE_METRIC_STATUS

    if not notification_plugin:
        notification_plugin = plugins.get(
            current_app.config.get('LEMUR_DEFAULT_NOTIFICATION_PLUGIN', 'email-notification')
        )

    data = pending_certificate_output_schema.dump(pending_cert).data
    data["security_email"] = current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')

    if notify_owner:
        try:
            notification_plugin.send('failed', data, [data['owner']], pending_cert)
            status = SUCCESS_METRIC_STATUS
        except Exception as e:
            current_app.logger.error('Unable to send pending failure notification to {}.'.format(data['owner']),
                                     exc_info=True)
            sentry.captureException()

    if notify_security:
        try:
            # NOTE(review): unlike the owner branch, security_email is passed
            # as-is rather than wrapped in a list — presumably
            # LEMUR_SECURITY_TEAM_EMAIL is already a list; confirm.
            notification_plugin.send('failed', data, data["security_email"], pending_cert)
            status = SUCCESS_METRIC_STATUS
        except Exception as e:
            current_app.logger.error('Unable to send pending failure notification to '
                                     '{}.'.format(data['security_email']), exc_info=True)
            sentry.captureException()

    metrics.send('notification', 'counter', 1,
                 metric_tags={'status': status, 'event_type': 'rotation'})

    if status == SUCCESS_METRIC_STATUS:
        return True
def send_rotation_notification(certificate, notification_plugin=None):
    """
    Notify a certificate's owner that the certificate has been rotated.

    Falls back to the configured LEMUR_DEFAULT_NOTIFICATION_PLUGIN when no
    plugin is supplied.

    :param certificate: the rotated certificate
    :param notification_plugin: optional plugin override
    :return: True on success, otherwise None
    """
    if not notification_plugin:
        default_slug = current_app.config.get('LEMUR_DEFAULT_NOTIFICATION_PLUGIN')
        notification_plugin = plugins.get(default_slug)

    data = certificate_notification_output_schema.dump(certificate).data

    try:
        notification_plugin.send('rotation', data, [data['owner']])
        metrics.send('rotation_notification_sent', 'counter', 1)
    except Exception as e:
        sentry.captureException()
        metrics.send('rotation_notification_failure', 'counter', 1)
        current_app.logger.exception(e)
    else:
        return True
def update_destinations(target, value, initiator):
    """
    Attempt to upload certificate to the new destination

    :param target: certificate being modified
    :param value: destination that was attached
    :param initiator: SQLAlchemy event initiator (unused)
    :raises: re-raises any exception from the plugin upload
    """
    destination_plugin = plugins.get(value.plugin_name)
    status = FAILURE_METRIC_STATUS
    try:
        # Skip the upload when the destination needs a private key we don't have.
        if target.private_key or not destination_plugin.requires_key:
            destination_plugin.upload(target.name, target.body, target.private_key,
                                      target.chain, value.options)
            status = SUCCESS_METRIC_STATUS
    except Exception:
        sentry.captureException()
        raise
    finally:
        # Previously `raise` skipped the metrics call entirely, so the
        # FAILURE status was never reported; `finally` records the outcome
        # on both paths.
        metrics.send('destination_upload', 'counter', 1,
                     metric_tags={'status': status,
                                  'certificate': target.name,
                                  'destination': value.label})
def send_notification(event_type, data, targets, notification):
    """
    Executes the plugin and handles failure.

    Always emits a 'notification' counter tagged with the outcome and the
    event type; plugin exceptions are reported to sentry and swallowed.

    :param event_type:
    :param data:
    :param targets:
    :param notification:
    :return: True on success, otherwise None
    """
    succeeded = False
    try:
        notification.plugin.send(event_type, data, targets, notification.options)
        succeeded = True
    except Exception:
        sentry.captureException()

    status = SUCCESS_METRIC_STATUS if succeeded else FAILURE_METRIC_STATUS
    metrics.send('notification', 'counter', 1,
                 metric_tags={'status': status, 'event_type': event_type})

    if succeeded:
        return True
def get_listener_arn_from_endpoint(endpoint_name, endpoint_port, **kwargs):
    """
    Resolve the listener ARN serving ``endpoint_port`` on the named ELBv2
    load balancer.

    :param endpoint_name: load balancer name
    :param endpoint_port: listener port to match
    :return: the matching listener's ARN, or None when no listener matches
    """
    try:
        aws_client = kwargs.pop('client')
        described = aws_client.describe_load_balancers(Names=[endpoint_name])
        for load_balancer in described['LoadBalancers']:
            arn = load_balancer['LoadBalancerArn']
            listener_page = aws_client.describe_listeners(LoadBalancerArn=arn)
            for candidate in listener_page['Listeners']:
                if candidate['Port'] == endpoint_port:
                    return candidate['ListenerArn']
    except Exception as e:  # noqa
        metrics.send(
            'get_listener_arn_from_endpoint_error', 'counter', 1,
            metric_tags={"error": e, "endpoint_name": endpoint_name, "endpoint_port": endpoint_port},
        )
        sentry.captureException(
            extra={"endpoint_name": str(endpoint_name), "endpoint_port": str(endpoint_port)}
        )
        raise
def get_all_elbs_v2(**kwargs):
    """
    Fetch every ELBv2 load balancer for an account/region, following
    pagination markers until the listing is exhausted.

    :param kwargs: forwarded to get_elbs_v2 (must include the client)
    :return: list of load balancer descriptions
    """
    accumulated = []
    try:
        response = get_elbs_v2(**kwargs)
        while True:
            accumulated += response['LoadBalancers']
            marker = response.get('NextMarker')
            if not marker:
                return accumulated
            kwargs.update(dict(Marker=marker))
            response = get_elbs_v2(**kwargs)
    except Exception as e:  # noqa
        metrics.send('get_all_elbs_v2_error', 'counter', 1)
        sentry.captureException()
        raise
def retry_throttled(exception): """ Determines if this exception is due to throttling :param exception: :return: """ # Log details about the exception try: raise exception except Exception as e: current_app.logger.error("ELB retry_throttled triggered", exc_info=True) metrics.send('elb_retry', 'counter', 1, metric_tags={"exception": e}) sentry.captureException() if isinstance(exception, botocore.exceptions.ClientError): if exception.response['Error']['Code'] == 'LoadBalancerNotFound': return False if exception.response['Error']['Code'] == 'CertificateNotFound': return False return True
def check_revoked():
    """
    Function attempts to update Lemur's internal cache with revoked
    certificates. This is called periodically by Lemur. It checks both
    CRLs and OCSP to see if a certificate is revoked. If Lemur is unable
    encounters an issue with verification it marks the certificate status
    as `unknown`.
    """
    for cert in get_all_certs():
        try:
            # Verify against the chain when present, otherwise standalone.
            is_valid = verify_string(cert.body, cert.chain or "")
            cert.status = 'valid' if is_valid else 'revoked'
        except Exception as e:
            sentry.captureException()
            current_app.logger.exception(e)
            cert.status = 'unknown'

        database.update(cert)