def fire_event(event_type, payload=None):
    if config.DISABLE_EVENTS:
        return
    global SENDER_THREAD
    if not SENDER_THREAD:
        SENDER_THREAD = FuncThread(poll_and_send_messages, {})
        SENDER_THREAD.start()
    api_key = read_api_key_safe()
    if not api_key:
        # only store events if API key has been specified
        return
    from localstack.utils.testutil import (  # leave here to avoid circular dependency
        is_local_test_mode,
    )

    if payload is None:
        payload = {}
    if isinstance(payload, dict):
        if is_travis():
            payload["travis"] = True
        if is_local_test_mode():
            payload["int"] = True
    event = AnalyticsEvent(event_type=event_type, payload=payload, api_key=api_key)
    EVENT_QUEUE.put_nowait(event)

def __init__(self, events_file, callback, ready_mutex=None, fh_d_stream=None):
    FuncThread.__init__(self, self.retrieve_loop, None)
    self.running = True
    self.events_file = events_file
    self.callback = callback
    self.ready_mutex = ready_mutex
    self.fh_d_stream = fh_d_stream

def __init__(self, async_func_gen=None, loop=None):
    """Pass a function that receives an event loop instance and a shutdown event,
    and returns an async function."""
    FuncThread.__init__(self, self.run_func, None)
    self.async_func_gen = async_func_gen
    self.loop = loop
    self.shutdown_event = None

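# A minimal usage sketch for the constructor above; the names below are
# hypothetical. Per the docstring, async_func_gen receives the event loop and
# a shutdown event, and returns an async function for the thread to drive.
import asyncio

def sample_async_func_gen(loop, shutdown_event):
    async def work():
        # placeholder periodic task that exits once shutdown is signalled
        while not shutdown_event.is_set():
            await asyncio.sleep(1)
    return work
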
def start_api_server_locally(request):
    if localstack_config.FORWARD_EDGE_INMEM:
        if "__started__" in API_SERVERS:
            return
        API_SERVERS["__started__"] = True
    api = request.get("api")
    port = request.get("port")
    if api in API_SERVERS:
        return API_SERVERS[api]
    result = API_SERVERS[api] = {}

    def thread_func(params):
        if localstack_config.FORWARD_EDGE_INMEM:
            return moto_server.main(["-p", str(port), "-H", constants.BIND_HOST])
        return moto_server.main([api, "-p", str(port), "-H", constants.BIND_HOST])

    thread = FuncThread(thread_func)
    thread.start()
    TMP_THREADS.append(thread)
    result["port"] = port
    result["thread"] = thread
    return result

def publish_event(event_type, payload=None):
    global SENDER_THREAD
    if not SENDER_THREAD:
        SENDER_THREAD = FuncThread(poll_and_send_messages, {})
        SENDER_THREAD.start()
    event = AnalyticsEvent(event_type=event_type, payload=payload)
    EVENT_QUEUE.put_nowait(event)

def start_local_api(name, port, method, asynchronous=False):
    print('Starting mock %s service (%s port %s)...' % (name, get_service_protocol(), port))
    if asynchronous:
        thread = FuncThread(method, port, quiet=True)
        thread.start()
        TMP_THREADS.append(thread)
        return thread
    else:
        method(port)

def start_local_api(name, port, method, asynchronous=False):
    print('Starting mock %s service on %s ports %s (recommended) and %s (deprecated)...' % (
        name, get_service_protocol(), config.EDGE_PORT, port))
    if asynchronous:
        thread = FuncThread(method, port, quiet=True)
        thread.start()
        TMP_THREADS.append(thread)
        return thread
    else:
        method(port)

def __init__(self, port, forward_host=None, ssl=False, update_listener=None,
             quiet=False, params={}):
    FuncThread.__init__(self, self.run_cmd, params, quiet=quiet)
    self.httpd = None
    self.port = port
    self.ssl = ssl
    self.quiet = quiet
    self.forward_host = forward_host
    self.update_listener = update_listener
    # Required to enable 'Connection: keep-alive' for S3 uploads
    self.protocol_version = params.get('protocol_version') or 'HTTP/1.1'

class JobScheduler(object):
    _instance = None

    def __init__(self):
        self.jobs = []
        self.thread = None

    def add_job(self, job_func, schedule, enabled=True):
        job = Job(job_func, schedule, enabled=enabled)
        self.jobs.append(job)
        return job.job_id

    def disable_job(self, job_id):
        for job in self.jobs:
            if job.job_id == job_id:
                job.is_enabled = False
                break

    def cancel_job(self, job_id):
        i = 0
        while i < len(self.jobs):
            if self.jobs[i].job_id == job_id:
                del self.jobs[i]
            else:
                i += 1

    def loop(self, *args):
        while True:
            try:
                for job in list(self.jobs):
                    job.run()
            except Exception:
                pass
            # This is a simple heuristic to cause the loop to run approx. every minute
            # TODO: we should keep track of job execution times, to avoid duplicate executions
            time.sleep(59.9)

    def start_loop(self):
        self.thread = FuncThread(self.loop)
        self.thread.start()

    @classmethod
    def instance(cls):
        if not cls._instance:
            cls._instance = JobScheduler()
        return cls._instance

    @classmethod
    def start(cls):
        instance = cls.instance()
        if not instance.thread:
            instance.start_loop()
        return instance

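# A minimal usage sketch for JobScheduler. The Job class and its schedule
# format are not shown above; the cron-like expression here is an assumption.
def cleanup_task(*args):
    print('running periodic cleanup')

scheduler = JobScheduler.start()  # starts the background loop (~1 iteration/minute)
job_id = scheduler.add_job(cleanup_task, '*/5 * * * *', enabled=True)
# ... later, pause or remove the job via its id
scheduler.disable_job(job_id)
scheduler.cancel_job(job_id)
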
def publish_event(event_type, payload=None):
    global SENDER_THREAD
    if not SENDER_THREAD:
        SENDER_THREAD = FuncThread(poll_and_send_messages, {})
        SENDER_THREAD.start()
    if payload is None:
        payload = {}
    if isinstance(payload, dict) and is_travis():
        payload['travis'] = True
    event = AnalyticsEvent(event_type=event_type, payload=payload)
    EVENT_QUEUE.put_nowait(event)

def __init__(self, port, forward_host, update_listener=None, quiet=False, params={}):
    FuncThread.__init__(self, self.run_cmd, params, quiet=quiet)
    self.httpd = None
    self.port = port
    self.quiet = quiet
    self.forward_host = forward_host
    self.update_listener = update_listener

def retrieve_loop(self, params):
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.bind(self.events_file)
    sock.listen(1)
    if self.ready_mutex:
        self.ready_mutex.release()
    while self.running:
        try:
            conn, client_addr = sock.accept()
            thread = FuncThread(self.handle_connection, conn)
            thread.start()
        except Exception as e:
            LOGGER.error('Error dispatching client request: %s %s' % (e, traceback.format_exc()))
    sock.close()

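# A hedged sketch of the handle_connection method dispatched above (it is not
# shown in these snippets; the newline-delimited protocol is an assumption):
# read records from the accepted socket and hand each one to self.callback.
def handle_connection(self, conn):
    socket_file = conn.makefile()
    while self.running:
        line = socket_file.readline()
        if not line:
            break  # client closed the connection
        self.callback(line.strip())
    conn.close()
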
def start_cloudformation(port=None, asynchronous=False, update_listener=None):
    port = port or config.PORT_CLOUDFORMATION
    backend_port = DEFAULT_PORT_CLOUDFORMATION_BACKEND
    print('Starting mock CloudFormation (%s port %s)...' % (get_service_protocol(), port))
    start_proxy_for_service('cloudformation', port, backend_port, update_listener)
    if RUN_SERVER_IN_PROCESS:
        cmd = 'python "%s" cloudformation -p %s -H 0.0.0.0' % (__file__, backend_port)
        env_vars = {'PYTHONPATH': ':'.join(sys.path)}
        return do_run(cmd, asynchronous, env_vars=env_vars)
    else:
        argv = ['cloudformation', '-p', str(backend_port), '-H', '0.0.0.0']
        thread = FuncThread(start_up, argv)
        thread.start()
        return thread

def fire_event(event_type, payload=None):
    global SENDER_THREAD
    if not SENDER_THREAD:
        SENDER_THREAD = FuncThread(poll_and_send_messages, {})
        SENDER_THREAD.start()
    if payload is None:
        payload = {}
    if isinstance(payload, dict):
        if is_travis():
            payload['travis'] = True
        if os.environ.get(ENV_INTERNAL_TEST_RUN):
            payload['int'] = True
    event = AnalyticsEvent(event_type=event_type, payload=payload)
    EVENT_QUEUE.put_nowait(event)

def create_domain():
    data = json.loads(to_str(request.data))
    domain_name = data['DomainName']
    if domain_name in ES_DOMAINS:
        return error_response(error_type='ResourceAlreadyExistsException')
    ES_DOMAINS[domain_name] = data
    data['Created'] = False

    def do_start(*args):
        # start actual Elasticsearch instance
        version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION
        start_elasticsearch_instance(version=version)
        data['Created'] = True

    # start ES instance in the background
    FuncThread(do_start).start()
    # sleep a short while, then return
    time.sleep(5)
    result = get_domain_status(domain_name)
    # record event
    event_publisher.fire_event(
        event_publisher.EVENT_ES_CREATE_DOMAIN,
        payload={'n': event_publisher.get_hash(domain_name)})
    persistence.record('es', request=request)
    return jsonify(result)

def run_dependencies_deployment_loop(stack, action):
    def set_status(status):
        stack._add_stack_event(status)
        stack.status = status

    def run_loop(*args):
        # NOTE: We're adding this additional loop, as it seems that in some cases moto
        # does not consider resource dependencies (e.g., if a "DependsOn" resource property
        # is defined). This loop allows us to incrementally resolve such dependencies.
        resource_map = stack.resource_map
        unresolved = {}
        for i in range(MAX_DEPENDENCY_DEPTH):
            LOG.debug('Running CloudFormation stack deployment loop iteration %s' % (i + 1))
            unresolved = getattr(resource_map, '_unresolved_resources', {})
            if not unresolved:
                set_status('%s_COMPLETE' % action)
                return resource_map
            resource_map._unresolved_resources = {}
            for resource_id, resource_details in unresolved.items():
                # Re-trigger the resource creation
                parse_and_create_resource(*resource_details, force_create=True)
            if unresolved.keys() == resource_map._unresolved_resources.keys():
                # looks like no more resources can be resolved -> bail
                LOG.warning('Unresolvable dependencies, there may be undeployed stack resources: %s' % unresolved)
                break
        set_status('%s_FAILED' % action)
        raise Exception('Unable to resolve all CloudFormation resources after traversing ' +
                        'dependency tree (maximum depth %s reached): %s' %
                        (MAX_DEPENDENCY_DEPTH, list(unresolved.keys())))

    # NOTE: We're running the loop in the background, as it might take some time to complete
    FuncThread(run_loop).start()

def start_api_server_locally(request):
    api = request.get('api')
    port = request.get('port')
    if api in API_SERVERS:
        return API_SERVERS[api]
    result = API_SERVERS[api] = {}

    def thread_func(params):
        return moto_main([api, '-p', str(port), '-H', constants.BIND_HOST])

    thread = FuncThread(thread_func)
    thread.start()
    TMP_THREADS.append(thread)
    result['port'] = port
    result['thread'] = thread
    return result

def __init__(self, port, forward_url=None, ssl=False, host=None,
             update_listener=None, quiet=False, params={}):
    FuncThread.__init__(self, self.run_cmd, params, quiet=quiet)
    self.httpd = None
    self.port = port
    self.ssl = ssl
    self.quiet = quiet
    if forward_url:
        if '://' not in forward_url:
            forward_url = 'http://%s' % forward_url
        forward_url = forward_url.rstrip('/')
    self.forward_url = forward_url
    self.update_listener = update_listener
    self.server_stopped = False
    # Required to enable 'Connection: keep-alive' for S3 uploads
    self.protocol_version = params.get('protocol_version') or 'HTTP/1.1'
    self.listen_host = host or ''

def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
    def do_execute(*args):
        # set the invocation time in milliseconds
        invocation_time = int(time.time() * 1000)
        # start the execution
        try:
            result, log_output = self._execute(func_arn, func_details, event, context, version)
        except Exception as e:
            if asynchronous:
                lambda_error_to_dead_letter_queue(func_details, event, e)
            raise e
        finally:
            self.function_invoke_times[func_arn] = invocation_time
        # forward log output to cloudwatch logs
        self._store_logs(func_details, log_output, invocation_time)
        # return final result
        return result, log_output

    # Inform users about asynchronous mode of the lambda execution.
    if asynchronous:
        LOG.debug('Lambda executed in Event (asynchronous) mode, no response from this '
                  'function will be returned to caller')
        FuncThread(do_execute).start()
        return None, 'Lambda executed asynchronously.'
    return do_execute()

def execute(self, func_arn, func_details, event, context=None, version=None,
            asynchronous=False, callback=None):
    def do_execute(*args):
        @cloudwatched('lambda')
        def _run(func_arn=None):
            # set the invocation time in milliseconds
            invocation_time = int(time.time() * 1000)
            # start the execution
            raised_error = None
            result = None
            dlq_sent = None
            try:
                result = self._execute(func_arn, func_details, event, context, version)
            except Exception as e:
                raised_error = e
                if asynchronous:
                    if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
                        sqs_queue_arn = get_from_event(event, 'eventSourceARN')
                        if sqs_queue_arn:
                            # event source is SQS, send event back to dead letter queue
                            dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
                    else:
                        # event source is not SQS, send back to lambda dead letter queue
                        lambda_error_to_dead_letter_queue(func_details, event, e)
                raise e
            finally:
                self.function_invoke_times[func_arn] = invocation_time
                callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
            # return final result
            return result

        return _run(func_arn=func_arn)

    # Inform users about asynchronous mode of the lambda execution.
    if asynchronous:
        LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
        FuncThread(do_execute).start()
        return InvocationResult(None, log_output='Lambda executed asynchronously.')
    return do_execute()

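# A sketch of a callback compatible with execute() above, mirroring the call
# site `callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)`;
# the function name is hypothetical.
def on_invocation_done(result, func_arn, event, error=None, dlq_sent=None):
    if error:
        print('Lambda %s failed (sent to DLQ: %s): %s' % (func_arn, dlq_sent, error))
    else:
        print('Lambda %s finished successfully' % func_arn)
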
def fire_event(event_type, payload=None):
    global SENDER_THREAD
    if not SENDER_THREAD:
        SENDER_THREAD = FuncThread(poll_and_send_messages, {})
        SENDER_THREAD.start()
    api_key = read_api_key_safe()
    if not api_key:
        # only store events if API key has been specified
        return
    if payload is None:
        payload = {}
    if isinstance(payload, dict):
        if is_travis():
            payload['travis'] = True
        if os.environ.get(ENV_INTERNAL_TEST_RUN):
            payload['int'] = True
    event = AnalyticsEvent(event_type=event_type, payload=payload, api_key=api_key)
    EVENT_QUEUE.put_nowait(event)

def __init__(self, params):
    FuncThread.__init__(self, self.start_reading, params)
    self.buffer = []
    self.params = params
    # number of lines that make up a single log entry
    self.buffer_size = 2
    # determine log level
    self.log_level = params.get('level')
    # get log subscribers
    self.log_subscribers = params.get('log_subscribers', [])
    if self.log_level is None:
        self.log_level = DEFAULT_KCL_LOG_LEVEL
    if self.log_level > 0:
        levels = OutputReaderThread.get_log_level_names(self.log_level)
        # regular expression to filter the printed output
        self.filter_regex = r'.*(%s):.*' % ('|'.join(levels))
    # create prefix and logger
    self.prefix = params.get('log_prefix') or 'LOG'
    self.logger = logging.getLogger(self.prefix)
    self.logger.severe = self.logger.critical
    self.logger.fatal = self.logger.critical
    self.logger.setLevel(self.log_level)

def setup_package():
    try:
        os.environ[ENV_INTERNAL_TEST_RUN] = '1'
        # disable SSL verification for local tests
        safe_requests.verify_ssl = False
        # start profiling
        FuncThread(start_profiling).start()
        # start infrastructure services
        infra.start_infra(asynchronous=True)
    except Exception as e:
        # make sure to tear down the infrastructure
        infra.stop_infra()
        raise e

def __init__(self, params):
    FuncThread.__init__(self, self.start_reading, params)
    self.running = True
    self.buffer = []
    self.params = params
    # number of lines that make up a single log entry
    self.buffer_size = 2
    # determine log level
    self.log_level = params.get('level')
    # get log subscribers
    self.log_subscribers = params.get('log_subscribers', [])
    if self.log_level is None:
        self.log_level = DEFAULT_KCL_LOG_LEVEL
    if self.log_level > 0:
        levels = OutputReaderThread.get_log_level_names(self.log_level)
        # regular expression to filter the printed output
        self.filter_regex = r'.*(%s):.*' % ('|'.join(levels))
    # create prefix and logger
    self.prefix = params.get('log_prefix') or 'LOG'
    self.logger = logging.getLogger(self.prefix)
    self.logger.severe = self.logger.critical
    self.logger.fatal = self.logger.critical
    self.logger.setLevel(self.log_level)

def start_api_server_locally(request):
    if localstack_config.FORWARD_EDGE_INMEM:
        if '__started__' in API_SERVERS:
            return
        API_SERVERS['__started__'] = True
    api = request.get('api')
    port = request.get('port')
    if api in API_SERVERS:
        return API_SERVERS[api]
    result = API_SERVERS[api] = {}

    def thread_func(params):
        if localstack_config.FORWARD_EDGE_INMEM:
            return moto_server.main(['-p', str(port), '-H', constants.BIND_HOST])
        return moto_server.main([api, '-p', str(port), '-H', constants.BIND_HOST])

    thread = FuncThread(thread_func)
    thread.start()
    TMP_THREADS.append(thread)
    result['port'] = port
    result['thread'] = thread
    return result

def setup_package():
    try:
        os.environ[ENV_INTERNAL_TEST_RUN] = '1'
        # disable SSL verification for local tests
        safe_requests.verify_ssl = False
        # start profiling
        FuncThread(start_profiling).start()
        # start infrastructure services
        infra.start_infra(asynchronous=True)
        # initialize certain tests asynchronously to reduce overall test time
        TestTerraform.init_async()
    except Exception as e:
        # make sure to tear down the infrastructure
        infra.stop_infra()
        raise e

def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
    def do_execute(*args):
        # set the invocation time in milliseconds
        invocation_time = int(time.time() * 1000)
        # start the execution
        try:
            result = self._execute(func_arn, func_details, event, context, version)
        except Exception as e:
            if asynchronous:
                if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
                    sqs_queue_arn = get_from_event(event, 'eventSourceARN')
                    if sqs_queue_arn:
                        # event source is SQS, send event back to dead letter queue
                        sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
                else:
                    # event source is not SQS, send back to lambda dead letter queue
                    lambda_error_to_dead_letter_queue(func_details, event, e)
            raise e
        finally:
            self.function_invoke_times[func_arn] = invocation_time
        # return final result
        return result

    # Inform users about asynchronous mode of the lambda execution.
    if asynchronous:
        LOG.debug('Lambda executed in Event (asynchronous) mode, no response from this '
                  'function will be returned to caller')
        FuncThread(do_execute).start()
        return None, 'Lambda executed asynchronously.'
    return do_execute()

def create_domain():
    from localstack.services.es import es_starter
    data = json.loads(to_str(request.data))
    domain_name = data['DomainName']
    if domain_name in ES_DOMAINS:
        return error_response(error_type='ResourceAlreadyExistsException')
    ES_DOMAINS[domain_name] = data
    data['Created'] = False

    def do_start(*args):
        # start actual Elasticsearch instance
        version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION
        start_elasticsearch_instance(version=version)
        data['Created'] = True

    try:
        if es_starter.check_elasticsearch():
            data['Created'] = True
        else:
            LOG.error('Elasticsearch status is not healthy, please check the application status and logs')
    except requests.exceptions.ConnectionError:
        # Catch first run
        FuncThread(do_start).start()
        LOG.info('Elasticsearch is starting for the first time, please wait..')
        data['Created'] = True
    result = get_domain_status(domain_name)
    # record event
    event_publisher.fire_event(
        event_publisher.EVENT_ES_CREATE_DOMAIN,
        payload={'n': event_publisher.get_hash(domain_name)})
    persistence.record('es', request=request)
    return jsonify(result)

def __init__(self, port, forward_host, update_listener=None, params={}):
    FuncThread.__init__(self, self.run_cmd, params, quiet=True)
    self.httpd = None
    self.port = port
    self.forward_host = forward_host
    self.update_listener = update_listener

def start_loop(self):
    self.thread = FuncThread(self.loop)
    self.thread.start()

    cmd = 'VALIDATE_LAMBDA_S3=0 %s/bin/moto_server %s -p %s -H %s' % (
        LOCALSTACK_VENV_FOLDER, key, backend_port or port, constants.BIND_HOST)
    if not name:
        name = key
    print('Starting mock %s (%s port %s)...' % (name, get_service_protocol(), port))
    if backend_port:
        start_proxy_for_service(key, port, backend_port, update_listener)
    elif USE_SSL:
        cmd += ' --ssl'
    return do_run(cmd, asynchronous)


def start_local_api(name, port, method, asynchronous=False):
    print('Starting mock %s service (%s port %s)...' % (name, get_service_protocol(), port))
    if asynchronous:
        thread = FuncThread(method, port, quiet=True)
        thread.start()
        TMP_THREADS.append(thread)
        return thread
    else:
        method(port)


def stop_infra():
    global INFRA_STOPPED
    if INFRA_STOPPED:
        return
    event_publisher.fire_event(event_publisher.EVENT_STOP_INFRA)
    generic_proxy.QUIET = True

    moto_server_cmd = run('which moto_server').strip()
    cmd = 'VALIDATE_LAMBDA_S3=0 %s %s -p %s -H %s' % (
        moto_server_cmd, key, backend_port or port, constants.BIND_HOST)
    if not name:
        name = key
    print('Starting mock %s (%s port %s)...' % (name, get_service_protocol(), port))
    if backend_port:
        start_proxy_for_service(key, port, backend_port, update_listener)
    elif USE_SSL:
        cmd += ' --ssl'
    return do_run(cmd, asynchronous)


def start_local_api(name, port, method, asynchronous=False):
    print('Starting mock %s service (%s port %s)...' % (name, get_service_protocol(), port))
    if asynchronous:
        thread = FuncThread(method, port, quiet=True)
        thread.start()
        TMP_THREADS.append(thread)
        return thread
    else:
        method(port)


def stop_infra():
    global INFRA_STOPPED
    if INFRA_STOPPED:
        return
    event_publisher.fire_event(event_publisher.EVENT_STOP_INFRA)
    generic_proxy.QUIET = True

def __init__(self):
    FuncThread.__init__(self, self.run_proxy, None)

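# FuncThread itself is not defined in these snippets. The sketch below is an
# assumption-based reconstruction of the pattern the code above relies on (a
# daemon thread invoking a target function with a single params argument),
# not the actual localstack.utils implementation.
import threading

class FuncThreadSketch(threading.Thread):
    def __init__(self, func, params=None, quiet=False):
        threading.Thread.__init__(self)
        self.daemon = True  # do not block interpreter shutdown
        self.func = func
        self.params = params
        self.quiet = quiet

    def run(self):
        try:
            self.func(self.params)
        except Exception as e:
            if not self.quiet:
                print('Thread function %s failed: %s' % (self.func, e))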