    def __init__(self, config, connection):
        self._lock = Lock()
        self._batch = queue.Queue()

        self._last_commit = time()
        self.config = config
        self._amqp_connection = connection
        self._started = queue.Queue()
        self._reset_cache()
        # exception stored here will be raised by the main thread
        self.error_exit = None
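The comment on `error_exit` points at a standard pattern: an exception raised in a worker thread cannot propagate into the main thread by itself, so the worker stores it and the main thread checks and re-raises. A minimal standalone sketch of that hand-off (the class and method names here are illustrative, not from the snippet above):

import threading
import time

class Worker:
    def __init__(self):
        # exception stored here will be raised by the main thread
        self.error_exit = None
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        try:
            raise RuntimeError('broker connection lost')  # simulated failure
        except Exception as e:
            self.error_exit = e  # stash it for the main thread

    def check(self):
        # call this periodically from the main thread
        if self.error_exit is not None:
            raise self.error_exit

w = Worker()
time.sleep(0.1)
w.check()  # re-raises the RuntimeError in the main thread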
Example #2
    def channel_method(self,
                       method,
                       channel=None,
                       wait=True,
                       timeout=None,
                       **kwargs):
        """Schedule a channel method to be called from the connection thread.

        Use this to schedule a channel method such as .publish or .basic_ack
        to be called from the connection thread.
        """
        if wait and self._consumer_thread \
                and self._consumer_thread is threading.current_thread():
            # when sending from the connection thread, we can't wait:
            # blocking on the err_queue here would prevent the actual send
            # loop (._process_publish) from ever running
            raise RuntimeError(
                'Cannot wait when sending from the connection thread')

        # the message is going to be sent from another thread (the .consume
        # thread). If an error happens there, we must have a way to get it
        # back out, so we pass a Queue along with the message, which will
        # contain either an exception instance or None
        err_queue = queue.Queue() if wait else None
        envelope = {
            'method': method,
            'message': kwargs,
            'err_queue': err_queue,
            'channel': channel
        }
        self._connection_tasks_queue.put(envelope)
        if err_queue:
            err = err_queue.get(timeout=timeout)
            if isinstance(err, Exception):
                raise err
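The consumer side of `_connection_tasks_queue` is not shown here; to complete the round trip, the connection thread has to pop each envelope, invoke the channel method, and put either `None` or the exception into `err_queue` so the waiting caller unblocks. A plausible sketch of that loop (the method name and the `_default_channel` attribute are assumptions, `method` is assumed to be a method name string, and `queue` is assumed imported):

    def _process_connection_tasks(self):
        # runs on the connection thread; drains scheduled channel calls
        while not self._closed:
            try:
                envelope = self._connection_tasks_queue.get(timeout=1)
            except queue.Empty:
                continue
            err = None
            try:
                channel = envelope['channel'] or self._default_channel
                getattr(channel, envelope['method'])(**envelope['message'])
            except Exception as e:
                err = e
            if envelope['err_queue'] is not None:
                # either None (success) or the exception; channel_method
                # re-raises it on the caller's thread
                envelope['err_queue'].put(err)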
Example #3
    def __init__(self, ctx, port=None):
        port = port or get_unused_port()
        socket_url = 'http://localhost:{0}'.format(port)
        super(HTTPCtxProxy, self).__init__(ctx, socket_url)
        self.port = port
        self._started = queue.Queue(1)
        self.thread = self._start_server()
        self._started.get(timeout=5)
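`self._started` is a one-slot queue used as a startup handshake: the server thread puts a token into it once it is actually listening, and `__init__` blocks on `.get(timeout=5)` so construction fails fast (with `queue.Empty`) if the server never comes up. The same handshake in a minimal standalone form:

import queue
import threading
import time

started = queue.Queue(1)

def serve():
    time.sleep(0.2)      # pretend to bind the socket, etc.
    started.put(True)    # signal readiness exactly once
    # ... serve requests ...

t = threading.Thread(target=serve, daemon=True)
t.start()
started.get(timeout=5)   # raises queue.Empty if startup hangs
print('server is up')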
Example #4
    def __init__(self, *client_args, **client_kwargs):
        self.client_started = Event()
        self._connect_lock = RLock()
        self._callers = 0
        self._thread = None
        self._queue = queue.Queue()
        self._client_args = client_args
        self._client_kwargs = client_kwargs
Example #5
    def _restore_inter_deployment_dependencies(self):
        # Managers older than 4.6.0 didn't support get_capability, and
        # managers newer than 5.0.5 have the inter-deployment dependencies
        # as part of the database dump.
        if (self._snapshot_version < V_4_6_0
                or self._snapshot_version > V_5_0_5):
            return

        ctx.logger.info('Restoring inter deployment dependencies')
        update_service_composition = (self._snapshot_version == V_5_0_5)

        deployment_contexts = utils.get_dep_contexts(self._snapshot_version)
        deployments_queue = queue.Queue()
        failed_deployments_queue = queue.Queue()
        for tenant_name, deployments in deployment_contexts:
            for dep_id in deployments:
                deployments_queue.put((tenant_name, dep_id))

        wf_context = current_workflow_ctx.get_ctx()
        context_params = current_workflow_ctx.get_parameters()

        threads = []
        for i in range(
                min(self._config.snapshot_restore_threads,
                    deployments_queue.qsize())):
            t = threading.Thread(
                target=self._create_inter_deployment_dependencies,
                args=(deployments_queue, failed_deployments_queue, wf_context,
                      context_params, update_service_composition))
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

        if not failed_deployments_queue.empty():
            deployments = list(failed_deployments_queue.queue)
            raise NonRecoverableError('Failed to restore snapshot, could not '
                                      'create the inter deployment '
                                      'dependencies from the following '
                                      'deployments {0}. See exception '
                                      'tracebacks logged above for more '
                                      'details'.format(deployments))

        ctx.logger.info('Successfully restored inter deployment dependencies.')
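`_create_inter_deployment_dependencies` itself is not part of this snippet, but given how it is wired up, each worker has to drain `deployments_queue` until empty and record failures instead of raising, so every thread can be joined. A hedged sketch of what such a worker might look like (`_restore_one_dependency` is a placeholder for the real per-deployment work):

    def _create_inter_deployment_dependencies(self, deployments_queue,
                                              failed_deployments_queue,
                                              wf_context, context_params,
                                              update_service_composition):
        while True:
            try:
                tenant_name, dep_id = deployments_queue.get_nowait()
            except queue.Empty:
                return  # queue drained; the worker exits and can be joined
            try:
                # placeholder for the actual per-deployment restore call
                self._restore_one_dependency(tenant_name, dep_id, wf_context,
                                             context_params,
                                             update_service_composition)
            except Exception:
                ctx.logger.exception('Failed to restore dependencies of '
                                     '%s/%s', tenant_name, dep_id)
                failed_deployments_queue.put((tenant_name, dep_id))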
Example #6
    def _restore_deployment_envs(self, postgres):
        deps = utils.get_dep_contexts(self._snapshot_version)
        token_info = postgres.get_deployment_creator_ids_and_tokens()
        deps_with_failed_plugins = queue.Queue()
        failed_deployments = queue.Queue()
        threads = list()

        for tenant, deployments in deps:
            ctx.logger.info(
                'Restoring deployment environments for {tenant}'.format(
                    tenant=tenant,
                )
            )
            tenant_client = get_rest_client(tenant=tenant)

            for deployment_id, dep_ctx in deployments.items():
                # Task graphs are created and executed in threads to
                # shorten the restore time significantly
                wf_ctx = current_workflow_ctx.get_ctx()
                wf_parameters = current_workflow_ctx.get_parameters()
                self._semaphore.acquire()
                t = threading.Thread(target=self._get_and_execute_task_graph,
                                     args=(token_info, deployment_id, dep_ctx,
                                           tenant, tenant_client, wf_ctx,
                                           wf_parameters,
                                           deps_with_failed_plugins,
                                           failed_deployments)
                                     )
                t.daemon = True
                threads.append(t)
                t.start()

        for t in threads:
            t.join()

        if not failed_deployments.empty():
            deployments = list(failed_deployments.queue)
            raise NonRecoverableError('Failed to restore snapshot, the '
                                      'following deployment environments were'
                                      ' not restored: {0}. See exception'
                                      ' tracebacks logged above for more'
                                      ' details.'.format(deployments))

        self._log_final_information(deps_with_failed_plugins)
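Note the back-pressure here: `self._semaphore.acquire()` before each `Thread.start()` caps the number of concurrent restores, which only works if each worker releases the semaphore when it finishes. A minimal standalone sketch of that acquire/release pairing (the semaphore size and the worker body are assumptions):

import threading

semaphore = threading.BoundedSemaphore(10)  # e.g. at most 10 concurrent restores

def worker(deployment_id):
    try:
        pass  # build and execute this deployment's task graph here
    finally:
        semaphore.release()  # always free the slot, even on failure

for deployment_id in ['dep1', 'dep2', 'dep3']:
    semaphore.acquire()  # blocks once all slots are taken
    t = threading.Thread(target=worker, args=(deployment_id,))
    t.daemon = True
    t.start()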
Example #7
def _create_connections():
    acks_queue = queue.Queue()
    cfy_config = config.instance
    port = BROKER_PORT_SSL if cfy_config.amqp_ca_path else BROKER_PORT_NO_SSL
    amqp_client = get_client(amqp_host=cfy_config.amqp_host,
                             amqp_user=cfy_config.amqp_username,
                             amqp_pass=cfy_config.amqp_password,
                             amqp_vhost='/',
                             amqp_port=port,
                             ssl_enabled=bool(cfy_config.amqp_ca_path),
                             ssl_cert_path=cfy_config.amqp_ca_path,
                             cls=AckingAMQPConnection)
    amqp_client.acks_queue = acks_queue
    db_publisher = DBLogEventPublisher(config.instance, amqp_client)
    amqp_consumer = AMQPLogsEventsConsumer(
        message_processor=db_publisher.process)

    amqp_client.add_handler(amqp_consumer)
    db_publisher.start()
    return amqp_client, db_publisher
Example #8
    def __init__(self,
                 handlers,
                 name=None,
                 amqp_params=None,
                 connect_timeout=10):
        self._handlers = handlers
        self.name = name
        self._connection_params = self._get_connection_params()
        self._reconnect_backoff = 1
        self._closed = False
        self._amqp_params = amqp_params or AMQPParams()
        self._pika_connection = None
        self._consumer_thread = None
        self.connect_wait = threading.Event()
        self._connect_timeout = connect_timeout
        self._error = None
        self._daemon_factory = _get_daemon_factory()

        # Use this queue to schedule methods to be called on the pika
        # channel from the connection thread, e.g. for sending data to
        # RabbitMQ by publishing messages or sending ACKs, which must be
        # done from the connection thread.
        self._connection_tasks_queue = queue.Queue()
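The `_reconnect_backoff = 1` field hints at the usual reconnect strategy: sleep between attempts, double the delay on each failure, and reset it after a successful connection. A generic sketch of that pattern under those assumptions (`_connect`, `_consume_forever` and the 32-second cap are placeholders, not the library's actual API; assumes `time` is imported):

    def _reconnect_loop(self):
        while not self._closed:
            try:
                self._connect()              # placeholder for pika setup
                self._reconnect_backoff = 1  # reset after a good connection
                self._consume_forever()      # returns or raises on disconnect
            except Exception:
                time.sleep(self._reconnect_backoff)
                self._reconnect_backoff = min(self._reconnect_backoff * 2, 32)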
Example #9
    def __init__(self, *args, **kwargs):
        super(BlockingRequestResponseHandler, self).__init__(*args, **kwargs)
        self._response = queue.Queue()
Example #10
    def _handle_remote_workflow(self):
        tenant = self.ctx._context['tenant'].get('original_name',
                                                 self.ctx.tenant_name)
        rest = get_rest_client(tenant=tenant)
        execution = rest.executions.get(self.ctx.execution_id,
                                        _include=['status'])
        if execution.status == Execution.STARTED:
            self.ctx.resume = True

        try:
            amqp_client_utils.init_events_publisher()
            try:
                self._workflow_started()
            except InvalidExecutionUpdateStatus:
                self._workflow_cancelled()
                return api.EXECUTION_CANCELLED_RESULT

            result_queue = queue.Queue()
            t = AMQPWrappedThread(target=self._remote_workflow_child_thread,
                                  args=(result_queue, ),
                                  name='Workflow-Child')
            t.start()

            # while the child thread is executing the workflow, the parent
            # thread is polling for 'cancel' requests while also waiting for
            # messages from the child thread
            result = None
            while True:
                # check if child thread sent a message
                try:
                    data = result_queue.get(timeout=5)
                    if 'result' in data:
                        # child thread has terminated
                        result = data['result']
                        break
                    else:
                        # error occurred in child thread
                        raise data['error']
                except queue.Empty:
                    pass

                # A very hacky way to solve an edge case when trying to poll
                # for the execution status while the DB is downgraded during
                # a snapshot restore
                if self.cloudify_context['workflow_id'] == 'restore_snapshot':
                    continue

                # check for 'cancel' requests
                execution = rest.executions.get(self.ctx.execution_id,
                                                _include=['status'])
                if execution.status in [
                        Execution.CANCELLING, Execution.FORCE_CANCELLING,
                        Execution.KILL_CANCELLING
                ]:
                    # send a 'cancel' message to the child thread. It is up to
                    # the workflow implementation to check for this message
                    # and act accordingly (by stopping and raising an
                    # api.ExecutionCancelled error, or by returning the
                    # deprecated api.EXECUTION_CANCELLED_RESULT as result).
                    # The parent thread then goes back to polling for
                    # messages from the child thread, and for possible
                    # 'force-cancel' requests.
                    api.set_cancel_request()

                if execution.status == Execution.KILL_CANCELLING:
                    # if a custom workflow function must attempt some cleanup,
                    # it might attempt to catch SIGTERM, and confirm using this
                    # flag that it is being kill-cancelled
                    api.set_kill_request()

                if execution.status in [
                        Execution.FORCE_CANCELLING, Execution.KILL_CANCELLING
                ]:
                    # force-cancel additionally stops this loop immediately
                    result = api.EXECUTION_CANCELLED_RESULT
                    break

            if result == api.EXECUTION_CANCELLED_RESULT:
                self._workflow_cancelled()
            else:
                self._workflow_succeeded()
            return result
        except exceptions.WorkflowFailed as e:
            self._workflow_failed(e)
            raise
        except BaseException as e:
            self._workflow_failed(e, traceback.format_exc())
            raise
        finally:
            amqp_client_utils.close_amqp_client()
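The loop above relies on a small protocol: the child thread puts exactly one dict onto `result_queue`, keyed `'result'` on success or `'error'` on failure. A plausible sketch of `_remote_workflow_child_thread` under that protocol (`_execute_workflow_function` is a placeholder, not the library's actual implementation):

    def _remote_workflow_child_thread(self, result_queue):
        try:
            result = self._execute_workflow_function()  # placeholder
        except BaseException as e:
            # the parent loop re-raises this on its side
            result_queue.put({'error': e})
        else:
            result_queue.put({'result': result})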
Example #11
    def _handle_remote_workflow(self):
        """Run the workflow function.

        This runs the workflow in a background thread. The main thread will
        wait for the background thread to finish, and poll the execution
        status, to check if the execution was cancelled. If so, the cancel
        flag is set, which allows the workflow function to clean up.
        If the force-cancel flag is set, then this function will return
        early, without waiting for the background thread to finish.
        """
        tenant = self.ctx._context['tenant'].get('original_name',
                                                 self.ctx.tenant_name)
        rest = get_rest_client(tenant=tenant)
        try:
            try:
                self._workflow_started()
            except InvalidExecutionUpdateStatus:
                self._workflow_cancelled()
                return api.EXECUTION_CANCELLED_RESULT

            result_queue = queue.Queue()
            t = threading.Thread(target=self._remote_workflow_child_thread,
                                 args=(result_queue, ),
                                 name='Workflow-Child')
            t.daemon = True
            t.start()

            # while the child thread is executing the workflow, the parent
            # thread is polling for 'cancel' requests while also waiting for
            # messages from the child thread
            result = None
            while True:
                # check if child thread sent a message
                try:
                    data = result_queue.get(timeout=5)
                    if 'result' in data:
                        # child thread has terminated
                        result = data['result']
                        break
                    else:
                        # error occurred in child thread
                        raise data['error']
                except queue.Empty:
                    pass

                # A very hacky way to solve an edge case when trying to poll
                # for the execution status while the DB is downgraded during
                # a snapshot restore
                if self.cloudify_context['workflow_id'] == 'restore_snapshot':
                    continue

                # check for 'cancel' requests
                execution = rest.executions.get(self.ctx.execution_id,
                                                _include=['status'])
                if execution.status in [
                        Execution.CANCELLING, Execution.FORCE_CANCELLING,
                        Execution.KILL_CANCELLING
                ]:
                    # send a 'cancel' message to the child thread. It is up to
                    # the workflow implementation to check for this message
                    # and act accordingly (by stopping and raising an
                    # api.ExecutionCancelled error, or by returning the
                    # deprecated api.EXECUTION_CANCELLED_RESULT as result).
                    # The parent thread then goes back to polling for
                    # messages from the child thread, and for possible
                    # 'force-cancel' requests.
                    api.set_cancel_request()

                if execution.status == Execution.KILL_CANCELLING:
                    # if a custom workflow function must attempt some cleanup,
                    # it might attempt to catch SIGTERM, and confirm using this
                    # flag that it is being kill-cancelled
                    api.set_kill_request()

                if execution.status in [
                        Execution.FORCE_CANCELLING, Execution.KILL_CANCELLING
                ]:
                    # force-cancel additionally stops this loop immediately
                    result = api.EXECUTION_CANCELLED_RESULT
                    break

            if result == api.EXECUTION_CANCELLED_RESULT:
                self._workflow_cancelled()
            else:
                self._workflow_succeeded()
            return result
        except _WorkflowFuncError as e:
            self._workflow_failed(e.wrapped_exc, e.wrapped_tb)
            # equivalent to `raise e.wrapped_exc from None`, written in a
            # way that won't break py2
            e.wrapped_exc.__suppress_context__ = True
            raise e.wrapped_exc
        except BaseException as e:
            self._workflow_failed(e, traceback.format_exc())
            raise