def test_fetch_and_process_events_implicit_several_batches(self):
        """Verify fetch_and_process_events batches events as expected.

        With 5 events and batch_size=2, the handler must be invoked three
        times: two full batches of 2 events and a final batch of 1 event.
        """
        event_log = {}
        self.batch_counter = 0
        self.events = range(0, 5)

        def test_events_logger(events):
            # Record, for every event, its position within the batch and
            # the batch number so batching can be asserted afterwards.
            self.batch_counter += 1
            for index, event in enumerate(events):
                event_log[event] = 'event {0} of {1} in batch {2}'.format(
                    index + 1, len(events), self.batch_counter)

        events_fetcher = ExecutionEventsFetcher(self.client,
                                                'execution_id',
                                                batch_size=2)
        # internally this will get 5 events in 2 batches of 2 events each
        # and a last batch of 1 event
        events_count = events_fetcher.fetch_and_process_events(
            events_handler=test_events_logger)
        # assert all events were handled
        self.assertEqual(len(self.events), events_count)
        # assert batching was as expected (2*2, 1*1)
        # BUG FIX: the original assigned the expected strings into
        # event_log instead of asserting against what the handler wrote.
        self.assertEqual('event 1 of 2 in batch 1', event_log[self.events[0]])
        self.assertEqual('event 2 of 2 in batch 1', event_log[self.events[1]])
        self.assertEqual('event 1 of 2 in batch 2', event_log[self.events[2]])
        self.assertEqual('event 2 of 2 in batch 2', event_log[self.events[3]])
        self.assertEqual('event 1 of 1 in batch 3', event_log[self.events[4]])
        # there shouldn't be any remaining events, verify that
        remaining_events_count = events_fetcher.fetch_and_process_events()
        self.assertEqual(0, remaining_events_count)
# Example #2 (score: 0)
def ls(execution_id, include_logs):
    """List all events for the given execution.

    Fetches every event from the management server and hands them to the
    configured events logger.

    :param execution_id: id of the execution whose events are listed
    :param include_logs: whether to include log entries alongside events
    :raises CloudifyCliError: if the execution does not exist (HTTP 404)
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info("Getting events from management server {0} for "
                "execution id '{1}' "
                "[include_logs={2}]".format(management_ip,
                                            execution_id,
                                            include_logs))
    client = utils.get_rest_client(management_ip)
    try:

        execution_events = ExecutionEventsFetcher(
            client,
            execution_id,
            include_logs=include_logs)
        events = execution_events.fetch_all()
        events_logger = get_events_logger()
        events_logger(events)
        logger.info('\nTotal events: {0}'.format(len(events)))
    # FIX: use the Python 3 compatible `as e` form (the deprecated
    # `except X, e` syntax is a SyntaxError on Python 3); the rest of
    # this file already uses `as e`.
    except CloudifyClientError as e:
        # Only 404 means "unknown execution"; anything else propagates.
        if e.status_code != 404:
            raise
        msg = ("Execution '{0}' not found on management server"
               .format(execution_id))
        raise CloudifyCliError(msg)
    def test_fetch_and_process_events_implicit_several_batches(self):
        """Verify fetch_and_process_events batches events as expected.

        With 5 events and batch_size=2 the handler must run three times:
        two full batches of 2 events and a final batch of 1 event.
        """
        event_log = {}
        self.batch_counter = 0
        self.events = range(0, 5)

        def test_events_logger(events):
            # Record each event's position within its batch plus the
            # batch number, so the batching can be asserted afterwards.
            self.batch_counter += 1
            for index, event in enumerate(events):
                event_log[event] = 'event {0} of {1} in batch {2}'.format(
                    index + 1, len(events), self.batch_counter)

        events_fetcher = ExecutionEventsFetcher(self.client,
                                                'execution_id',
                                                batch_size=2)
        # internally this will get 5 events in 2 batches of 2 events each
        # and a last batch of 1 event
        events_count = events_fetcher.fetch_and_process_events(
            events_handler=test_events_logger)
        # assert all events were handled
        self.assertEqual(len(self.events), events_count)
        # assert batching was as expected (2*2, 1*1)
        # BUG FIX: the original assigned the expected strings into
        # event_log instead of asserting against the handler's output.
        self.assertEqual('event 1 of 2 in batch 1', event_log[self.events[0]])
        self.assertEqual('event 2 of 2 in batch 1', event_log[self.events[1]])
        self.assertEqual('event 1 of 2 in batch 2', event_log[self.events[2]])
        self.assertEqual('event 2 of 2 in batch 2', event_log[self.events[3]])
        self.assertEqual('event 1 of 1 in batch 3', event_log[self.events[4]])
        # there shouldn't be any remaining events, verify that
        remaining_events_count = events_fetcher.fetch_and_process_events()
        self.assertEqual(0, remaining_events_count)
 def test_get_remaining_events_several_batches(self):
     """With get_remaining_events set, every event is returned at once,
     even when the events span several batches."""
     self.events = range(0, 10)
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=2)
     fetched = fetcher.fetch_events(get_remaining_events=True)
     self.assertListEqual(self.events, fetched)
 def test_new_events_after_fetched_all(self):
     """Events added after a full fetch are counted by the next call."""
     self.events = range(0, 10)
     fetcher = ExecutionEventsFetcher(self.client, 'execution_id')
     fetcher.fetch_and_process_events()
     new_events = range(20, 25)
     self.events.extend(new_events)
     self.assertEqual(len(new_events),
                      fetcher.fetch_and_process_events())
 def test_new_events_after_fetched_all(self):
     """fetch_events returns only events added since the previous fetch."""
     self.events = range(0, 10)
     fetcher = ExecutionEventsFetcher(self.client, 'execution_id')
     fetcher.fetch_events()
     expected = range(20, 25)
     self.events.extend(range(20, 25))
     self.assertListEqual(expected, fetcher.fetch_events())
 def test_new_events_after_fetched_all(self):
     """Only events appended after the first full pass are counted by a
     subsequent fetch_and_process_events call."""
     self.events = range(0, 10)
     fetcher = ExecutionEventsFetcher(self.client, 'execution_id')
     fetcher.fetch_and_process_events()
     late_arrivals = range(20, 25)
     self.events.extend(late_arrivals)
     count = fetcher.fetch_and_process_events()
     self.assertEqual(len(late_arrivals), count)
 def test_get_remaining_events_timeout(self):
     """fetch_events(get_remaining_events=True) must raise RuntimeError
     when all events cannot be fetched within the configured timeout."""
     self.events = range(0, 20)
     execution_events = ExecutionEventsFetcher(self.client,
                                               'execution_id',
                                               batch_size=1,
                                               timeout=3)
     # Idiom fix: assertRaises replaces the try/self.fail()/except
     # pattern, matching the assertRaises style used elsewhere in this
     # file (see test_fetch_and_process_events_timeout).
     self.assertRaises(RuntimeError,
                       execution_events.fetch_events,
                       get_remaining_events=True)
 def test_several_batches(self):
     """fetch_events yields batch_size events per call until exhausted,
     then an empty batch."""
     self.events = range(0, 10)
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=2)
     collected = []
     for _ in range(5):
         batch = fetcher.fetch_events()
         self.assertEqual(2, len(batch))
         collected.extend(batch)
     # Once everything was consumed a further call yields nothing.
     self.assertEqual(0, len(fetcher.fetch_events()))
     self.assertListEqual(self.events, collected)
 def test_events_progress(self):
     """Each fetch_events call returns only the events added since the
     previous call."""
     self.events = range(0, 5)
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=100)
     self.assertEqual(5, len(fetcher.fetch_events()))
     self.events.extend(range(0, 10))
     self.assertEqual(10, len(fetcher.fetch_events()))
     self.events.extend(range(0, 5))
     self.assertEqual(5, len(fetcher.fetch_events()))
 def test_events_processing_progress(self):
     """fetch_and_process_events reports the count of newly seen events
     on every successive call."""
     first_bulk = range(0, 5)
     self.events = first_bulk
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=100)
     self.assertEqual(len(first_bulk),
                      fetcher.fetch_and_process_events())
     # Add two more bulks (sizes 10 then 7); each call must report
     # exactly the size of the bulk added since the last call.
     for bulk_size in (10, 7):
         bulk = range(0, bulk_size)
         self.events.extend(bulk)
         self.assertEqual(len(bulk),
                          fetcher.fetch_and_process_events())
 def test_fetch_and_process_events_timeout(self):
     """Processing a huge event stream one-by-one must time out with
     EventProcessingTimeoutError."""
     self.events = range(0, 2000000)
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=1)
     self.assertRaises(EventProcessingTimeoutError,
                       fetcher.fetch_and_process_events,
                       timeout=2)
 def test_events_processing_progress(self):
     """Successive fetch_and_process_events calls each report only the
     events added since the previous call."""
     bulk_one = range(0, 5)
     self.events = bulk_one
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=100)
     count = fetcher.fetch_and_process_events()
     self.assertEqual(len(bulk_one), count)
     bulk_two = range(0, 10)
     self.events.extend(bulk_two)
     count = fetcher.fetch_and_process_events()
     self.assertEqual(len(bulk_two), count)
     bulk_three = range(0, 7)
     self.events.extend(bulk_three)
     count = fetcher.fetch_and_process_events()
     self.assertEqual(len(bulk_three), count)
 def test_fetch_and_process_events_explicit_several_batches(self):
     """Driving the private batch API directly yields four full batches
     of batch_size events and then the single remaining event."""
     self.events = range(0, 9)
     batch_size = 2
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=batch_size)
     processed = 0
     for _ in range(4):
         count = fetcher._fetch_and_process_events_batch()
         self.assertEqual(count, batch_size)
         processed += count
     leftover = fetcher._fetch_and_process_events_batch()
     self.assertEqual(leftover, 1)
     processed += leftover
     self.assertEqual(len(self.events), processed)
    def test_fetch_events_explicit_several_batches(self):
        """_fetch_events_batch returns batch_size events per call and the
        remainder (one event) on the final call."""
        self.events = range(0, 9)
        batch_size = 2
        fetcher = ExecutionEventsFetcher(self.client,
                                         'execution_id',
                                         batch_size=batch_size)

        collected = []
        for _ in range(4):
            batch = fetcher._fetch_events_batch()
            self.assertEqual(len(batch), batch_size)
            collected.extend(batch)

        last_batch = fetcher._fetch_events_batch()
        self.assertEqual(len(last_batch), 1)
        collected.extend(last_batch)
        self.assertEqual(self.events, collected)
    def test_fetch_events_explicit_several_batches(self):
        """Repeatedly calling _fetch_events_batch drains the events:
        four full batches of two, then a final batch of one."""
        self.events = range(0, 9)
        batch_size = 2
        events_fetcher = ExecutionEventsFetcher(self.client,
                                                'execution_id',
                                                batch_size=batch_size)

        gathered = []
        batch_index = 0
        while batch_index < 4:
            current = events_fetcher._fetch_events_batch()
            self.assertEqual(len(current), batch_size)
            gathered.extend(current)
            batch_index += 1

        final = events_fetcher._fetch_events_batch()
        self.assertEqual(len(final), 1)
        gathered.extend(final)
        self.assertEqual(self.events, gathered)
 def test_fetch_and_process_events_explicit_several_batches(self):
     """Calling _fetch_and_process_events_batch manually processes four
     complete batches and then the one remaining event."""
     self.events = range(0, 9)
     batch_size = 2
     events_fetcher = ExecutionEventsFetcher(self.client,
                                             'execution_id',
                                             batch_size=batch_size)
     counts = [events_fetcher._fetch_and_process_events_batch()
               for _ in range(4)]
     for count in counts:
         self.assertEqual(count, batch_size)
     remaining = events_fetcher._fetch_and_process_events_batch()
     self.assertEqual(remaining, 1)
     self.assertEqual(len(self.events), sum(counts) + remaining)
# Example #18 (score: 0)
def ls(execution_id, include_logs, tail):
    """List events for the given execution, optionally tailing it.

    When ``tail`` is set, waits for the execution to finish while
    streaming its events; otherwise lists the events created so far.

    :param execution_id: id of the execution whose events are listed
    :param include_logs: whether to include log entries alongside events
    :param tail: follow the execution until it terminates
    :raises SuppressedCloudifyCliError: if the tailed execution failed
    :raises CloudifyCliError: if the execution does not exist (HTTP 404)
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info("Getting events from management server {0} for "
                "execution id '{1}' "
                "[include_logs={2}]".format(management_ip,
                                            execution_id,
                                            include_logs))
    client = utils.get_rest_client(management_ip)
    try:
        execution_events = ExecutionEventsFetcher(
            client,
            execution_id,
            include_logs=include_logs)

        events_logger = get_events_logger()

        if tail:
            execution = wait_for_execution(client,
                                           client.executions.get(execution_id),
                                           events_handler=events_logger,
                                           include_logs=include_logs,
                                           timeout=None)   # don't timeout ever
            if execution.error:
                logger.info("Execution of workflow '{0}' for deployment "
                            "'{1}' failed. [error={2}]"
                            .format(execution.workflow_id,
                                    execution.deployment_id,
                                    execution.error))
                raise SuppressedCloudifyCliError()
            else:
                logger.info("Finished executing workflow '{0}' on deployment "
                            "'{1}'".format(execution.workflow_id,
                                           execution.deployment_id))
        else:
            # don't tail, get only the events created until now and return
            events = execution_events.fetch_and_process_events(
                events_handler=events_logger)
            logger.info('\nTotal events: {0}'.format(events))
    # FIX: use the Python 3 compatible `as e` form (the deprecated
    # `except X, e` syntax is a SyntaxError on Python 3); the rest of
    # this file already uses `as e`.
    except CloudifyClientError as e:
        # Only 404 means "unknown execution"; anything else propagates.
        if e.status_code != 404:
            raise
        msg = ("Execution '{0}' not found on management server"
               .format(execution_id))
        raise CloudifyCliError(msg)
# Example #19 (score: 0)
def deployment_failed_tasks(client, workflow_id, deployment):
    """Collect failed-task events from the latest run of ``workflow_id``
    on ``deployment``.

    Returns a result dict keyed by 'type': RESULT_NOT_INSTALLED when the
    deployment isn't installed, RESULT_NO_EXECUTION when the workflow
    never ran, or RESULT_TASKS with the failed-task events otherwise.
    """
    node_instances = client.node_instances.list(deployment_id=deployment.id)
    if not agents_utils.is_deployment_installed(node_instances):
        return {'type': RESULT_NOT_INSTALLED, 'deployment': deployment}
    matching = [execution
                for execution in client.executions.list(
                    deployment_id=deployment.id)
                if execution.workflow_id == workflow_id]
    if not matching:
        return {'type': RESULT_NO_EXECUTION, 'deployment': deployment}
    # Most recent execution of the workflow (sorted ascending, take last).
    last_execution = sorted(matching, key=execution_timestamp)[-1]
    fetcher = ExecutionEventsFetcher(client,
                                     last_execution.id,
                                     include_logs=False)
    collected = []
    fetcher.fetch_and_process_events(events_handler=collected.extend)
    failed = [ev for ev in collected
              if ev.get('event_type') == FAILED_TASK_TYPE]
    return {'type': RESULT_TASKS,
            'failed_tasks': failed,
            'execution': last_execution}
def deployment_failed_tasks(client, workflow_id, deployment):
    """Return failed-task events for the most recent run of
    ``workflow_id`` on ``deployment``.

    The result dict's 'type' key is RESULT_NOT_INSTALLED,
    RESULT_NO_EXECUTION, or RESULT_TASKS accordingly.
    """
    instances = client.node_instances.list(deployment_id=deployment.id)
    if not agents_utils.is_deployment_installed(instances):
        return {'type': RESULT_NOT_INSTALLED, 'deployment': deployment}
    all_executions = client.executions.list(deployment_id=deployment.id)
    runs = [run for run in all_executions
            if run.workflow_id == workflow_id]
    if not runs:
        return {'type': RESULT_NO_EXECUTION, 'deployment': deployment}
    runs.sort(key=execution_timestamp)
    latest = runs[-1]  # newest execution of the requested workflow
    fetcher = ExecutionEventsFetcher(
        client,
        latest.id,
        include_logs=False)
    gathered = []
    fetcher.fetch_and_process_events(events_handler=gathered.extend)
    failed_tasks = [item for item in gathered
                    if item.get('event_type') == FAILED_TASK_TYPE]
    return {'type': RESULT_TASKS,
            'failed_tasks': failed_tasks,
            'execution': latest}
# Example #21 (score: 0)
def ls(execution_id, include_logs, tail, json):
    """List events for an execution, optionally tailing it live.

    :param execution_id: id of the execution whose events are listed
    :param include_logs: whether to include log entries alongside events
    :param tail: follow the execution until it terminates
    :param json: emit events in JSON format
    :raises SuppressedCloudifyCliError: if the tailed execution failed
    :raises CloudifyCliError: if the execution does not exist (HTTP 404)
    """
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    logger.info('Listing events for execution id {0} '
                '[include_logs={1}]'.format(execution_id, include_logs))
    client = utils.get_rest_client(management_ip)
    try:
        execution_events = ExecutionEventsFetcher(
            client,
            execution_id,
            include_logs=include_logs)

        events_logger = get_events_logger(json)

        if not tail:
            # don't tail, get only the events created until now and return
            events = execution_events.fetch_and_process_events(
                events_handler=events_logger)
            logger.info('\nTotal events: {0}'.format(events))
        else:
            execution = wait_for_execution(client,
                                           client.executions.get(execution_id),
                                           events_handler=events_logger,
                                           include_logs=include_logs,
                                           timeout=None)   # don't timeout ever
            if not execution.error:
                logger.info('Finished executing workflow {0} on deployment '
                            '{1}'.format(
                                execution.workflow_id,
                                execution.deployment_id))
            else:
                logger.info('Execution of workflow {0} for deployment '
                            '{1} failed. [error={2}]'.format(
                                execution.workflow_id,
                                execution.deployment_id,
                                execution.error))
                raise SuppressedCloudifyCliError()
    except CloudifyClientError as e:
        # Anything other than a 404 is unexpected and must propagate.
        if e.status_code != 404:
            raise
        raise CloudifyCliError('Execution {0} not found'.format(execution_id))
# Example #22 (score: 0)
def script(execution_id,
           output=None,
           batch_size=1000,
           include_logs=True,
           timeout=3600):
    """Dump events of an execution in json format."""

    fetcher = ExecutionEventsFetcher(execution_id=execution_id,
                                     client=cosmo.client,
                                     batch_size=batch_size,
                                     include_logs=include_logs)

    # Write to the requested file, or to stdout when no output was given.
    if output:
        stream = open(output, 'w')
    else:
        stream = sys.stdout

    def handle(batch):
        # One JSON document per line, one line per event.
        for event in batch:
            stream.write('{0}\n'.format(json.dumps(event)))

    try:
        fetcher.fetch_and_process_events(events_handler=handle,
                                         timeout=timeout)
    finally:
        # Close only streams we opened ourselves; never close stdout.
        if output:
            stream.close()
 def test_fetch_and_process_events_implicit_single_batch(self):
     """When all events fit in one batch, the processed count equals the
     total number of events."""
     self.events = range(0, 10)
     fetcher = ExecutionEventsFetcher(self.client, 'execution_id',
                                      batch_size=100)
     self.assertEqual(len(self.events),
                      fetcher.fetch_and_process_events())
 def test_single_batch(self):
     """A single fetch_events call returns every available event."""
     self.events = range(0, 10)
     fetcher = ExecutionEventsFetcher(self.client, 'execution_id')
     self.assertListEqual(self.events, fetcher.fetch_events())
 def test_no_events(self):
     """fetch_events returns an empty batch when no events exist."""
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=2)
     self.assertEqual(0, len(fetcher.fetch_events()))
 def test_fetch_events_explicit_single_batch(self):
     """_fetch_events_batch returns everything when batch_size exceeds
     the number of events."""
     self.events = range(0, 10)
     fetcher = ExecutionEventsFetcher(self.client, 'execution_id',
                                      batch_size=100)
     self.assertListEqual(self.events, fetcher._fetch_events_batch())
 def test_no_events(self):
     """fetch_and_process_events reports zero when no events exist."""
     fetcher = ExecutionEventsFetcher(self.client,
                                      'execution_id',
                                      batch_size=2)
     self.assertEqual(0, fetcher.fetch_and_process_events())
 def test_fetch_and_process_events_implicit_single_batch(self):
     """A batch size larger than the event count means one call
     processes every event."""
     self.events = range(0, 10)
     events_fetcher = ExecutionEventsFetcher(self.client, 'execution_id',
                                             batch_size=100)
     processed = events_fetcher.fetch_and_process_events()
     self.assertEqual(len(self.events), processed)
 def test_fetch_events_explicit_single_batch(self):
     """One _fetch_events_batch call suffices when batch_size exceeds
     the number of available events."""
     self.events = range(0, 10)
     events_fetcher = ExecutionEventsFetcher(self.client, 'execution_id',
                                             batch_size=100)
     single_batch = events_fetcher._fetch_events_batch()
     self.assertListEqual(self.events, single_batch)
 def test_no_events(self):
     """With no events available the processed count is zero."""
     events_fetcher = ExecutionEventsFetcher(self.client,
                                             'execution_id',
                                             batch_size=2)
     processed = events_fetcher.fetch_and_process_events()
     self.assertEqual(0, processed)