Example #1
0
def stream_instance_logs_from_cloudwatch(
        sleep_time=10,
        log_group=None,
        specific_log_stream=None
):
    """
    Continuously polls CloudWatch for streams matching `log_group` and tails
    them on the terminal. A logGroup pattern can match several streams, so a
    stream handler is created per match and all output is interleaved on the
    same terminal.

    :param sleep_time: seconds between refreshes of a single stream
    :param log_group: the CloudWatch logGroup to tail
    :param specific_log_stream: restricts output to one stream (stream names
        are instance ids, so this is required for single-stream output)
    """
    event_streamer = io.get_event_streamer()
    event_streamer.prompt = ' -- {0} -- (Ctrl+C to exit)'.format(log_group)

    poll_start_time = None
    while True:
        # A set drops duplicate stream names within one polling round.
        matched_stream_names = set(cloudwatch_log_stream_names(log_group, specific_log_stream))

        for matched_stream_name in matched_stream_names:
            _create_log_stream_for_log_group(log_group, matched_stream_name, event_streamer, sleep_time, poll_start_time)
            _delay_subsequent_stream_creation()

        _wait_to_poll_cloudwatch()

        poll_start_time = _updated_start_time()
Example #2
0
def stream_instance_logs_from_cloudwatch(
        sleep_time=10,
        log_group=None,
        specific_log_stream=None
):
    """
    Tails CloudWatch logs for `log_group` on the terminal. Since the logGroup
    may match multiple streams, one handler per matched stream is created so
    all of them are displayed on the same terminal.

    :param sleep_time: refresh interval, in seconds, for each stream
    :param log_group: the CloudWatch logGroup to tail
    :param specific_log_stream: limit streaming to a single stream (stream
        names are instance ids, so this is required for one stream only)
    """
    terminal_streamer = io.get_event_streamer()
    terminal_streamer.prompt = ' -- {0} -- (Ctrl+C to exit)'.format(log_group)

    next_start_time = None
    while True:
        # De-duplicate stream names returned for this logGroup.
        stream_names = set(cloudwatch_log_stream_names(log_group, specific_log_stream))

        for stream_name in stream_names:
            _create_log_stream_for_log_group(log_group, stream_name, terminal_streamer, sleep_time, next_start_time)
            _delay_subsequent_stream_creation()

        _wait_to_poll_cloudwatch()

        next_start_time = _updated_start_time()
 def test_event_streamer_with_unsafe_exit(self, mock_sys):
     # Simulate an interactive terminal so the streamer renders prompts.
     mock_sys.stdout.isatty.return_value = True
     with mock.patch('ebcli.core.io.echo') as mocked_echo:
         event_streamer = io.get_event_streamer()
         event_streamer.stream_event("msg1", safe_to_quit=False)
         # An event flagged unsafe must render the unsafe prompt variant.
         mocked_echo.assert_called_with(event_streamer.unsafe_prompt, end='')
         event_streamer.end_stream()
     self.assertEqual(event_streamer.eventcount, 1, "Expected event count to be 1 but was: {0}".format(event_streamer.eventcount))
 def test_pipe_streamer(self, mock_sys):
     # A non-tty stdout means output is piped: no prompts, verbatim echo.
     mock_sys.stdout.isatty.return_value = False
     streamed_message = "msg1"
     with mock.patch('ebcli.core.io.echo') as mocked_echo:
         pipe_streamer = io.get_event_streamer()
         pipe_streamer.stream_event(streamed_message)
         # Piped output is echoed as-is, without a prompt suffix.
         mocked_echo.assert_called_with(streamed_message)
         pipe_streamer.end_stream()
     self.assertEqual(pipe_streamer.eventcount, 0, "Expected event count to be 0 but was: {0}".format(pipe_streamer.eventcount))
 def test_event_streamer(self, mock_sys):
     # Simulate an interactive terminal so prompts are rendered.
     mock_sys.stdout.isatty.return_value = True
     with mock.patch('ebcli.core.io.echo') as mocked_echo:
         event_streamer = io.get_event_streamer()
         event_streamer.stream_event("msg1")
         event_streamer.stream_event("msg2")
         # The normal prompt (no trailing newline) follows each event.
         mocked_echo.assert_called_with(event_streamer.prompt, end='')
         event_streamer.end_stream()
     self.assertEqual(event_streamer.eventcount, 2, "Expected event count to be 2 but was: {0}".format(event_streamer.eventcount))
Example #6
0
def stream_platform_logs(platform_name, version, streamer=None, sleep_time=4, log_name=None, formatter=None):
    """
    Streams the CloudWatch logStream `version` of the builder logGroup derived
    from `platform_name` to the terminal.

    :param platform_name: platform whose builder logGroup is polled
    :param version: logStream name to stream
    :param streamer: terminal event streamer; created on demand when omitted
    :param sleep_time: seconds to sleep between CloudWatch polls
    :param log_name: label shown in the terminal prompt, when given
    :param formatter: optional formatter applied to the streamed output
    """
    builder_log_group = _get_platform_builder_group_name(platform_name)

    if streamer is None:
        streamer = io.get_event_streamer()

    if log_name is not None:
        streamer.prompt = ' -- Streaming logs for %s -- (Ctrl+C to exit)' % log_name

    stream_single_stream(builder_log_group, version, streamer, sleep_time, formatter)
Example #7
0
def stream_platform_logs(response, platform_name, version, timeout):
    """
    Launches a daemon thread that streams the platform builder's Packer logs
    while the calling thread waits for the create-platform request to finish.

    :param response: CreatePlatformVersion API response with the ARN/request id
    :param platform_name: platform whose builder logs are streamed
    :param version: platform version (also the CloudWatch logStream name)
    :param timeout: minutes to wait for success events; falsy defaults to 30
    """
    platform_arn = response['PlatformSummary']['PlatformArn']
    request_id = response['ResponseMetadata']['RequestId']

    # One streamer shared by builder-log output and platform events.
    streamer = io.get_event_streamer()

    log_streaming_thread = threading.Thread(
        target=logsops.stream_platform_logs,
        args=(platform_name, version, streamer, 5, None, PackerStreamFormatter())
    )
    log_streaming_thread.daemon = True
    log_streaming_thread.start()

    commonops.wait_for_success_events(
        request_id,
        platform_arn=platform_arn,
        streamer=streamer,
        timeout_in_minutes=timeout or 30
    )
Example #8
0
def stream_platform_logs(platform_name, version, streamer=None, sleep_time=4, log_name=None, formatter=None):
    """
    Streams the logs of a custom platform.

    :param platform_name: a CloudWatch logGroup in which the logStream `version` exists
    :param version: a CloudWatch logStream to poll
    :param streamer: the object that streams events to the terminal
    :param sleep_time: time in seconds to sleep before polling CloudWatch for newer events
    :param log_name: a name used to identify the blob of output in the terminal for the logStream
    :param formatter: the object that formats the output to be displayed in the terminal
    :return: None
    """
    builder_log_group = _get_platform_builder_group_name(platform_name)
    # Block until the builder's logGroup has actually been created.
    wait_for_log_group_to_come_into_existence(builder_log_group, sleep_time)
    streamer = io.get_event_streamer() if not streamer else streamer

    if log_name:
        streamer.prompt = ' -- Streaming logs for %s -- (Ctrl+C to exit)' % log_name

    # NOTE(review): `streamer` (and the prompt set above) is not passed to
    # stream_single_stream — presumably that helper obtains its own streamer;
    # confirm the custom prompt is actually displayed.
    stream_single_stream(builder_log_group, version, sleep_time, None, formatter)
Example #9
0
def stream_platform_logs(platform_name, version, streamer=None, sleep_time=4, log_name=None, formatter=None):
    """
    Streams the logs of a custom platform.

    :param platform_name: a CloudWatch logGroup in which the logStream `version` exists
    :param version: a CloudWatch logStream to poll
    :param streamer: the object that streams events to the terminal
    :param sleep_time: time in seconds to sleep before polling CloudWatch for newer events
    :param log_name: a name used to identify the blob of output in the terminal for the logStream
    :param formatter: the object that formats the output to be displayed in the terminal
    :return: None
    """
    group_name = _get_platform_builder_group_name(platform_name)
    # Wait until CloudWatch has created the builder's logGroup.
    wait_for_log_group_to_come_into_existence(group_name, sleep_time)
    if not streamer:
        streamer = io.get_event_streamer()

    if log_name:
        streamer.prompt = ' -- Streaming logs for %s -- (Ctrl+C to exit)' % log_name

    # NOTE(review): the streamer configured above is not forwarded to
    # stream_single_stream — verify the prompt customization takes effect.
    stream_single_stream(group_name, version, sleep_time, None, formatter)
Example #10
0
def stream_cloudwatch_logs(env_name,
                           sleep_time=2,
                           log_group=None,
                           instance_id=None):
    """
    Streams logs to the terminal for the log group given; when multiple
    streams are found, one daemon thread is spawned per stream so that all of
    them are interleaved on the same terminal.

    :param env_name: environment name
    :param sleep_time: sleep time to refresh the logs from cloudwatch
    :param log_group: cloudwatch log group
    :param instance_id: since all of our log streams are instance ids we require this if we want a single stream
    :raises NotFoundError: when the stream lookup fails or no streams exist
    """
    if log_group is None:
        # Default to the environment's activity log group.
        log_group = 'awseb-{0}-activity'.format(env_name)
        log_name = 'eb-activity.log'
    else:
        log_name = get_log_name(log_group)
    stream_names = []
    streamer = io.get_event_streamer()
    streamer.prompt = ' -- {0} -- (Ctrl+C to exit)'.format(log_name)
    jobs = []
    while True:
        try:
            new_names = cloudwatch.get_all_stream_names(log_group, instance_id)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt and
            # SystemExit (breaking Ctrl+C); catch only real errors before
            # reporting that CloudWatch streaming is not set up.
            raise NotFoundError(strings['cloudwatch-stream.notsetup'])
        if len(new_names) == 0:
            raise NotFoundError(strings['cloudwatch-logs.nostreams'].replace(
                '{log_group}', log_group))
        for name in new_names:
            if name not in stream_names:
                stream_names.append(name)

                # Daemon thread per stream so exiting the CLI kills them all.
                p = threading.Thread(target=_stream_single_stream,
                                     args=(log_group, name, streamer,
                                           sleep_time))
                p.daemon = True
                jobs.append(p)
                p.start()
            time.sleep(0.2)  # offset threads

        time.sleep(10)
Example #11
0
def follow_events(app_name, env_name, platform_arn=None):
    """
    Polls Beanstalk for new events and streams them to the terminal until an
    EndOfTestError stops the loop; the streamer is always closed on exit.

    :param app_name: application whose events are followed
    :param env_name: environment whose events are followed
    :param platform_arn: optional platform ARN filter for the event query
    """
    latest_event_time = None
    event_streamer = io.get_event_streamer()
    try:
        while True:
            new_events = elasticbeanstalk.get_new_events(app_name,
                                                         env_name,
                                                         None,
                                                         platform_arn=platform_arn,
                                                         last_event_time=latest_event_time)

            # Events arrive newest-first; display them oldest-first.
            for event in reversed(new_events):
                event_streamer.stream_event(commonops.get_event_string(event, long_format=True))
                latest_event_time = event.event_date

            _sleep()
    except EndOfTestError:
        pass
    finally:
        event_streamer.end_stream()
Example #12
0
def create_platform_version(
        version,
        major_increment,
        minor_increment,
        patch_increment,
        instance_type,
        vpc = None,
        staged=False,
        timeout=None):
    """
    Builds and registers a new custom platform version from the platform
    workspace in the current project, then streams builder logs and platform
    events until the build finishes or times out.

    :param version: explicit version string (major.minor.patch); when None the
        next version is derived from the latest self-owned version and the
        increment flags below (starting at 1.0.0 if none exists)
    :param major_increment: bump the major component (resets minor and patch)
    :param minor_increment: bump the minor component (resets patch)
    :param patch_increment: bump the patch component; also the default when no
        increment flag is given
    :param instance_type: EC2 instance type for the platform builder
    :param vpc: optional VPC configuration for the builder environment
    :param staged: when True, package only staged source changes and suffix the
        version label with a timestamp to keep it unique
    :param timeout: minutes to wait for success events (defaults to 30)
    :raises InvalidPlatformVersionError: when `version` is not major.minor.patch
    :raises PlatformWorkspaceEmptyError: when the workspace has no content or
        no platform definition file
    """

    platform_name = fileoperations.get_platform_name()
    instance_profile = fileoperations.get_instance_profile(None)
    key_name = commonops.get_default_keyname()

    if version is None:
        version = _get_latest_version(platform_name=platform_name, owner=Constants.OWNED_BY_SELF, ignored_states=[])

        if version is None:
            version = '1.0.0'
        else:
            # NOTE(review): maxsplit=3 allows a fourth dotted component, which
            # would make this 3-name unpack raise ValueError — confirm whether
            # maxsplit=2 was intended.
            major, minor, patch = version.split('.', 3)

            if major_increment:
                major = str(int(major) + 1)
                minor = '0'
                patch = '0'
            if minor_increment:
                minor = str(int(minor) + 1)
                patch = '0'
            if patch_increment or not(major_increment or minor_increment):
                patch = str(int(patch) + 1)

            version = "%s.%s.%s" % (major, minor, patch)

    if not VALID_PLATFORM_VERSION_FORMAT.match(version):
        raise InvalidPlatformVersionError(strings['exit.invalidversion'])

    cwd = os.getcwd()
    fileoperations._traverse_to_project_root()

    try:
        if heuristics.directory_is_empty():
            raise PlatformWorkspaceEmptyError(strings['exit.platformworkspaceempty'])
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)

    if not heuristics.has_platform_definition_file():
        raise PlatformWorkspaceEmptyError(strings['exit.no_pdf_file'])

    source_control = SourceControl.get_source_control()
    if source_control.untracked_changes_exist():
        io.log_warning(strings['sc.unstagedchanges'])

    version_label = source_control.get_version_label()
    if staged:
        # Make a unique version label
        timestamp = datetime.now().strftime("%y%m%d_%H%M%S")
        version_label = version_label + '-stage-' + timestamp

    # Keep a pristine copy of platform.yaml so the healthd edit below can be
    # rolled back after packaging.
    file_descriptor, original_platform_yaml = tempfile.mkstemp()
    os.close(file_descriptor)

    copyfile('platform.yaml', original_platform_yaml)

    s3_bucket = None
    s3_key = None

    try:
        # Add option settings to platform.yaml
        _enable_healthd()

        s3_bucket, s3_key = get_app_version_s3_location(platform_name, version_label)

        # Create zip file if the application version doesn't exist
        if s3_bucket is None and s3_key is None:
            file_name, file_path = _zip_up_project(version_label, source_control, staged=staged)
        else:
            file_name = None
            file_path = None
    finally:
        # Restore original platform.yaml
        move(original_platform_yaml, 'platform.yaml')

    # Use existing bucket if it exists
    bucket = elasticbeanstalk.get_storage_location() if s3_bucket is None else s3_bucket

    # Use existing key if it exists
    key = platform_name + '/' + file_name if s3_key is None else s3_key

    try:
        s3.get_object_info(bucket, key)
        io.log_info('S3 Object already exists. Skipping upload.')
    except NotFoundError:
        io.log_info('Uploading archive to s3 location: ' + key)
        s3.upload_platform_version(bucket, key, file_path)

    # Just deletes the local zip
    fileoperations.delete_app_versions()
    io.log_info('Creating Platform Version ' + version_label)
    response = elasticbeanstalk.create_platform_version(
        platform_name, version, bucket, key, instance_profile, key_name, instance_type, vpc)


    # TODO: Enable this once the API returns the name of the environment associated with a
    # CreatePlatformRequest, and remove hard coded value. There is currently only one type
    # of platform builder, we may support additional builders in the future.
    #environment_name = response['PlatformSummary']['EnvironmentName']
    environment_name = 'eb-custom-platform-builder-packer'

    io.echo(colored(
        strings['platformbuildercreation.info'].format(environment_name), attrs=['reverse']))

    fileoperations.update_platform_version(version)
    commonops.set_environment_for_current_branch(environment_name)

    arn = response['PlatformSummary']['PlatformArn']
    request_id = response['ResponseMetadata']['RequestId']

    if not timeout:
        timeout = 30

    # Share streamer for platform events and builder events
    streamer = io.get_event_streamer()

    builder_events = threading.Thread(
        target=logsops.stream_platform_logs,
        args=(platform_name, version, streamer, 5, None, PackerStreamFormatter()))
    builder_events.daemon = True

    # Watch events from builder logs
    builder_events.start()
    commonops.wait_for_success_events(
        request_id,
        platform_arn=arn,
        streamer=streamer,
        timeout_in_minutes=timeout
    )
Example #13
0
def wait_for_success_events(request_id, timeout_in_minutes=None,
                            sleep_time=5, stream_events=True, can_abort=False,
                            streamer=None, app_name=None, env_name=None, version_label=None,
                            platform_arn=None, timeout_error_message=None):
    """
    Polls Beanstalk events for `request_id` (and/or the given app/env/version
    filters), streaming them to the terminal, until a success event arrives,
    an error event raises, or the timeout elapses.

    :param request_id: request whose events are awaited; may be None when
        filtering only by version_label
    :param timeout_in_minutes: 0 returns immediately; None defaults to 10
    :param sleep_time: seconds to sleep between event polls
    :param stream_events: when True, echo each event to the terminal
    :param can_abort: when True, append the abort hint to the prompt
    :param streamer: event streamer to reuse; created here when None
    :param app_name: application filter for the event query
    :param env_name: environment filter for the event query
    :param version_label: application version filter for the event query
    :param platform_arn: platform filter for the event query
    :param timeout_error_message: custom message for the TimeoutError raised
        when the deadline passes
    :raises TimeoutError: when no success event arrives within the timeout
    """
    if timeout_in_minutes == 0:
        return
    if timeout_in_minutes is None:
        timeout_in_minutes = 10

    start = datetime.utcnow()
    timediff = timedelta(seconds=timeout_in_minutes * 60)

    # Poll cursor; advanced as events are streamed below.
    last_time = start

    if streamer is None:
        streamer = io.get_event_streamer()

    if can_abort:
        streamer.prompt += strings['events.abortmessage']

    events = []

    # If the stream is ended before application-version events finish, the
    # command cannot be resumed — mark quitting as unsafe in that case.
    safe_to_quit = True
    if version_label is not None and request_id is None:
        safe_to_quit = False

    try:
        # Wait for the request's first event so app/env names can be learned
        # from it before entering the main polling loop.
        if request_id:
            while not events:
                events = elasticbeanstalk.get_new_events(
                    app_name,
                    env_name,
                    request_id,
                    last_event_time=None,
                    platform_arn=platform_arn,
                    version_label=version_label
                )

                if len(events) > 0:
                    event = events[-1]
                    app_name = event.app_name
                    env_name = event.environment_name

                    if stream_events:
                        streamer.stream_event(
                            get_event_string(
                                event,
                                long_format=True
                            ),
                            safe_to_quit=safe_to_quit
                        )

                    _raise_if_error_event(event.message)
                    if _is_success_event(event.message):
                        return
                    last_time = event.event_date
                else:
                    _sleep(sleep_time)

        # Main loop: poll for newer events until success, error, or timeout.
        while not _timeout_reached(start, timediff):
            _sleep(sleep_time)

            events = elasticbeanstalk.get_new_events(
                app_name,
                env_name,
                request_id,
                last_event_time=last_time,
                platform_arn=platform_arn,
                version_label=version_label
            )

            if events:
                events = filter_events(
                    events,
                    env_name=env_name,
                    request_id=request_id,
                    version_label=version_label
                )

            # Events arrive newest-first; process oldest-first.
            for event in reversed(events):
                if stream_events:
                    streamer.stream_event(
                        get_event_string(
                            event,
                            long_format=True
                        ),
                        safe_to_quit=safe_to_quit
                    )
                    last_time = event.event_date

                _raise_if_error_event(event.message)
                if _is_success_event(event.message):
                    return
    finally:
        streamer.end_stream()

    # Reaching here means the deadline passed without a success event.
    if not timeout_error_message:
        timeout_error_message = strings['timeout.error'].format(timeout_in_minutes=timeout_in_minutes)

    raise TimeoutError(timeout_error_message)
Example #14
0
def wait_for_compose_events(request_id, app_name, grouped_envs, timeout_in_minutes=None,
                            sleep_time=5, stream_events=True,
                            can_abort=False):
    """
    Waits for every environment of a compose-environments request to report a
    success event, streaming both the compose-level events and each
    environment's own events to the terminal. Logs a timeout error (rather
    than raising) when the deadline passes first.

    :param request_id: id of the compose request whose events are followed
    :param app_name: application that owns the composed environments
    :param grouped_envs: list of environment names being composed
    :param timeout_in_minutes: 0 returns immediately; None defaults to 15
    :param sleep_time: seconds to sleep between per-environment polls
    :param stream_events: when True, echo events to the terminal
    :param can_abort: when True, append the abort hint to the prompt
    """
    if timeout_in_minutes == 0:
        return
    if timeout_in_minutes is None:
        timeout_in_minutes = 15

    start = datetime.utcnow()
    timediff = timedelta(seconds=timeout_in_minutes * 60)

    # One poll cursor, event buffer, and success flag per environment
    # (replaces the original append-in-a-range-loop initialization).
    last_times = [datetime.utcnow() for _ in grouped_envs]
    events_matrix = [[] for _ in grouped_envs]
    successes = [False] * len(grouped_envs)

    last_time_compose = datetime.utcnow()
    compose_events = []

    streamer = io.get_event_streamer()
    if can_abort:
        streamer.prompt += strings['events.abortmessage']

    try:
        while not _timeout_reached(start, timediff):
            if all(successes):
                return

            # Compose-level events apply to the request as a whole.
            compose_events = elasticbeanstalk.get_new_events(app_name=app_name,
                                                             env_name=None,
                                                             request_id=request_id,
                                                             last_event_time=last_time_compose)
            for event in reversed(compose_events):
                if stream_events:
                    streamer.stream_event(get_compose_event_string(event))
                    last_time_compose = event.event_date

            # Per-environment events; skip environments already succeeded.
            for index in range(len(grouped_envs)):
                if successes[index]:
                    continue

                _sleep(sleep_time)

                events_matrix[index] = elasticbeanstalk.get_new_events(
                    app_name, grouped_envs[index], None,
                    last_event_time=last_times[index]
                )

                for event in reversed(events_matrix[index]):
                    if stream_events:
                        streamer.stream_event(get_env_event_string(event))
                        last_times[index] = event.event_date

                    if _is_success_event(event.message):
                        successes[index] = True
    finally:
        streamer.end_stream()

    io.log_error(strings['timeout.error'])
Example #15
0
def create_platform_version(version,
                            major_increment,
                            minor_increment,
                            patch_increment,
                            instance_type,
                            vpc=None,
                            staged=False,
                            timeout=None):
    """
    Builds and registers a new custom platform version from the platform
    workspace in the current project, then streams builder logs and platform
    events until the build finishes or times out.

    :param version: explicit version string (major.minor.patch); when None the
        next version is derived from the latest self-owned version and the
        increment flags below (starting at 1.0.0 if none exists)
    :param major_increment: bump the major component (resets minor and patch)
    :param minor_increment: bump the minor component (resets patch)
    :param patch_increment: bump the patch component; also the default when no
        increment flag is given
    :param instance_type: EC2 instance type for the platform builder
    :param vpc: optional VPC configuration for the builder environment
    :param staged: when True, package only staged source changes and suffix the
        version label with a timestamp to keep it unique
    :param timeout: minutes to wait for success events (defaults to 30)
    :raises InvalidPlatformVersionError: when `version` is not major.minor.patch
    :raises PlatformWorkspaceEmptyError: when the workspace has no content or
        no platform definition file
    """

    platform_name = fileoperations.get_platform_name()
    instance_profile = fileoperations.get_instance_profile(None)
    key_name = commonops.get_default_keyname()

    if version is None:
        version = _get_latest_version(platform_name=platform_name,
                                      owner=Constants.OWNED_BY_SELF,
                                      ignored_states=[])

        if version is None:
            version = '1.0.0'
        else:
            # NOTE(review): maxsplit=3 allows a fourth dotted component, which
            # would make this 3-name unpack raise ValueError — confirm whether
            # maxsplit=2 was intended.
            major, minor, patch = version.split('.', 3)

            if major_increment:
                major = str(int(major) + 1)
                minor = '0'
                patch = '0'
            if minor_increment:
                minor = str(int(minor) + 1)
                patch = '0'
            if patch_increment or not (major_increment or minor_increment):
                patch = str(int(patch) + 1)

            version = "%s.%s.%s" % (major, minor, patch)

    if not VALID_PLATFORM_VERSION_FORMAT.match(version):
        raise InvalidPlatformVersionError(strings['exit.invalidversion'])

    cwd = os.getcwd()
    fileoperations._traverse_to_project_root()

    try:
        if heuristics.directory_is_empty():
            raise PlatformWorkspaceEmptyError(
                strings['exit.platformworkspaceempty'])
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)

    if not heuristics.has_platform_definition_file():
        raise PlatformWorkspaceEmptyError(strings['exit.no_pdf_file'])

    source_control = SourceControl.get_source_control()
    if source_control.untracked_changes_exist():
        io.log_warning(strings['sc.unstagedchanges'])

    version_label = source_control.get_version_label()
    if staged:
        # Make a unique version label
        timestamp = datetime.now().strftime("%y%m%d_%H%M%S")
        version_label = version_label + '-stage-' + timestamp

    # Keep a pristine copy of platform.yaml so the healthd edit below can be
    # rolled back after packaging.
    file_descriptor, original_platform_yaml = tempfile.mkstemp()
    os.close(file_descriptor)

    copyfile('platform.yaml', original_platform_yaml)

    try:
        # Add option settings to platform.yaml
        _enable_healthd()

        s3_bucket, s3_key = get_app_version_s3_location(
            platform_name, version_label)

        # Create zip file if the application version doesn't exist
        if s3_bucket is None and s3_key is None:
            file_name, file_path = _zip_up_project(version_label,
                                                   source_control,
                                                   staged=staged)
        else:
            file_name = None
            file_path = None
    finally:
        # Restore original platform.yaml
        move(original_platform_yaml, 'platform.yaml')

    # Use existing bucket if it exists
    bucket = elasticbeanstalk.get_storage_location(
    ) if s3_bucket is None else s3_bucket

    # Use existing key if it exists
    key = platform_name + '/' + file_name if s3_key is None else s3_key

    try:
        s3.get_object_info(bucket, key)
        io.log_info('S3 Object already exists. Skipping upload.')
    except NotFoundError:
        io.log_info('Uploading archive to s3 location: ' + key)
        s3.upload_platform_version(bucket, key, file_path)

    # Just deletes the local zip
    fileoperations.delete_app_versions()
    io.log_info('Creating Platform Version ' + version_label)
    response = elasticbeanstalk.create_platform_version(
        platform_name, version, bucket, key, instance_profile, key_name,
        instance_type, vpc)

    # TODO: Enable this once the API returns the name of the environment associated with a
    # CreatePlatformRequest, and remove hard coded value. There is currently only one type
    # of platform builder, we may support additional builders in the future.
    #environment_name = response['PlatformSummary']['EnvironmentName']
    environment_name = 'eb-custom-platform-builder-packer'

    io.echo(
        colored(
            strings['platformbuildercreation.info'].format(environment_name),
            attrs=['reverse']))

    fileoperations.update_platform_version(version)
    commonops.set_environment_for_current_branch(environment_name)

    arn = response['PlatformSummary']['PlatformArn']
    request_id = response['ResponseMetadata']['RequestId']

    if not timeout:
        timeout = 30

    # Share streamer for platform events and builder events
    streamer = io.get_event_streamer()

    builder_events = threading.Thread(target=logsops.stream_platform_logs,
                                      args=(platform_name, version, streamer,
                                            5, None, PackerStreamFormatter()))
    builder_events.daemon = True

    # Watch events from builder logs
    builder_events.start()
    commonops.wait_for_success_events(request_id,
                                      platform_arn=arn,
                                      streamer=streamer,
                                      timeout_in_minutes=timeout)
Example #16
0
def wait_for_success_events(request_id,
                            timeout_in_minutes=None,
                            sleep_time=5,
                            stream_events=True,
                            can_abort=False,
                            streamer=None,
                            app_name=None,
                            env_name=None,
                            version_label=None,
                            platform_arn=None,
                            timeout_error_message=None):
    """
    Polls Beanstalk events for `request_id` (and/or the given app/env/version
    filters), streaming them to the terminal, until a success event arrives,
    an error event raises, or the timeout elapses.

    :param request_id: request whose events are awaited; may be None when
        filtering only by version_label
    :param timeout_in_minutes: 0 returns immediately; None defaults to 10
    :param sleep_time: seconds to sleep between event polls
    :param stream_events: when True, echo each event to the terminal
    :param can_abort: when True, append the abort hint to the prompt
    :param streamer: event streamer to reuse; created here when None
    :param app_name: application filter for the event query
    :param env_name: environment filter for the event query
    :param version_label: application version filter for the event query
    :param platform_arn: platform filter for the event query
    :param timeout_error_message: custom message for the TimeoutError raised
        when the deadline passes
    :raises TimeoutError: when no success event arrives within the timeout
    """
    if timeout_in_minutes == 0:
        return
    if timeout_in_minutes is None:
        timeout_in_minutes = 10

    start = datetime.utcnow()
    timediff = timedelta(seconds=timeout_in_minutes * 60)

    # default to now, will update if request_id is provided
    last_time = start

    if streamer is None:
        streamer = io.get_event_streamer()

    if can_abort:
        streamer.prompt += strings['events.abortmessage']

    events = []

    # If the even stream is terminated before we finish streaming application version events we will not
    #   be able to continue the command so we must warn the user it is not safe to quit.
    safe_to_quit = True
    if version_label is not None and request_id is None:
        safe_to_quit = False

    try:
        # Get first event in order to get start time
        if request_id:
            while not events:
                events = elasticbeanstalk.get_new_events(
                    app_name,
                    env_name,
                    request_id,
                    last_event_time=None,
                    platform_arn=platform_arn,
                    version_label=version_label)

                if len(events) > 0:
                    # The first event also tells us which app/env to poll next.
                    event = events[-1]
                    app_name = event.app_name
                    env_name = event.environment_name

                    if stream_events:
                        streamer.stream_event(get_event_string(
                            event, long_format=True),
                                              safe_to_quit=safe_to_quit)

                    _raise_if_error_event(event.message)
                    if _is_success_event(event.message):
                        return
                    last_time = event.event_date
                else:
                    _sleep(sleep_time)

        # Get remaining events
        while not _timeout_reached(start, timediff):
            _sleep(sleep_time)

            events = elasticbeanstalk.get_new_events(
                app_name,
                env_name,
                request_id,
                last_event_time=last_time,
                platform_arn=platform_arn,
                version_label=version_label)

            if events:
                events = filter_events(events,
                                       env_name=env_name,
                                       request_id=request_id,
                                       version_label=version_label)

            # Events arrive newest-first; process oldest-first.
            for event in reversed(events):
                if stream_events:
                    streamer.stream_event(get_event_string(event,
                                                           long_format=True),
                                          safe_to_quit=safe_to_quit)
                    # We dont need to update last_time if we are not printing.
                    # This can solve timing issues
                    last_time = event.event_date

                _raise_if_error_event(event.message)
                if _is_success_event(event.message):
                    return
    finally:
        streamer.end_stream()
    # We have timed out

    if not timeout_error_message:
        timeout_error_message = strings['timeout.error'].format(
            timeout_in_minutes=timeout_in_minutes)

    raise TimeoutError(timeout_error_message)